Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
hackassin
GitHub Repository: hackassin/learnopencv
Path: blob/master/FaceAverage/faceAverage.py
3118 views
1
#!/usr/bin/env python
2
3
# Copyright (c) 2016 Satya Mallick <[email protected]>
4
# All rights reserved. No warranty, explicit or implicit, provided.
5
6
7
import os
8
import cv2
9
import numpy as np
10
import math
11
import sys
12
13
# Read landmark points from every ".txt" file in a directory.
def readPoints(path) :
    """Return a list of point lists, one per .txt file in `path`.

    Files are visited in sorted order so they pair up with the images
    read by readImages.  Each file holds one "x y" integer pair per line.
    """
    allPoints = []

    for fileName in sorted(os.listdir(path)):
        if not fileName.endswith(".txt"):
            continue

        # Collect (x, y) tuples for this one file.
        filePoints = []
        with open(os.path.join(path, fileName)) as fh:
            for row in fh:
                xs, ys = row.split()
                filePoints.append((int(xs), int(ys)))

        allPoints.append(filePoints)

    return allPoints
36
37
# Read every ".jpg" image found in a folder.
def readImages(path) :
    """Return a list of float32 images scaled to [0, 1], one per .jpg in
    `path`, visited in sorted order (matching readPoints)."""
    loaded = []

    for fileName in sorted(os.listdir(path)):
        if not fileName.endswith(".jpg"):
            continue

        # Load the image, then convert uint8 [0, 255] to float32 [0, 1].
        raw = cv2.imread(os.path.join(path, fileName))
        loaded.append(np.float32(raw) / 255.0)

    return loaded
57
58
# Compute a similarity transform given two pairs of corresponding points.
# The OpenCV estimator works best with 3 point pairs, so we synthesize a
# third pair: rotating the segment between the two points by 60 degrees
# creates the apex of an equilateral triangle in both point sets, which
# corresponds under any similarity (rotation + uniform scale + translation).
def similarityTransform(inPoints, outPoints) :
    """Return the 2x3 similarity transform matrix mapping inPoints to outPoints.

    inPoints / outPoints: two corresponding 2D points each (e.g. eye corners).
    """
    s60 = math.sin(60*math.pi/180)
    c60 = math.cos(60*math.pi/180)

    inPts = np.copy(inPoints).tolist()
    outPts = np.copy(outPoints).tolist()

    # Third (faked) vertex for the input segment.
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the documented replacement.
    xin = c60*(inPts[0][0] - inPts[1][0]) - s60*(inPts[0][1] - inPts[1][1]) + inPts[1][0]
    yin = s60*(inPts[0][0] - inPts[1][0]) + c60*(inPts[0][1] - inPts[1][1]) + inPts[1][1]

    inPts.append([int(xin), int(yin)])

    # Third (faked) vertex for the output segment.
    xout = c60*(outPts[0][0] - outPts[1][0]) - s60*(outPts[0][1] - outPts[1][1]) + outPts[1][0]
    yout = s60*(outPts[0][0] - outPts[1][0]) + c60*(outPts[0][1] - outPts[1][1]) + outPts[1][1]

    outPts.append([int(xout), int(yout)])

    # estimateAffinePartial2D returns (matrix, inlier_mask); keep the matrix.
    tform = cv2.estimateAffinePartial2D(np.array([inPts]), np.array([outPts]))

    return tform[0]
82
83
84
# Check whether a point lies inside a rectangle given as (x1, y1, x2, y2).
def rectContains(rect, point) :
    """Return True when `point` falls within `rect`, edges inclusive."""
    inside_x = rect[0] <= point[0] <= rect[2]
    inside_y = rect[1] <= point[1] <= rect[3]
    return inside_x and inside_y
95
96
# Calculate the Delaunay triangulation of `points`, returning each triangle
# as a triple of indices into the `points` array.
def calculateDelaunayTriangles(rect, points):
    """rect: (x1, y1, x2, y2) bounds for the subdivision; points: (x, y) pairs."""
    # Insert every point into an OpenCV planar subdivision.
    subdiv = cv2.Subdiv2D(rect)
    for point in points:
        subdiv.insert((point[0], point[1]))

    # Each row from getTriangleList is 6 numbers: three (x, y) vertices.
    delaunayTri = []
    for tri in subdiv.getTriangleList():
        vertices = [(tri[0], tri[1]), (tri[2], tri[3]), (tri[4], tri[5])]

        # Discard triangles touching Subdiv2D's virtual vertices outside rect.
        if not all(rectContains(rect, v) for v in vertices):
            continue

        # Map each vertex back to its index in `points` by proximity
        # (coordinates may not compare exactly equal after the subdivision).
        ind = []
        for vx, vy in vertices:
            for k in range(len(points)):
                if abs(vx - points[k][0]) < 1.0 and abs(vy - points[k][1]) < 1.0:
                    ind.append(k)
        if len(ind) == 3:
            delaunayTri.append((ind[0], ind[1], ind[2]))

    return delaunayTri
135
136
137
# Clamp a point so it lies inside a w x h image.
def constrainPoint(p, w, h) :
    """Return `p` with x clamped to [0, w-1] and y clamped to [0, h-1]."""
    x = min(max(p[0], 0), w - 1)
    y = min(max(p[1], 0), h - 1)
    return (x, y)
140
141
# Apply the affine transform mapping srcTri onto dstTri to `src`, producing
# an output patch of the given size.
def applyAffineTransform(src, srcTri, dstTri, size) :
    """size is (width, height); returns the warped image patch."""
    # Solve for the 2x3 affine matrix from the triangle correspondence.
    warpMat = cv2.getAffineTransform( np.float32(srcTri), np.float32(dstTri) )

    # Warp with bilinear sampling; BORDER_REFLECT_101 mirror-pads the border.
    warped = cv2.warpAffine( src, warpMat, (size[0], size[1]), None,
                             flags=cv2.INTER_LINEAR,
                             borderMode=cv2.BORDER_REFLECT_101 )

    return warped
152
153
154
# Warp the triangular region t1 of img1 onto triangle t2 of img2 and
# composite it in place (img2 is modified).
def warpTriangle(img1, img2, t1, t2) :
    """Warp triangle t1 from img1 to triangle t2 of img2, blending in place.

    img1, img2 : float images (img2 is written to).
    t1, t2     : three (x, y) vertices each, in full-image coordinates.
    """

    # Find bounding rectangle for each triangle; rects are (x, y, w, h).
    r1 = cv2.boundingRect(np.float32([t1]))
    r2 = cv2.boundingRect(np.float32([t2]))

    # Offset points by left top corner of the respective rectangles, so the
    # warp operates on small local patches instead of the whole image.
    t1Rect = []
    t2Rect = []
    t2RectInt = []

    for i in range(0, 3):
        t1Rect.append(((t1[i][0] - r1[0]),(t1[i][1] - r1[1])))
        t2Rect.append(((t2[i][0] - r2[0]),(t2[i][1] - r2[1])))
        t2RectInt.append(((t2[i][0] - r2[0]),(t2[i][1] - r2[1])))


    # Get mask by filling triangle: 1.0 inside the destination triangle,
    # 0.0 outside (lineType=16 is anti-aliased, shift=0).
    mask = np.zeros((r2[3], r2[2], 3), dtype = np.float32)
    cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1.0, 1.0, 1.0), 16, 0)

    # Crop the small rectangular source patch around triangle t1.
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]

    # Destination patch size as (width, height).
    size = (r2[2], r2[3])

    # Warp the source patch so t1 lands on t2 within the patch.
    img2Rect = applyAffineTransform(img1Rect, t1Rect, t2Rect, size)

    # Keep only the pixels inside the destination triangle.
    img2Rect = img2Rect * mask

    # Composite into the output image: first zero the destination triangle,
    # then add the masked warped patch.
    img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] * ( (1.0, 1.0, 1.0) - mask )

    img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] + img2Rect
189
190
191
192
if __name__ == '__main__' :

    # Directory containing the input images (.jpg) and landmark files (.txt).
    path = 'presidents/'

    # Dimensions of output image
    w = 600
    h = 600

    # Read points for all images
    allPoints = readPoints(path)

    # Read all images
    images = readImages(path)

    # Eye corners in the output image: eyes at 1/3 of the height, at 30% and
    # 70% of the width.
    # FIX: np.int was removed in NumPy 1.24; the builtin int is the replacement.
    eyecornerDst = [ (int(0.3 * w), int(h / 3)), (int(0.7 * w), int(h / 3)) ]

    imagesNorm = []
    pointsNorm = []

    # Add boundary points (corners and edge midpoints) so the Delaunay
    # triangulation covers the whole output image.
    boundaryPts = np.array([(0,0), (w/2,0), (w-1,0), (w-1,h/2), ( w-1, h-1 ), ( w/2, h-1 ), (0, h-1), (0,h/2) ])

    # Number of landmark points per face (68 for dlib-style annotations).
    n = len(allPoints[0])

    numImages = len(images)

    # Initialize location of average points to 0s.
    # FIX: pass the dtype *type* np.float32, not a np.float32() scalar
    # instance, which modern NumPy does not accept as a dtype.
    pointsAvg = np.array([(0,0)] * (n + len(boundaryPts)), np.float32)

    # Warp images and transform landmarks to the output coordinate system,
    # and find the average of the transformed landmarks.
    for i in range(0, numImages):

        points1 = allPoints[i]

        # Corners of the eyes in the input image (outer-eye landmark
        # indices 36 and 45 in the 68-point annotation scheme).
        eyecornerSrc = [ allPoints[i][36], allPoints[i][45] ]

        # Compute similarity transform mapping this face's eye corners
        # onto the canonical output eye positions.
        tform = similarityTransform(eyecornerSrc, eyecornerDst)

        # Apply similarity transform to the image.
        img = cv2.warpAffine(images[i], tform, (w,h))

        # Apply similarity transform to the landmark points.
        # (-1, 1, 2) generalizes the original hard-coded 68 so any
        # landmark count works.
        points2 = np.reshape(np.array(points1), (-1, 1, 2))

        points = cv2.transform(points2, tform)

        points = np.float32(np.reshape(points, (-1, 2)))

        # Append boundary points. Will be used in Delaunay triangulation.
        points = np.append(points, boundaryPts, axis=0)

        # Accumulate the running average of landmark locations.
        pointsAvg = pointsAvg + points / numImages

        pointsNorm.append(points)
        imagesNorm.append(img)


    # Delaunay triangulation on the averaged landmarks.
    rect = (0, 0, w, h)
    dt = calculateDelaunayTriangles(rect, np.array(pointsAvg))

    # Output image accumulator.
    output = np.zeros((h,w,3), np.float32)

    # Warp input images to average image landmarks
    for i in range(0, len(imagesNorm)) :
        img = np.zeros((h,w,3), np.float32)
        # Transform triangles one by one
        for j in range(0, len(dt)) :
            tin = []
            tout = []

            for k in range(0, 3) :
                # Source vertex in this normalized image, clamped in-bounds.
                pIn = pointsNorm[i][dt[j][k]]
                pIn = constrainPoint(pIn, w, h)

                # Destination vertex on the average face, clamped in-bounds.
                pOut = pointsAvg[dt[j][k]]
                pOut = constrainPoint(pOut, w, h)

                tin.append(pIn)
                tout.append(pOut)

            warpTriangle(imagesNorm[i], img, tin, tout)

        # Add image intensities for averaging
        output = output + img

    # Divide by numImages to get average
    output = output / numImages

    # Display result
    cv2.imshow('image', output)
    cv2.waitKey(0)
295
296