Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/dnn/misc/face_detector_accuracy.py
16354 views
1
# This script is used to estimate an accuracy of different face detection models.
2
# COCO evaluation tool is used to compute an accuracy metrics (Average Precision).
3
# Script works with different face detection datasets.
4
import os
5
import json
6
from fnmatch import fnmatch
7
from math import pi
8
import cv2 as cv
9
import argparse
10
import os
11
import sys
12
from pycocotools.coco import COCO
13
from pycocotools.cocoeval import COCOeval
14
15
# Command-line options: model files, dataset annotations/images, and which
# dataset format (FDDB or WIDER FACE) to evaluate.
arg_parser = argparse.ArgumentParser(
    description=('Evaluate OpenCV face detection algorithms '
                 'using COCO evaluation tool, http://cocodataset.org/#detections-eval'))
arg_parser.add_argument('--proto',
                        help='Path to .prototxt of Caffe model or .pbtxt of TensorFlow graph')
arg_parser.add_argument('--model',
                        help='Path to .caffemodel trained in Caffe or .pb from TensorFlow')
arg_parser.add_argument('--cascade',
                        help='Optional path to trained Haar cascade as an additional model for evaluation')
arg_parser.add_argument('--ann',
                        help='Path to text file with ground truth annotations')
arg_parser.add_argument('--pics',
                        help='Path to images root directory')
arg_parser.add_argument('--fddb', action='store_true',
                        help='Evaluate FDDB dataset, http://vis-www.cs.umass.edu/fddb/')
arg_parser.add_argument('--wider', action='store_true',
                        help='Evaluate WIDER FACE dataset, http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/')
args = arg_parser.parse_args()
27
28
# Global ground-truth container in COCO annotation format; filled in by
# addImage()/addBBox() and dumped to annotations.json below.
dataset = {
    'images': [],
    'categories': [{'id': 0, 'name': 'face'}],  # single 'face' category
    'annotations': [],
}
32
33
def ellipse2Rect(params):
    """Convert an FDDB ellipse annotation to an axis-aligned box.

    params holds [major_radius, minor_radius, angle_radians, center_x,
    center_y, ...] as floats (extra trailing values are ignored).
    Returns (left, top, right, bottom) of the ellipse's bounding rectangle.
    """
    rad_major = params[0]
    rad_minor = params[1]
    angle_deg = params[2] * 180.0 / pi  # cv.ellipse2Poly expects degrees
    cx = params[3]
    cy = params[4]
    # Sample the ellipse outline as a polygon, then take its bounding box.
    poly = cv.ellipse2Poly((int(cx), int(cy)),
                           (int(rad_major), int(rad_minor)),
                           int(angle_deg), 0, 360, 10)
    x, y, w, h = cv.boundingRect(poly)
    return x, y, x + w, y + h
47
48
def addImage(imagePath):
    """Register an image in the global COCO dataset; return its numeric id."""
    assert('images' in dataset)
    images = dataset['images']
    imageId = len(images)  # ids are sequential positions in the list
    images.append({
        'id': int(imageId),
        'file_name': imagePath,
    })
    return imageId
56
57
def addBBox(imageId, left, top, width, height):
    """Append one ground-truth face box for imageId to the global dataset."""
    assert('annotations' in dataset)
    annotations = dataset['annotations']
    annotations.append({
        'id': len(annotations),  # sequential annotation id
        'image_id': int(imageId),
        'category_id': 0,  # the single 'face' category
        'bbox': [int(left), int(top), int(width), int(height)],
        'iscrowd': 0,
        'area': float(width * height),
    })
67
68
def addDetection(detections, imageId, left, top, width, height, score):
    """Append one detector output, in COCO results format, to detections."""
    entry = {
        'image_id': int(imageId),
        'category_id': 0,  # the single 'face' category
        'bbox': [int(left), int(top), int(width), int(height)],
        'score': float(score),
    }
    detections.append(entry)
75
76
77
def fddb_dataset(annotations, images):
    """Parse FDDB ellipse annotation folds into the global COCO dataset.

    annotations: directory containing FDDB-fold-*-ellipseList.txt files.
    images: root directory of the FDDB images (paths in the fold files are
            relative to it and lack the .jpg extension).
    """
    for d in os.listdir(annotations):
        if not fnmatch(d, 'FDDB-fold-*-ellipseList.txt'):
            continue
        with open(os.path.join(annotations, d), 'rt') as f:
            lines = [line.rstrip('\n') for line in f]
        lineId = 0
        while lineId < len(lines):
            # Image path (relative, without extension)
            imgPath = lines[lineId]
            lineId += 1
            imageId = addImage(os.path.join(images, imgPath) + '.jpg')
            # NOTE: the original code also cv.imread() the image here but
            # never used the result; that per-image disk I/O was removed.

            # Face count, then one ellipse description per line
            numFaces = int(lines[lineId])
            lineId += 1
            for _ in range(numFaces):
                params = [float(v) for v in lines[lineId].split()]
                lineId += 1
                left, top, right, bottom = ellipse2Rect(params)
                addBBox(imageId, left, top, width=right - left + 1,
                        height=bottom - top + 1)
100
101
102
def wider_dataset(annotations, images):
    """Parse a WIDER FACE annotation file into the global COCO dataset.

    annotations: path to the ground-truth text file (image path, face
                 count, then one box `x y w h ...` per line).
    images: root directory the image paths are relative to.
    """
    with open(annotations, 'rt') as f:
        lines = [line.rstrip('\n') for line in f]
    pos = 0
    total = len(lines)
    while pos < total:
        # One image path per record
        imageId = addImage(os.path.join(images, lines[pos]))
        pos += 1
        # Face count, then one box per line (extra fields are ignored)
        numFaces = int(lines[pos])
        pos += 1
        for _ in range(numFaces):
            fields = [int(v) for v in lines[pos].split()]
            pos += 1
            addBBox(imageId, fields[0], fields[1], fields[2], fields[3])
120
121
def evaluate():
    """Run COCO bbox evaluation of detections.json against annotations.json
    and print the Average Precision summary."""
    groundTruth = COCO('annotations.json')
    results = groundTruth.loadRes('detections.json')
    cocoEval = COCOeval(groundTruth, results, 'bbox')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
128
129
130
### Convert to COCO annotations format #########################################
# Exactly one dataset flag is required. The original used a bare `assert`,
# which is stripped under `python -O`; fail explicitly instead.
if not (args.fddb or args.wider):
    sys.exit('Either --fddb or --wider must be specified')

if args.fddb:
    fddb_dataset(args.ann, args.pics)
elif args.wider:
    wider_dataset(args.ann, args.pics)

# Dump the ground truth in COCO format for pycocotools to consume.
with open('annotations.json', 'wt') as f:
    json.dump(dataset, f)
139
140
### Obtain detections ##########################################################
detections = []
if args.proto and args.model:
    net = cv.dnn.readNet(args.proto, args.model)

    def detect(img, imageId):
        """Run the DNN face detector on img; append results to detections."""
        imgWidth = img.shape[1]
        imgHeight = img.shape[0]
        # Network expects 300x300 BGR input with mean (104, 177, 123) subtracted.
        net.setInput(cv.dnn.blobFromImage(img, 1.0, (300, 300), (104., 177., 123.), False, False))
        out = net.forward()

        for i in range(out.shape[2]):
            confidence = out[0, 0, i, 2]
            # Box coordinates are normalized to [0, 1]; scale to pixels.
            left = int(out[0, 0, i, 3] * imgWidth)
            top = int(out[0, 0, i, 4] * imgHeight)
            right = int(out[0, 0, i, 5] * imgWidth)
            bottom = int(out[0, 0, i, 6] * imgHeight)
            addDetection(detections, imageId, left, top, width=right - left + 1,
                         height=bottom - top + 1, score=confidence)

elif args.cascade:
    cascade = cv.CascadeClassifier(args.cascade)

    def detect(img, imageId):
        """Run the Haar cascade on img; append results with score 1.0."""
        srcImgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        for rect in cascade.detectMultiScale(srcImgGray):
            left, top, width, height = rect[0], rect[1], rect[2], rect[3]
            addDetection(detections, imageId, left, top, width, height, score=1.0)
else:
    # Without this guard a missing model produced a confusing NameError
    # on `detect` below.
    sys.exit('Specify either --proto and --model, or --cascade')

for i in range(len(dataset['images'])):
    sys.stdout.write('\r%d / %d' % (i + 1, len(dataset['images'])))
    sys.stdout.flush()

    img = cv.imread(dataset['images'][i]['file_name'])
    if img is None:
        # cv.imread returns None for missing/unreadable files; fail clearly
        # instead of crashing inside detect().
        sys.exit('\nFailed to read image: ' + dataset['images'][i]['file_name'])
    imageId = int(dataset['images'][i]['id'])

    detect(img, imageId)

with open('detections.json', 'wt') as f:
    json.dump(detections, f)

evaluate()
184
185
186
def rm(f):
    """Delete file f from disk; a missing file is silently ignored."""
    if not os.path.exists(f):
        return
    os.remove(f)

# Clean up the temporary COCO-format files produced above.
rm('annotations.json')
rm('detections.json')
192
193