Path: blob/master/modules/dnn/misc/face_detector_accuracy.py
16354 views
# This script is used to estimate an accuracy of different face detection models.
# COCO evaluation tool is used to compute an accuracy metrics (Average Precision).
# Script works with different face detection datasets.
import argparse
import json
import os
import sys
from fnmatch import fnmatch
from math import pi

import cv2 as cv
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

parser = argparse.ArgumentParser(
        description='Evaluate OpenCV face detection algorithms '
                    'using COCO evaluation tool, http://cocodataset.org/#detections-eval')
parser.add_argument('--proto', help='Path to .prototxt of Caffe model or .pbtxt of TensorFlow graph')
parser.add_argument('--model', help='Path to .caffemodel trained in Caffe or .pb from TensorFlow')
parser.add_argument('--cascade', help='Optional path to trained Haar cascade as '
                                      'an additional model for evaluation')
parser.add_argument('--ann', help='Path to text file with ground truth annotations')
parser.add_argument('--pics', help='Path to images root directory')
parser.add_argument('--fddb', help='Evaluate FDDB dataset, http://vis-www.cs.umass.edu/fddb/', action='store_true')
parser.add_argument('--wider', help='Evaluate WIDER FACE dataset, http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/', action='store_true')
args = parser.parse_args()

# Ground truth accumulated in COCO data format (http://cocodataset.org/#format-data).
# Filled in by fddb_dataset() / wider_dataset() below, then dumped to annotations.json.
dataset = {
    'images': [],
    'categories': [{ 'id': 0, 'name': 'face' }],  # single category: face
    'annotations': []
}


def ellipse2Rect(params):
    """Convert an FDDB ellipse annotation to an axis-aligned bounding box.

    params: sequence [major_radius, minor_radius, angle_radians, center_x, center_y, ...]
    Returns (left, top, right, bottom) of the polygonal ellipse approximation's
    bounding rectangle.
    """
    rad_x = params[0]
    rad_y = params[1]
    angle = params[2] * 180.0 / pi  # cv.ellipse2Poly expects degrees
    center_x = params[3]
    center_y = params[4]
    pts = cv.ellipse2Poly((int(center_x), int(center_y)), (int(rad_x), int(rad_y)),
                          int(angle), 0, 360, 10)
    rect = cv.boundingRect(pts)
    left = rect[0]
    top = rect[1]
    right = rect[0] + rect[2]
    bottom = rect[1] + rect[3]
    return left, top, right, bottom


def addImage(imagePath):
    """Register an image in the ground truth dataset and return its numeric id."""
    assert('images' in dataset)
    imageId = len(dataset['images'])
    dataset['images'].append({
        'id': int(imageId),
        'file_name': imagePath
    })
    return imageId


def addBBox(imageId, left, top, width, height):
    """Append one ground truth face box (COCO annotation entry) for an image."""
    assert('annotations' in dataset)
    dataset['annotations'].append({
        'id': len(dataset['annotations']),
        'image_id': int(imageId),
        'category_id': 0,  # Face
        'bbox': [int(left), int(top), int(width), int(height)],
        'iscrowd': 0,
        'area': float(width * height)
    })


def addDetection(detections, imageId, left, top, width, height, score):
    """Append one detection in COCO results format to the `detections` list."""
    detections.append({
        'image_id': int(imageId),
        'category_id': 0,  # Face
        'bbox': [int(left), int(top), int(width), int(height)],
        'score': float(score)
    })


def fddb_dataset(annotations, images):
    """Parse FDDB fold files with ellipse annotations into the global dataset.

    annotations: directory containing FDDB-fold-*-ellipseList.txt files.
    images: root directory of the FDDB images (paths in fold files are relative).
    """
    for d in os.listdir(annotations):
        if fnmatch(d, 'FDDB-fold-*-ellipseList.txt'):
            with open(os.path.join(annotations, d), 'rt') as f:
                lines = [line.rstrip('\n') for line in f]
                lineId = 0
                while lineId < len(lines):
                    # Image
                    imgPath = lines[lineId]
                    lineId += 1
                    imageId = addImage(os.path.join(images, imgPath) + '.jpg')
                    # NOTE: a dead `cv.imread` of every image was removed here —
                    # its result was never used and only slowed parsing down.

                    # Faces
                    numFaces = int(lines[lineId])
                    lineId += 1
                    for i in range(numFaces):
                        params = [float(v) for v in lines[lineId].split()]
                        lineId += 1
                        left, top, right, bottom = ellipse2Rect(params)
                        addBBox(imageId, left, top, width=right - left + 1,
                                height=bottom - top + 1)


def wider_dataset(annotations, images):
    """Parse a WIDER FACE ground truth text file into the global dataset.

    annotations: path to the annotation text file (image path, face count,
                 then one `left top width height ...` line per face).
    images: root directory of the WIDER FACE images.
    """
    with open(annotations, 'rt') as f:
        lines = [line.rstrip('\n') for line in f]
        lineId = 0
        while lineId < len(lines):
            # Image
            imgPath = lines[lineId]
            lineId += 1
            imageId = addImage(os.path.join(images, imgPath))

            # Faces
            numFaces = int(lines[lineId])
            lineId += 1
            for i in range(numFaces):
                params = [int(v) for v in lines[lineId].split()]
                lineId += 1
                left, top, width, height = params[0], params[1], params[2], params[3]
                addBBox(imageId, left, top, width, height)


def evaluate():
    """Run COCO bounding box evaluation over the dumped JSON files and print AP."""
    cocoGt = COCO('annotations.json')
    cocoDt = cocoGt.loadRes('detections.json')
    cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()


### Convert to COCO annotations format #########################################
# Was `assert(args.fddb or args.wider)`: asserts are stripped under `python -O`,
# so validate CLI input explicitly through argparse instead.
if not (args.fddb or args.wider):
    parser.error('Either --fddb or --wider dataset flag is required')
if args.fddb:
    fddb_dataset(args.ann, args.pics)
elif args.wider:
    wider_dataset(args.ann, args.pics)

with open('annotations.json', 'wt') as f:
    json.dump(dataset, f)

### Obtain detections ##########################################################
detections = []
if args.proto and args.model:
    net = cv.dnn.readNet(args.proto, args.model)

    def detect(img, imageId):
        # SSD-style face detector: fixed 300x300 input, BGR mean subtraction,
        # no channel swap, no crop.
        imgWidth = img.shape[1]
        imgHeight = img.shape[0]
        net.setInput(cv.dnn.blobFromImage(img, 1.0, (300, 300), (104., 177., 123.), False, False))
        out = net.forward()

        for i in range(out.shape[2]):
            confidence = out[0, 0, i, 2]
            # Output coordinates are relative to the image size, in [0, 1].
            left = int(out[0, 0, i, 3] * imgWidth)
            top = int(out[0, 0, i, 4] * imgHeight)
            right = int(out[0, 0, i, 5] * imgWidth)
            bottom = int(out[0, 0, i, 6] * imgHeight)
            addDetection(detections, imageId, left, top, width=right - left + 1,
                         height=bottom - top + 1, score=confidence)

elif args.cascade:
    cascade = cv.CascadeClassifier(args.cascade)

    def detect(img, imageId):
        srcImgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        faces = cascade.detectMultiScale(srcImgGray)

        for rect in faces:
            left, top, width, height = rect[0], rect[1], rect[2], rect[3]
            # Cascade gives no confidence, so report every hit with score 1.0.
            addDetection(detections, imageId, left, top, width, height, score=1.0)

else:
    # Previously this fell through and crashed with NameError on `detect` below.
    parser.error('Either --proto with --model, or --cascade is required')

for i in range(len(dataset['images'])):
    sys.stdout.write('\r%d / %d' % (i + 1, len(dataset['images'])))
    sys.stdout.flush()

    img = cv.imread(dataset['images'][i]['file_name'])
    imageId = int(dataset['images'][i]['id'])

    detect(img, imageId)

with open('detections.json', 'wt') as f:
    json.dump(detections, f)

evaluate()


def rm(f):
    """Remove a file if it exists (cleanup of the temporary JSON dumps)."""
    if os.path.exists(f):
        os.remove(f)

rm('annotations.json')
rm('detections.json')