Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/samples/dnn/mobilenet_ssd_accuracy.py
16337 views
1
from __future__ import print_function
2
# Script to evaluate MobileNet-SSD object detection model trained in TensorFlow
3
# using both TensorFlow and OpenCV. Example:
4
#
5
# python mobilenet_ssd_accuracy.py \
6
# --weights=frozen_inference_graph.pb \
7
# --prototxt=ssd_mobilenet_v1_coco.pbtxt \
8
# --images=val2017 \
9
# --annotations=annotations/instances_val2017.json
10
#
11
# Tested on COCO 2017 object detection dataset, http://cocodataset.org/#download
12
import os
13
import cv2 as cv
14
import json
15
import argparse
16
17
# Command-line interface: all four paths are mandatory.
parser = argparse.ArgumentParser(
    description='Evaluate MobileNet-SSD model using both TensorFlow and OpenCV. '
                'COCO evaluation framework is required: http://cocodataset.org')
parser.add_argument('--weights', required=True,
                    help='Path to frozen_inference_graph.pb of MobileNet-SSD model. '
                         'Download it from http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_11_06_2017.tar.gz')
parser.add_argument('--prototxt', required=True,
                    help='Path to ssd_mobilenet_v1_coco.pbtxt from opencv_extra.')
parser.add_argument('--images', required=True,
                    help='Path to COCO validation images directory.')
parser.add_argument('--annotations', required=True,
                    help='Path to COCO annotations file.')
args = parser.parse_args()
27
28
### Get OpenCV predictions #####################################################

def _coco_image_id(fileName):
    """Return the numeric COCO image id encoded in a file name.

    COCO val2017 images are named after their zero-padded id, e.g.
    '000000397133.jpg' -> 397133.  The original expression
    `imgName.rstrip('0')[:imgName.rfind('.')]` was a no-op rstrip with a
    slice index computed on a different string; splitting the extension
    off is the intended, unambiguous behavior.
    """
    return int(os.path.splitext(fileName)[0])

net = cv.dnn.readNetFromTensorflow(args.weights, args.prototxt)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)

detections = []
for imgName in os.listdir(args.images):
    inp = cv.imread(os.path.join(args.images, imgName))
    if inp is None:
        # Skip unreadable / non-image entries (e.g. hidden files) instead of
        # crashing on `None.shape` below.
        continue
    rows = inp.shape[0]
    cols = inp.shape[1]
    inp = cv.resize(inp, (300, 300))

    # Scale pixels to [-1, 1] (1/127.5 with mean 127.5) and swap BGR->RGB,
    # matching the TensorFlow training preprocessing.
    net.setInput(cv.dnn.blobFromImage(inp, 1.0 / 127.5, (300, 300), (127.5, 127.5, 127.5), True))
    out = net.forward()

    for i in range(out.shape[2]):
        score = float(out[0, 0, i, 2])
        # Confidence threshold is in prototxt.
        classId = int(out[0, 0, i, 1])

        # Detections are normalized [left, top, right, bottom]; convert to
        # absolute COCO-format [x, y, width, height].
        x = out[0, 0, i, 3] * cols
        y = out[0, 0, i, 4] * rows
        w = out[0, 0, i, 5] * cols - x
        h = out[0, 0, i, 6] * rows - y
        detections.append({
            "image_id": _coco_image_id(imgName),
            "category_id": classId,
            "bbox": [x, y, w, h],
            "score": score
        })

with open('cv_result.json', 'wt') as f:
    json.dump(detections, f)
60
61
### Get TensorFlow predictions #################################################
import tensorflow as tf

# The frozen graph is a binary protobuf: it must be opened in binary mode
# ('rb').  In text mode f.read() yields str on Python 3 and
# graph_def.ParseFromString raises a TypeError.
with tf.gfile.FastGFile(args.weights, 'rb') as f:
    # Load the model
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Session() as sess:
    # Restore session
    sess.graph.as_default()
    tf.import_graph_def(graph_def, name='')

    detections = []
    for imgName in os.listdir(args.images):
        inp = cv.imread(os.path.join(args.images, imgName))
        if inp is None:
            # Skip unreadable / non-image entries instead of crashing below.
            continue
        rows = inp.shape[0]
        cols = inp.shape[1]
        inp = cv.resize(inp, (300, 300))
        inp = inp[:, :, [2, 1, 0]]  # BGR2RGB
        out = sess.run([sess.graph.get_tensor_by_name('num_detections:0'),
                        sess.graph.get_tensor_by_name('detection_scores:0'),
                        sess.graph.get_tensor_by_name('detection_boxes:0'),
                        sess.graph.get_tensor_by_name('detection_classes:0')],
                       feed_dict={'image_tensor:0': inp.reshape(1, inp.shape[0], inp.shape[1], 3)})
        num_detections = int(out[0][0])
        for i in range(num_detections):
            classId = int(out[3][0][i])
            score = float(out[1][0][i])
            bbox = [float(v) for v in out[2][0][i]]
            if score > 0.01:
                # TF boxes are normalized [ymin, xmin, ymax, xmax]; convert to
                # absolute COCO-format [x, y, width, height].
                x = bbox[1] * cols
                y = bbox[0] * rows
                w = bbox[3] * cols - x
                h = bbox[2] * rows - y
                detections.append({
                    # File stem is the numeric COCO image id, e.g.
                    # '000000397133.jpg' -> 397133.
                    "image_id": int(os.path.splitext(imgName)[0]),
                    "category_id": classId,
                    "bbox": [x, y, w, h],
                    "score": score
                })

with open('tf_result.json', 'wt') as f:
    json.dump(detections, f)
105
106
### Evaluation part ############################################################

# %matplotlib inline
import matplotlib.pyplot as plt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import skimage.io as io
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 8.0)

# Evaluate bounding boxes ('segm' and 'keypoints' are the other COCO modes).
annType = ['segm', 'bbox', 'keypoints'][1]  # specify type here
prefix = 'instances' if annType != 'keypoints' else 'person_keypoints'
print('Running demo for *%s* results.'%(annType))

#initialize COCO ground truth api
cocoGt = COCO(args.annotations)

#initialize COCO detections api
# Score both result files produced above against the same ground truth.
for resFile in ('tf_result.json', 'cv_result.json'):
    print(resFile)
    cocoDt = cocoGt.loadRes(resFile)

    evaluator = COCOeval(cocoGt, cocoDt, annType)
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()
134
135