Path: blob/master/Deep-Learning-with-OpenCV-DNN-Module/python/detection/detect_img.py
3150 views
"""Detect objects in one image with OpenCV's DNN module.

Runs an SSD MobileNet v2 (COCO) frozen TensorFlow graph over a single
image, draws a labelled bounding box for every detection above a
confidence threshold, then displays and saves the annotated result.
"""

import sys

import cv2
import numpy as np

# minimum confidence for a detection to be drawn
CONF_THRESHOLD = 0.4

# load the COCO class names (one name per line)
with open('../../input/object_detection_classes_coco.txt', 'r') as f:
    class_names = f.read().split('\n')

# get a different (random) color for each of the classes
COLORS = np.random.uniform(0, 255, size=(len(class_names), 3))

# load the DNN model: frozen TensorFlow inference graph + text graph config
model = cv2.dnn.readNet(model='../../input/frozen_inference_graph.pb',
                        config='../../input/ssd_mobilenet_v2_coco_2018_03_29.pbtxt.txt',
                        framework='TensorFlow')

# read the image from disk; cv2.imread returns None (no exception) on failure
image = cv2.imread('../../input/image_2.jpg')
if image is None:
    sys.exit('error: could not read ../../input/image_2.jpg')
image_height, image_width, _ = image.shape

# create a 300x300 blob from the image (the SSD MobileNet input size);
# the mean values are subtracted per channel, and swapRB converts
# OpenCV's BGR channel order to the RGB order the model expects
blob = cv2.dnn.blobFromImage(image=image, size=(300, 300),
                             mean=(104, 117, 123), swapRB=True)

# set the blob as the model input
model.setInput(blob)
# forward pass through the model to carry out the detection
output = model.forward()

# each detection row is [_, class_id, confidence, x1, y1, x2, y2],
# with the two box corners normalized to [0, 1]
for detection in output[0, 0, :, :]:
    # draw a bounding box only if the detection confidence is above
    # the threshold, else skip it
    confidence = detection[2]
    if confidence <= CONF_THRESHOLD:
        continue
    # map the 1-based COCO class id to its name and its color
    # (both arrays are 0-based, so subtract 1 for each)
    class_id = int(detection[1])
    class_name = class_names[class_id - 1]
    color = COLORS[class_id - 1]
    # scale the normalized corner coordinates back to pixels
    x1 = int(detection[3] * image_width)
    y1 = int(detection[4] * image_height)
    x2 = int(detection[5] * image_width)
    y2 = int(detection[6] * image_height)
    # draw a rectangle around the detected object and put the
    # class label just above its top-left corner
    cv2.rectangle(image, (x1, y1), (x2, y2), color, thickness=2)
    cv2.putText(image, class_name, (x1, y1 - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2)

# show the annotated image, save it, and wait for a key press
cv2.imshow('image', image)
cv2.imwrite('../../outputs/image_result.jpg', image)
cv2.waitKey(0)
cv2.destroyAllWindows()