GitHub Repository: hackassin/learnopencv
Path: blob/master/AgeGender/AgeGender.py
# Import required modules
import cv2 as cv
import math
import time
import argparse


def getFaceBox(net, frame, conf_threshold=0.7):
    # Run the face detector on a copy of the frame and collect boxes above the confidence threshold
    frameOpencvDnn = frame.copy()
    frameHeight = frameOpencvDnn.shape[0]
    frameWidth = frameOpencvDnn.shape[1]
    blob = cv.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False)

    net.setInput(blob)
    detections = net.forward()
    bboxes = []
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > conf_threshold:
            # The detector outputs normalized coordinates; scale them to pixel positions
            x1 = int(detections[0, 0, i, 3] * frameWidth)
            y1 = int(detections[0, 0, i, 4] * frameHeight)
            x2 = int(detections[0, 0, i, 5] * frameWidth)
            y2 = int(detections[0, 0, i, 6] * frameHeight)
            bboxes.append([x1, y1, x2, y2])
            cv.rectangle(frameOpencvDnn, (x1, y1), (x2, y2), (0, 255, 0), int(round(frameHeight / 150)), 8)
    return frameOpencvDnn, bboxes
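
# A minimal usage sketch of getFaceBox on a single image, kept as a comment.
# The image name is hypothetical, and it assumes the face detector files
# referenced later in this script are present in the working directory:
#
#   faceNet = cv.dnn.readNet("opencv_face_detector_uint8.pb", "opencv_face_detector.pbtxt")
#   img = cv.imread("sample.jpg")
#   annotated, boxes = getFaceBox(faceNet, img)
#   print(boxes)  # list of [x1, y1, x2, y2] boxes in pixel coordinates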


parser = argparse.ArgumentParser(description='Use this script to run age and gender recognition using OpenCV.')
parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.')
parser.add_argument("--device", default="cpu", help="Device to run inference on: 'cpu' or 'gpu'")

args = parser.parse_args()

faceProto = "opencv_face_detector.pbtxt"
faceModel = "opencv_face_detector_uint8.pb"

ageProto = "age_deploy.prototxt"
ageModel = "age_net.caffemodel"

genderProto = "gender_deploy.prototxt"
genderModel = "gender_net.caffemodel"

MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
ageList = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
genderList = ['Male', 'Female']

# Load network
ageNet = cv.dnn.readNet(ageModel, ageProto)
genderNet = cv.dnn.readNet(genderModel, genderProto)
faceNet = cv.dnn.readNet(faceModel, faceProto)

# Select the preferred backend and target for all three networks
if args.device == "cpu":
    ageNet.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
    ageNet.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)

    genderNet.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
    genderNet.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)

    faceNet.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
    faceNet.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)

    print("Using CPU device")
elif args.device == "gpu":
    ageNet.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
    ageNet.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)

    genderNet.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
    genderNet.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)

    faceNet.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
    faceNet.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)

    print("Using GPU device")

# Open a video file, an image file, or a camera stream
cap = cv.VideoCapture(args.input if args.input else 0)
padding = 20
while cv.waitKey(1) < 0:
    # Read frame
    t = time.time()
    hasFrame, frame = cap.read()
    if not hasFrame:
        cv.waitKey()
        break

    frameFace, bboxes = getFaceBox(faceNet, frame)
    if not bboxes:
        print("No face detected, checking next frame")
        continue

    for bbox in bboxes:
        # print(bbox)
        # Crop the face with some padding, clipped to the frame boundaries
        face = frame[max(0, bbox[1] - padding):min(bbox[3] + padding, frame.shape[0] - 1),
                     max(0, bbox[0] - padding):min(bbox[2] + padding, frame.shape[1] - 1)]

        blob = cv.dnn.blobFromImage(face, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
        genderNet.setInput(blob)
        genderPreds = genderNet.forward()
        gender = genderList[genderPreds[0].argmax()]
        # print("Gender Output : {}".format(genderPreds))
        print("Gender : {}, conf = {:.3f}".format(gender, genderPreds[0].max()))

        ageNet.setInput(blob)
        agePreds = ageNet.forward()
        age = ageList[agePreds[0].argmax()]
        print("Age Output : {}".format(agePreds))
        print("Age : {}, conf = {:.3f}".format(age, agePreds[0].max()))

        label = "{},{}".format(gender, age)
        cv.putText(frameFace, label, (bbox[0], bbox[1] - 10), cv.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2, cv.LINE_AA)
        cv.imshow("Age Gender Demo", frameFace)
    # cv.imwrite("age-gender-out-{}".format(args.input), frameFace)
    print("time : {:.3f}".format(time.time() - t))


# Example cmake configuration for building OpenCV with CUDA-enabled DNN support:
# cmake -DCMAKE_BUILD_TYPE=RELEASE -DCMAKE_INSTALL_PREFIX=~/opencv_gpu -DINSTALL_PYTHON_EXAMPLES=OFF -DINSTALL_C_EXAMPLES=OFF -DOPENCV_ENABLE_NONFREE=ON -DOPENCV_EXTRA_MODULES_PATH=~/cv2_gpu/opencv_contrib/modules -DPYTHON_EXECUTABLE=~/env/bin/python3 -DBUILD_EXAMPLES=ON -DWITH_CUDA=ON -DWITH_CUDNN=ON -DOPENCV_DNN_CUDA=ON -DENABLE_FAST_MATH=ON -DCUDA_FAST_MATH=ON -DWITH_CUBLAS=ON -DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-10.2 -DOpenCL_LIBRARY=/usr/local/cuda-10.2/lib64/libOpenCL.so -DOpenCL_INCLUDE_DIR=/usr/local/cuda-10.2/include/ ..
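
# Example invocations (a sketch; assumes the model files listed above have been
# downloaded next to this script, and that OpenCV was built with CUDA support
# for the --device gpu case; the input file names are hypothetical):
#
#   python AgeGender.py --input sample.jpg              # single image
#   python AgeGender.py                                  # default camera stream
#   python AgeGender.py --input video.mp4 --device gpu   # video with CUDA inference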