GitHub Repository: hackassin/learnopencv
Path: blob/master/Background-Subtraction/evaluator.py
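
"""Evaluate background subtraction algorithms on the Change Detection dataset.

The script compares OpenCV's GSoC background subtractor
(cv2.bgsegm.createBackgroundSubtractorGSOC) with SuBSENSE from the pybgs package.
Each sequence directory is expected to hold an "input" folder of *.jpg frames and a
"groundtruth" folder of *.png masks; the script prints per-sequence scores
(precision, recall, F1, accuracy, and average seconds per frame) followed by
per-category and overall summaries.

Example invocation (the dataset path is illustrative):

    python evaluator.py --dataset_path /path/to/dataset [--algorithm GSoC]
"""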
import argparse
import glob
import os
import time

import cv2
import numpy as np
import pybgs as bgs

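# Algorithms under test: each entry is (constructor, display name, keyword arguments for the constructor).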
ALGORITHMS_TO_EVALUATE = [
    (cv2.bgsegm.createBackgroundSubtractorGSOC, "GSoC", {}),
    (bgs.SuBSENSE, "SuBSENSE", {}),
]


# https://github.com/opencv/opencv_contrib/blob/master/modules/bgsegm/samples/evaluation.py
def contains_relevant_files(root):
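    """Return True if `root` contains the "groundtruth" and "input" sub-directories of a sequence."""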
    return os.path.isdir(os.path.join(root, "groundtruth")) and os.path.isdir(
        os.path.join(root, "input"),
    )


def find_relevant_dirs(root):
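    """Recursively collect every sequence directory under `root`, in sorted order."""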
    relevant_dirs = []
    for d in sorted(os.listdir(root)):
        d = os.path.join(root, d)
        if os.path.isdir(d):
            if contains_relevant_files(d):
                relevant_dirs += [d]
            else:
                relevant_dirs += find_relevant_dirs(d)
    return relevant_dirs


def load_sequence(root):
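    """Return sorted lists of ground-truth mask paths (*.png) and input frame paths (*.jpg) for a sequence."""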
    gt_dir, frames_dir = os.path.join(root, "groundtruth"), os.path.join(root, "input")
    gt = sorted(glob.glob(os.path.join(gt_dir, "*.png")))
    f = sorted(glob.glob(os.path.join(frames_dir, "*.jpg")))
    assert len(gt) == len(f)
    return gt, f


def evaluate_algorithm(gt, frames, algo, algo_arguments):
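    """Run one background subtractor (built via `algo(**algo_arguments)`) over a sequence.

    Returns (average seconds per frame, mean precision, mean recall, mean F1, mean accuracy).
    """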
    # instantiate the background subtraction model
    model = algo(**algo_arguments)
    mask = []
    # start timing the subtraction step
    t_start = time.time()

    for i in range(len(gt)):
        # read the next input frame
        frame = np.uint8(cv2.imread(frames[i], cv2.IMREAD_COLOR))
        # feed the frame into the model and store the predicted foreground mask
        mask.append(model.apply(frame))

    average_duration = (time.time() - t_start) / len(gt)
    average_precision, average_recall, average_f1, average_accuracy = [], [], [], []

    # iterate over the ground-truth frames
    for i in range(len(gt)):
        # load the ground-truth mask
        gt_mask = np.uint8(cv2.imread(gt[i], cv2.IMREAD_GRAYSCALE))
        # score only pixels labelled as definite background (0) or definite foreground (255)
        roi = (gt_mask == 255) | (gt_mask == 0)
        if roi.sum() > 0:
            gt_answer, answer = gt_mask[roi], mask[i][roi]

            # count true positives, true negatives, false positives, false negatives
            tp = ((answer == 255) & (gt_answer == 255)).sum()
            tn = ((answer == 0) & (gt_answer == 0)).sum()
            fp = ((answer == 255) & (gt_answer == 0)).sum()
            fn = ((answer == 0) & (gt_answer == 255)).sum()

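            # Metric definitions (per ground-truth frame, scored pixels only):
            #   precision = TP / (TP + FP)
            #   recall    = TP / (TP + FN)
            #   F1        = 2 * TP / (2 * TP + FN + FP)
            #   accuracy  = (TP + TN) / (TP + TN + FP + FN)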
            # accumulate per-frame metrics, guarding against empty denominators
            if tp + fp > 0:
                average_precision.append(float(tp) / (tp + fp))
            if tp + fn > 0:
                average_recall.append(float(tp) / (tp + fn))
            if tp + fn + fp > 0:
                average_f1.append(2.0 * tp / (2.0 * tp + fn + fp))
            average_accuracy.append(float(tp + tn) / (tp + tn + fp + fn))

    return (
        average_duration,
        np.mean(average_precision),
        np.mean(average_recall),
        np.mean(average_f1),
        np.mean(average_accuracy),
    )


def evaluate_on_sequence(seq, summary):
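    """Evaluate every registered algorithm on one sequence, print its scores, and record them in `summary`."""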
    gt, frames = load_sequence(seq)
    category, video_name = os.path.basename(os.path.dirname(seq)), os.path.basename(seq)
    print("=== %s:%s ===" % (category, video_name))

    for algo, algo_name, algo_arguments in ALGORITHMS_TO_EVALUATE:
        print("Algorithm name: %s" % algo_name)
        sec_per_step, precision, recall, f1, accuracy = evaluate_algorithm(
            gt, frames, algo, algo_arguments,
        )
        print("Average accuracy: %.3f" % accuracy)
        print("Average precision: %.3f" % precision)
        print("Average recall: %.3f" % recall)
        print("Average F1: %.3f" % f1)
        print("Average sec. per step: %.4f" % sec_per_step)
        print("")

        if category not in summary:
            summary[category] = {}
        if algo_name not in summary[category]:
            summary[category][algo_name] = []
        summary[category][algo_name].append((precision, recall, f1, accuracy))


def main():
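    """Parse arguments, evaluate every sequence found under the dataset path, and print the summaries."""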
    parser = argparse.ArgumentParser(
        description="Evaluate all background subtractors using the Change Detection dataset",
    )
    parser.add_argument(
        "--dataset_path",
        help="Path to the directory with the dataset. It may contain multiple inner directories and will be scanned recursively.",
        required=True,
    )
    parser.add_argument("--algorithm", help="Test a particular algorithm instead of all.")

    args = parser.parse_args()
    dataset_dirs = find_relevant_dirs(args.dataset_path)
    assert len(dataset_dirs) > 0, (
        "The passed directory must contain at least one sequence from the Change Detection dataset. There are no relevant directories in %s. Check that this directory is correct."
        % (args.dataset_path)
    )
    if args.algorithm is not None:
        global ALGORITHMS_TO_EVALUATE
        # wrap the filter in list() so the result can be iterated once per sequence
        ALGORITHMS_TO_EVALUATE = list(filter(
            lambda a: a[1].lower() == args.algorithm.lower(), ALGORITHMS_TO_EVALUATE,
        ))
    summary = {}

    for seq in dataset_dirs:
        evaluate_on_sequence(seq, summary)

    for category in summary:
        for algo_name in summary[category]:
            summary[category][algo_name] = np.mean(summary[category][algo_name], axis=0)

    algorithms_results = {
        "GSoC": [],
        "SuBSENSE": [],
    }

    for category in summary:
        print("=== SUMMARY for %s (Precision, Recall, F1, Accuracy) ===" % category)
        for algo_name in summary[category]:
            print(
                "%05s: %.3f %.3f %.3f %.3f"
                % ((algo_name,) + tuple(summary[category][algo_name])),
            )
            algorithms_results[algo_name].append(summary[category][algo_name])

    print("=== SUMMARY for all video categories (Precision, Recall, F1, Accuracy) ===")
    for algo_name in algorithms_results:
        # skip algorithms that were filtered out and therefore have no results
        if len(algorithms_results[algo_name]) == 0:
            continue
        algorithms_results[algo_name] = np.mean(
            np.array(algorithms_results[algo_name]), axis=0,
        )
        res_array = algorithms_results[algo_name]
        print(
            "{}: {:.3f}, {:.3f}, {:.3f}, {:.3f}".format(
                algo_name, res_array[0], res_array[1], res_array[2], res_array[3],
            ),
        )


if __name__ == "__main__":
    main()