# Source: hackassin/learnopencv — Efficient-image-loading/benchmark.py
from argparse import ArgumentParser

import numpy as np
from prettytable import PrettyTable

from create_lmdb import store_many_lmdb
from create_tfrecords import store_many_tfrecords
from loader import (
    CV2Loader,
    LmdbLoader,
    PILLoader,
    TFRecordsLoader,
    TurboJpegLoader,
    methods,
)
from tools import get_images_paths

def count_time(loader, iters):
    """Measure per-image loading time for `loader` over `iters` full passes.

    Args:
        loader: an image loader (CV2Loader, PILLoader, ...) supporting
            ``len()`` and the iterator protocol; iterating yields
            ``(image, seconds)`` pairs.
        iters: number of complete passes over the dataset.

    Returns:
        np.ndarray of every per-image load time collected (len(loader) * iters
        entries), after printing summary statistics via `print_stats`.
    """
    time_list = []
    num_images = len(loader)
    for _ in range(iters):
        # Use a separate variable for the iterator: the original code rebound
        # `loader`, so later passes called iter() on the previous (possibly
        # exhausted) iterator and print_stats reported the iterator's class
        # name instead of the loader's.
        it = iter(loader)
        for _ in range(num_images):
            _image, elapsed = next(it)
            time_list.append(elapsed)
    times = np.asarray(time_list)
    print_stats(times, type(loader).__name__)
    return times
def print_stats(time, name):
    """Print mean/median/std/min/max of the timing array `time`.

    Args:
        time: numpy array of per-image load times in seconds.
        name: label identifying the loader the times belong to.
    """
    print(f"Time measures for {name}:")
    print(f"{name} mean time - {time.mean():.8f} seconds")
    print(f"{name} median time - {np.median(time):.8f} seconds")
    print(f"{name} std time - {time.std():.8f} seconds")
    print(f"{name} min time - {time.min():.8f} seconds")
    print(f"{name} max time - {time.max():.8f} seconds")
    print("\n")
def benchmark(method, path, iters=100, **kwargs):
    """Run the timing benchmark for one loading method.

    Args:
        method: key into the `methods` registry ("cv2", "pil", ...).
        path: image folder (or lmdb/tfrecords storage path) to load from.
        iters: number of passes over the dataset (default 100).
        **kwargs: extra options forwarded to the loader constructor
            (e.g. mode="RGB").

    Returns:
        np.ndarray of per-image load times, as produced by `count_time`.
    """
    # Build the loader for the requested backend, then time the passes.
    image_loader = methods[method](path, **kwargs)
    return count_time(image_loader, iters)
if __name__ == "__main__":
    parser = ArgumentParser()

    parser.add_argument(
        # required: the path is used unconditionally by get_images_paths,
        # so fail early with a clean argparse error instead of a crash.
        "--path", "-p", type=str, required=True, help="path to image folder",
    )
    parser.add_argument(
        "--method",
        nargs="+",
        required=True,
        choices=["cv2", "pil", "turbojpeg", "lmdb", "tfrecords"],
        help="Image loading methods to use in benchmark",
    )
    parser.add_argument(
        "--mode",
        "-m",
        type=str,
        required=True,
        choices=["BGR", "RGB"],
        help="Image color mode",
    )
    parser.add_argument(
        # default matches benchmark()'s own default; without it args.iters
        # is None and range(None) inside count_time raises TypeError.
        "--iters",
        type=int,
        default=100,
        help="Number of iterations to average the results",
    )
    args = parser.parse_args()

    benchmark_methods = args.method
    image_paths = get_images_paths(args.path)

    results = {}
    for method in benchmark_methods:
        # lmdb/tfrecords first convert the images into their own storage
        # format; the remaining loaders read straight from the folder.
        if method == "lmdb":
            path = "./lmdb/images"
            store_many_lmdb(image_paths, path)
        elif method == "tfrecords":
            path = "./tfrecords/images.tfrecords"
            store_many_tfrecords(image_paths, path)
        else:
            path = args.path

        time = benchmark(method, path, mode=args.mode, iters=args.iters)
        results.update({method: time})

    table = PrettyTable(["Loader", "Mean time", "Median time"])

    print(
        f"Benchmark on {len(image_paths)} {args.mode} images with {args.iters} averaging iteration results:\n",
    )

    for method, time in results.items():
        table.add_row([method, time.mean(), np.median(time)])
    print(table)