GitHub Repository: hukaixuan19970627/yolov5_obb
Path: blob/master/utils/plots.py
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Plotting utils
"""

import math
import os
from copy import copy
from pathlib import Path

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image, ImageDraw, ImageFont

from utils.general import (LOGGER, Timeout, check_requirements, clip_coords, increment_path, is_ascii, is_chinese,
                           try_except, user_config_dir, xywh2xyxy, xyxy2xywh)
from utils.metrics import fitness
from utils.rboxs_utils import poly2hbb, poly2rbox, rbox2poly

# Settings
CONFIG_DIR = user_config_dir()  # Ultralytics settings dir
RANK = int(os.getenv('RANK', -1))
matplotlib.rc('font', **{'size': 11})
matplotlib.use('Agg')  # for writing to files only


class Colors:
    # Ultralytics color palette https://ultralytics.com/
    def __init__(self):
        # hex = matplotlib.colors.TABLEAU_COLORS.values()
        hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
               '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
        self.palette = [self.hex2rgb('#' + c) for c in hex]
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        c = self.palette[int(i) % self.n]
        return (c[2], c[1], c[0]) if bgr else c

    @staticmethod
    def hex2rgb(h):  # rgb order (PIL)
        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))


colors = Colors()  # create instance for 'from utils.plots import colors'
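# Example (illustrative only, not part of the original module): `colors` maps a class
# index to a fixed RGB tuple, cycling through the 20-colour palette above; pass
# bgr=True when drawing with OpenCV.
#   from utils.plots import colors
#   rgb = colors(3)            # (255, 178, 29), i.e. '#FFB21D', in RGB order for PIL
#   bgr = colors(3, bgr=True)  # same colour with channels reversed for cv2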


def check_font(font='Arial.ttf', size=10):
    # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary
    font = Path(font)
    font = font if font.exists() else (CONFIG_DIR / font.name)
    try:
        return ImageFont.truetype(str(font) if font.exists() else font.name, size)
    except Exception as e:  # download if missing
        url = "https://ultralytics.com/assets/" + font.name
        print(f'Downloading {url} to {font}...')
        torch.hub.download_url_to_file(url, str(font), progress=False)
        try:
            return ImageFont.truetype(str(font), size)
        except TypeError:
            check_requirements('Pillow>=8.4.0')  # known issue https://github.com/ultralytics/yolov5/issues/5374


class Annotator:
    if RANK in (-1, 0):
        check_font()  # download TTF if necessary

    # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
    def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
        assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
        self.pil = pil or not is_ascii(example) or is_chinese(example)
        if self.pil:  # use PIL
            self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
            self.im_cv2 = im
            self.draw = ImageDraw.Draw(self.im)
            self.font = check_font(font='Arial.Unicode.ttf' if is_chinese(example) else font,
                                   size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
        else:  # use cv2
            self.im = im
            self.im_cv2 = im
        self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2)  # line width

    def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
        # Add one xyxy box to image with label
        if self.pil or not is_ascii(label):
            self.draw.rectangle(box, width=self.lw, outline=color)  # box
            if label:
                w, h = self.font.getsize(label)  # text width, height
                outside = box[1] - h >= 0  # label fits outside box
                self.draw.rectangle([box[0],
                                     box[1] - h if outside else box[1],
                                     box[0] + w + 1,
                                     box[1] + 1 if outside else box[1] + h + 1], fill=color)
                # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls')  # for PIL>8.0
                self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
        else:  # cv2
            p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
            cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
            if label:
                tf = max(self.lw - 1, 1)  # font thickness
                w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0]  # text width, height
                outside = p1[1] - h - 3 >= 0  # label fits outside box
                p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
                cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA)  # filled
                cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color,
                            thickness=tf, lineType=cv2.LINE_AA)

    def poly_label(self, poly, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
        # Add one rotated box (8-value polygon [x1 y1 x2 y2 x3 y3 x4 y4]) to image with label
        # if self.pil or not is_ascii(label):
        #     self.draw.polygon(xy=poly, outline=color)
        #     if label:
        #         xmax, xmin, ymax, ymin = max(poly[0::2]), min(poly[0::2]), max(poly[1::2]), min(poly[1::2])
        #         x_label, y_label = (xmax + xmin) / 2, (ymax + ymin) / 2
        #         w, h = self.font.getsize(label)  # text width, height
        #         outside = ymin - h >= 0  # label fits outside box
        #         self.draw.rectangle([x_label,
        #                              y_label - h if outside else y_label,
        #                              x_label + w + 1,
        #                              y_label + 1 if outside else y_label + h + 1], fill=color)
        #         self.draw.text((x_label, y_label - h if outside else y_label), label, fill=txt_color, font=self.font)
        # else:
        if isinstance(poly, torch.Tensor):
            poly = poly.cpu().numpy()
        if isinstance(poly[0], torch.Tensor):
            poly = [x.cpu().numpy() for x in poly]
        polygon_list = np.array([(poly[0], poly[1]), (poly[2], poly[3]),
                                 (poly[4], poly[5]), (poly[6], poly[7])], np.int32)
        cv2.drawContours(image=self.im_cv2, contours=[polygon_list], contourIdx=-1, color=color, thickness=self.lw)
        if label:
            tf = max(self.lw - 1, 1)  # font thickness
            xmax, xmin, ymax, ymin = max(poly[0::2]), min(poly[0::2]), max(poly[1::2]), min(poly[1::2])
            x_label, y_label = int((xmax + xmin) / 2), int((ymax + ymin) / 2)
            w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0]  # text width, height
            cv2.rectangle(
                self.im_cv2,
                (x_label, y_label),
                (x_label + w + 1, y_label + int(1.5 * h)),
                color, -1, cv2.LINE_AA
            )
            cv2.putText(self.im_cv2, label, (x_label, y_label + h), 0, self.lw / 3, txt_color, thickness=tf, lineType=cv2.LINE_AA)
        self.im = self.im_cv2 if isinstance(self.im_cv2, Image.Image) else Image.fromarray(self.im_cv2)

    def rectangle(self, xy, fill=None, outline=None, width=1):
        # Add rectangle to image (PIL-only)
        self.draw.rectangle(xy, fill, outline, width)

    def text(self, xy, text, txt_color=(255, 255, 255)):
        # Add text to image (PIL-only)
        w, h = self.font.getsize(text)  # text width, height
        self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font)

    def result(self):
        # Return annotated image as array
        return np.asarray(self.im)
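
# Example (illustrative sketch, not part of the original module): annotating a dummy
# BGR image with an axis-aligned box and a rotated-box polygon; the class ids and
# the 8-value polygon below are made-up values.
#   import numpy as np
#   from utils.plots import Annotator, colors
#   im = np.ascontiguousarray(np.zeros((640, 640, 3), dtype=np.uint8))
#   annotator = Annotator(im, line_width=2, example='plane')
#   annotator.box_label([100, 100, 300, 250], 'plane 0.92', color=colors(0))
#   annotator.poly_label([350, 350, 500, 380, 470, 520, 320, 490], 'ship 0.81', color=colors(1))
#   annotated = annotator.result()  # numpy array ready for cv2.imwrite / plt.imshow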


def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
    """
    x:              Features to be visualized
    module_type:    Module type
    stage:          Module stage within model
    n:              Maximum number of feature maps to plot
    save_dir:       Directory to save results
    """
    if 'Detect' not in module_type:
        batch, channels, height, width = x.shape  # batch, channels, height, width
        if height > 1 and width > 1:
            f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png"  # filename

            blocks = torch.chunk(x[0].cpu(), channels, dim=0)  # select batch index 0, block by channels
            n = min(n, channels)  # number of plots
            fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True)  # n/8 rows x 8 cols
            ax = ax.ravel()
            plt.subplots_adjust(wspace=0.05, hspace=0.05)
            for i in range(n):
                ax[i].imshow(blocks[i].squeeze())  # cmap='gray'
                ax[i].axis('off')

            print(f'Saving {f}... ({n}/{channels})')
            plt.savefig(f, dpi=300, bbox_inches='tight')
            plt.close()
            np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy())  # npy save
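
# Example (illustrative sketch): visualising a fake activation tensor; in training or
# detect this is normally called from the model forward pass with real intermediate
# features, and save_dir must already exist. The module_type string is a made-up value.
#   import torch
#   from pathlib import Path
#   from utils.plots import feature_visualization
#   x = torch.rand(1, 64, 80, 80)  # (batch, channels, height, width)
#   feature_visualization(x, module_type='models.common.C3', stage=2, n=32,
#                         save_dir=Path('runs/detect/exp'))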


def hist2d(x, y, n=100):
    # 2d histogram used in labels.png and evolve.png
    xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
    hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
    xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
    yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
    return np.log(hist[xidx, yidx])


def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
    from scipy.signal import butter, filtfilt

    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
    def butter_lowpass(cutoff, fs, order):
        nyq = 0.5 * fs
        normal_cutoff = cutoff / nyq
        return butter(order, normal_cutoff, btype='low', analog=False)

    b, a = butter_lowpass(cutoff, fs, order=order)
    return filtfilt(b, a, data)  # forward-backward filter
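
# Example (illustrative only): smoothing a noisy 1-D series with the default
# cutoff/sample-rate settings used for curve plotting.
#   import numpy as np
#   from utils.plots import butter_lowpass_filtfilt
#   t = np.linspace(0, 1, 1000)
#   noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.randn(1000)
#   smooth = butter_lowpass_filtfilt(noisy)  # same length as the input, zero-phase filtered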


def output_to_target(output):  # list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2)
    # Convert model output to target format [batch_id, class_id, x, y, l, s, theta, conf]
    targets = []
    for i, o in enumerate(output):
        for *rbox, conf, cls in o.cpu().numpy():
            targets.append([i, cls, *list(*(np.array(rbox)[None])), conf])
    return np.array(targets)
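
# Example (illustrative only): NMS output for a batch of 2 images, each a tensor of
# shape (n_detections, 7) = [cx, cy, l, s, theta, conf, cls], is flattened into a single
# (n_total, 8) array with the image index and class moved to the front.
#   import torch
#   from utils.plots import output_to_target
#   output = [torch.rand(3, 7), torch.rand(2, 7)]  # fake detections for 2 images
#   targets = output_to_target(output)             # shape (5, 8)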


def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=2048, max_subplots=4):
    """
    Args:
        images (tensor): (b, 3, height, width)
        targets (tensor/array): training labels (n_targets, [batch_id, clsid, cx, cy, l, s, theta, gaussian_θ_labels])
                                or predictions (n, [batch_id, class_id, cx, cy, l, s, theta, conf]); θ ∈ [-pi/2, pi/2)
        paths (list[str]): (b) image file paths
        fname (str): output filename
        names (list[str]): class names, indexed by clsid
    """
    # Plot image grid with labels
    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()
    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()
    if np.max(images[0]) <= 1:
        images *= 255  # de-normalise (optional)
    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)

    # Build Image
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)  # init
    for i, im in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break
        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
        im = im.transpose(1, 2, 0)
        mosaic[y:y + h, x:x + w, :] = im

    # Resize (optional)
    scale = max_size / ns / max(h, w)
    if scale < 1:
        h = math.ceil(scale * h)
        w = math.ceil(scale * w)
        mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))

    # Annotate
    fs = int((h + w) * ns * 0.01)  # font size
    annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True)
    for i in range(i + 1):
        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
        annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2)  # borders
        if paths:
            annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames
        if len(targets) > 0:
            ti = targets[targets[:, 0] == i]  # image targets, (n, [img_index clsid cx cy l s theta gaussian_θ_labels])
            # boxes = xywh2xyxy(ti[:, 2:6]).T
            rboxes = ti[:, 2:7]
            classes = ti[:, 1].astype('int')
            # labels = ti.shape[1] == 6  # labels if no conf column
            labels = ti.shape[1] == 187  # labels if no conf column
            # conf = None if labels else ti[:, 6]  # check for confidence presence (label vs pred)
            conf = None if labels else ti[:, 7]  # check for confidence presence (label vs pred)

            # if boxes.shape[1]:
            #     if boxes.max() <= 1.01:  # if normalized with tolerance 0.01
            #         boxes[[0, 2]] *= w  # scale to pixels
            #         boxes[[1, 3]] *= h
            #     elif scale < 1:  # absolute coords need scale if image scales
            #         boxes *= scale
            polys = rbox2poly(rboxes)
            if scale < 1:
                polys *= scale
            # boxes[[0, 2]] += x
            # boxes[[1, 3]] += y
            polys[:, [0, 2, 4, 6]] += x
            polys[:, [1, 3, 5, 7]] += y
            # for j, box in enumerate(boxes.T.tolist()):
            #     cls = classes[j]
            #     color = colors(cls)
            #     cls = names[cls] if names else cls
            #     if labels or conf[j] > 0.25:  # 0.25 conf thresh
            #         label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'
            #         annotator.box_label(box, label, color=color)
            for j, poly in enumerate(polys.tolist()):
                cls = classes[j]
                color = colors(cls)
                cls = names[cls] if names else cls
                if labels or conf[j] > 0.25:  # 0.25 conf thresh
                    label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'
                    annotator.poly_label(poly, label, color=color)
    annotator.im.save(fname)  # save
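
# Example (illustrative sketch, values are made up): plotting a mosaic from a training
# batch as the dataloader in this repo provides it (image tensor plus a label tensor
# whose first column is the image index inside the batch; 187 = 2 + 5 + 180 θ labels).
#   import torch
#   from utils.plots import plot_images
#   imgs = torch.rand(4, 3, 640, 640)     # (b, 3, h, w), values in [0, 1]
#   targets = torch.zeros(0, 187)         # no labels in this fake batch
#   plot_images(imgs, targets, paths=None, fname='train_batch0.jpg', names=['plane', 'ship'])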

def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()
        y.append(optimizer.param_groups[0]['lr'])
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
    plt.close()
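
# Example (illustrative sketch, names are made up): simulating a cosine schedule for
# 100 epochs and writing LR.png to the current directory.
#   import torch
#   from utils.plots import plot_lr_scheduler
#   model = torch.nn.Linear(10, 2)
#   optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
#   scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=100)
#   plot_lr_scheduler(optimizer, scheduler, epochs=100, save_dir='')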


def plot_val_txt():  # from utils.plots import *; plot_val()
    # Plot val.txt histograms
    x = np.loadtxt('val.txt', dtype=np.float32)
    box = xyxy2xywh(x[:, :4])
    cx, cy = box[:, 0], box[:, 1]

    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    ax[0].hist(cx, bins=600)
    ax[1].hist(cy, bins=600)
    plt.savefig('hist1d.png', dpi=200)


def plot_targets_txt():  # from utils.plots import *; plot_targets_txt()
    # Plot targets.txt histograms
    x = np.loadtxt('targets.txt', dtype=np.float32).T
    s = ['x targets', 'y targets', 'width targets', 'height targets']
    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    for i in range(4):
        ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}')
        ax[i].legend()
        ax[i].set_title(s[i])
    plt.savefig('targets.jpg', dpi=200)
351
352
353
def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()
354
# Plot file=study.txt generated by val.py (or plot all study*.txt in dir)
355
save_dir = Path(file).parent if file else Path(dir)
356
plot2 = False # plot additional results
357
if plot2:
358
ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()
359
360
fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
361
# for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
362
for f in sorted(save_dir.glob('study*.txt')):
363
y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
364
x = np.arange(y.shape[1]) if x is None else np.array(x)
365
if plot2:
366
s = ['P', 'R', '[email protected]', '[email protected]:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']
367
for i in range(7):
368
ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
369
ax[i].set_title(s[i])
370
371
j = y[3].argmax() + 1
372
ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
373
label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
374
375
ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
376
'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
377
378
ax2.grid(alpha=0.2)
379
ax2.set_yticks(np.arange(20, 60, 5))
380
ax2.set_xlim(0, 57)
381
ax2.set_ylim(25, 55)
382
ax2.set_xlabel('GPU Speed (ms/img)')
383
ax2.set_ylabel('COCO AP val')
384
ax2.legend(loc='lower right')
385
f = save_dir / 'study.png'
386
print(f'Saving {f}...')
387
plt.savefig(f, dpi=300)


@try_except  # known issue https://github.com/ultralytics/yolov5/issues/5395
@Timeout(30)  # known issue https://github.com/ultralytics/yolov5/issues/5611
def plot_labels(labels, names=(), save_dir=Path(''), img_size=1024):
    rboxes = poly2rbox(labels[:, 1:])
    labels = np.concatenate((labels[:, :1], rboxes[:, :-1]), axis=1)  # [cls xyls]

    # plot dataset labels
    LOGGER.info(f"Plotting labels to {save_dir / 'labels_xyls.jpg'}... ")
    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, hboxes(xyls)
    nc = int(c.max() + 1)  # number of classes
    x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'long_edge', 'short_edge'])

    # seaborn correlogram
    sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
    plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
    plt.close()

    # matplotlib labels
    matplotlib.use('svg')  # faster
    ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
    y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
    # [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)]  # update colors bug #3195
    ax[0].set_ylabel('instances')
    if 0 < len(names) < 30:
        ax[0].set_xticks(range(len(names)))
        ax[0].set_xticklabels(names, rotation=90, fontsize=10)
    else:
        ax[0].set_xlabel('classes')
    sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
    sn.histplot(x, x='long_edge', y='short_edge', ax=ax[3], bins=50, pmax=0.9)

    # rectangles
    # labels[:, 1:3] = 0.5  # center
    labels[:, 1:3] = 0.5 * img_size  # center
    # labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
    labels[:, 1:] = xywh2xyxy(labels[:, 1:])
    # img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
    img = Image.fromarray(np.ones((img_size, img_size, 3), dtype=np.uint8) * 255)
    for cls, *box in labels[:1000]:
        ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls))  # plot
    ax[1].imshow(img)
    ax[1].axis('off')

    for a in [0, 1, 2, 3]:
        for s in ['top', 'right', 'left', 'bottom']:
            ax[a].spines[s].set_visible(False)

    plt.savefig(save_dir / 'labels_xyls.jpg', dpi=200)
    matplotlib.use('Agg')
    plt.close()


def plot_evolve(evolve_csv='path/to/evolve.csv'):  # from utils.plots import *; plot_evolve()
    # Plot evolve.csv hyp evolution results
    evolve_csv = Path(evolve_csv)
    data = pd.read_csv(evolve_csv)
    keys = [x.strip() for x in data.columns]
    x = data.values
    f = fitness(x)
    j = np.argmax(f)  # max fitness index
    plt.figure(figsize=(10, 12), tight_layout=True)
    matplotlib.rc('font', **{'size': 8})
    for i, k in enumerate(keys[7:]):
        v = x[:, 7 + i]
        mu = v[j]  # best single result
        plt.subplot(6, 5, i + 1)
        plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
        plt.plot(mu, f.max(), 'k+', markersize=15)
        plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9})  # limit to 40 characters
        if i % 5 != 0:
            plt.yticks([])
        print(f'{k:>15}: {mu:.3g}')
    f = evolve_csv.with_suffix('.png')  # filename
    plt.savefig(f, dpi=200)
    plt.close()
    print(f'Saved {f}')


def plot_results(file='path/to/results.csv', dir=''):
    # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')
    save_dir = Path(file).parent if file else Path(dir)
    # fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
    fig, ax = plt.subplots(2, 6, figsize=(18, 6), tight_layout=True)
    ax = ax.ravel()
    files = list(save_dir.glob('results*.csv'))
    assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.'
    for fi, f in enumerate(files):
        try:
            data = pd.read_csv(f)
            s = [x.strip() for x in data.columns]
            x = data.values[:, 0]
            # for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
            for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 7, 8]):
                y = data.values[:, j]
                # y[y == 0] = np.nan  # don't show zero values
                ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8)
                ax[i].set_title(s[j], fontsize=12)
                # if j in [8, 9, 10]:  # share train and val loss y axes
                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
        except Exception as e:
            print(f'Warning: Plotting error for {f}: {e}')
    ax[1].legend()
    fig.savefig(save_dir / 'results.png', dpi=200)
    plt.close()


def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
    # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
    ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
    s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
    files = list(Path(save_dir).glob('frames*.txt'))
    for fi, f in enumerate(files):
        try:
            results = np.loadtxt(f, ndmin=2).T[:, 90:-30]  # clip first and last rows
            n = results.shape[1]  # number of rows
            x = np.arange(start, min(stop, n) if stop else n)
            results = results[:, x]
            t = (results[0] - results[0].min())  # set t0=0s
            results[0] = x
            for i, a in enumerate(ax):
                if i < len(results):
                    label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
                    a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
                    a.set_title(s[i])
                    a.set_xlabel('time (s)')
                    # if fi == len(files) - 1:
                    #     a.set_ylim(bottom=0)
                    for side in ['top', 'right']:
                        a.spines[side].set_visible(False)
                else:
                    a.remove()
        except Exception as e:
            print(f'Warning: Plotting error for {f}; {e}')
    ax[1].legend()
    plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)


def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):
    # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop
    xyxy = torch.tensor(xyxy).view(-1, 4)
    b = xyxy2xywh(xyxy)  # boxes
    if square:
        b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # attempt rectangle to square
    b[:, 2:] = b[:, 2:] * gain + pad  # box wh * gain + pad
    xyxy = xywh2xyxy(b).long()
    clip_coords(xyxy, im.shape)
    crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
    if save:
        file = Path(file)  # accept str paths as well as Path objects
        file.parent.mkdir(parents=True, exist_ok=True)  # make directory
        cv2.imwrite(str(increment_path(file).with_suffix('.jpg')), crop)
    return crop
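
# Example (illustrative only, paths and coordinates are made up): cropping a detection
# from a BGR frame read with OpenCV and saving it under runs/crops/.
#   import cv2
#   from pathlib import Path
#   from utils.plots import save_one_box
#   im = cv2.imread('data/images/bus.jpg')  # BGR numpy image
#   crop = save_one_box([50, 100, 200, 300], im, file=Path('runs/crops/person.jpg'), BGR=True)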