Path: blob/master/utils/plots.py
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Plotting utils
"""

import math
import os
from copy import copy
from pathlib import Path

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image, ImageDraw, ImageFont

from utils.general import (LOGGER, Timeout, check_requirements, clip_coords, increment_path, is_ascii, is_chinese,
                           try_except, user_config_dir, xywh2xyxy, xyxy2xywh)
from utils.metrics import fitness
from utils.rboxs_utils import poly2hbb, poly2rbox, rbox2poly

# Settings
CONFIG_DIR = user_config_dir()  # Ultralytics settings dir
RANK = int(os.getenv('RANK', -1))
matplotlib.rc('font', **{'size': 11})
matplotlib.use('Agg')  # for writing to files only


class Colors:
    # Ultralytics color palette https://ultralytics.com/
    def __init__(self):
        # hex = matplotlib.colors.TABLEAU_COLORS.values()
        hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
               '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
        self.palette = [self.hex2rgb('#' + c) for c in hex]
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        c = self.palette[int(i) % self.n]
        return (c[2], c[1], c[0]) if bgr else c

    @staticmethod
    def hex2rgb(h):  # rgb order (PIL)
        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))


colors = Colors()  # create instance for 'from utils.plots import colors'


def check_font(font='Arial.ttf', size=10):
    # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary
    font = Path(font)
    font = font if font.exists() else (CONFIG_DIR / font.name)
    try:
        return ImageFont.truetype(str(font) if font.exists() else font.name, size)
    except Exception as e:  # download if missing
        url = "https://ultralytics.com/assets/" + font.name
        print(f'Downloading {url} to {font}...')
        torch.hub.download_url_to_file(url, str(font), progress=False)
        try:
            return ImageFont.truetype(str(font), size)
        except TypeError:
            check_requirements('Pillow>=8.4.0')  # known issue https://github.com/ultralytics/yolov5/issues/5374


class Annotator:
    if RANK in (-1, 0):
        check_font()  # download TTF if necessary

    # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
    def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
        assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
        self.pil = pil or not is_ascii(example) or is_chinese(example)
        if self.pil:  # use PIL
            self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
            self.im_cv2 = im
            self.draw = ImageDraw.Draw(self.im)
            self.font = check_font(font='Arial.Unicode.ttf' if is_chinese(example) else font,
                                   size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
        else:  # use cv2
            self.im = im
            self.im_cv2 = im
        self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2)  # line width

    def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
        # Add one xyxy box to image with label
        if self.pil or not is_ascii(label):
            self.draw.rectangle(box, width=self.lw, outline=color)  # box
            if label:
                w, h = self.font.getsize(label)  # text width, height
                outside = box[1] - h >= 0  # label fits outside box
                self.draw.rectangle([box[0],
                                     box[1] - h if outside else box[1],
                                     box[0] + w + 1,
                                     box[1] + 1 if outside else box[1] + h + 1], fill=color)
                # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls')  # for PIL>8.0
                self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
        else:  # cv2
            p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
            cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
            if label:
                tf = max(self.lw - 1, 1)  # font thickness
                w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0]  # text width, height
                outside = p1[1] - h - 3 >= 0  # label fits outside box
                p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
                cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA)  # filled
                cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color,
                            thickness=tf, lineType=cv2.LINE_AA)

    def poly_label(self, poly, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
        # if self.pil or not is_ascii(label):
        #     self.draw.polygon(xy=poly, outline=color)
        #     if label:
        #         xmax, xmin, ymax, ymin = max(poly[0::2]), min(poly[0::2]), max(poly[1::2]), min(poly[1::2])
        #         x_label, y_label = (xmax + xmin) / 2, (ymax + ymin) / 2
        #         w, h = self.font.getsize(label)  # text width, height
        #         outside = ymin - h >= 0  # label fits outside box
        #         self.draw.rectangle([x_label,
        #                              y_label - h if outside else y_label,
        #                              x_label + w + 1,
        #                              y_label + 1 if outside else y_label + h + 1], fill=color)
        #         self.draw.text((x_label, y_label - h if outside else y_label), label, fill=txt_color, font=self.font)
        # else:
        if isinstance(poly, torch.Tensor):
            poly = poly.cpu().numpy()
        if isinstance(poly[0], torch.Tensor):
            poly = [x.cpu().numpy() for x in poly]
        polygon_list = np.array([(poly[0], poly[1]), (poly[2], poly[3]),
                                 (poly[4], poly[5]), (poly[6], poly[7])], np.int32)
        cv2.drawContours(image=self.im_cv2, contours=[polygon_list], contourIdx=-1, color=color, thickness=self.lw)
        if label:
            tf = max(self.lw - 1, 1)  # font thickness
            xmax, xmin, ymax, ymin = max(poly[0::2]), min(poly[0::2]), max(poly[1::2]), min(poly[1::2])
            x_label, y_label = int((xmax + xmin) / 2), int((ymax + ymin) / 2)
            w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0]  # text width, height
            cv2.rectangle(
                self.im_cv2,
                (x_label, y_label),
                (x_label + w + 1, y_label + int(1.5 * h)),
                color, -1, cv2.LINE_AA
            )
            cv2.putText(self.im_cv2, label, (x_label, y_label + h), 0, self.lw / 3, txt_color, thickness=tf, lineType=cv2.LINE_AA)
        self.im = self.im_cv2 if isinstance(self.im_cv2, Image.Image) else Image.fromarray(self.im_cv2)

    def rectangle(self, xy, fill=None, outline=None, width=1):
        # Add rectangle to image (PIL-only)
        self.draw.rectangle(xy, fill, outline, width)

    def text(self, xy, text, txt_color=(255, 255, 255)):
        # Add text to image (PIL-only)
        w, h = self.font.getsize(text)  # text width, height
        self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font)

    def result(self):
        # Return annotated image as array
        return np.asarray(self.im)


def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
    """
    x:              Features to be visualized
    module_type:    Module type
    stage:          Module stage within model
    n:              Maximum number of feature maps to plot
    save_dir:       Directory to save results
    """
    if 'Detect' not in module_type:
        batch, channels, height, width = x.shape  # batch, channels, height, width
        if height > 1 and width > 1:
            f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png"  # filename

            blocks = torch.chunk(x[0].cpu(), channels, dim=0)  # select batch index 0, block by channels
            n = min(n, channels)  # number of plots
            fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True)  # 8 rows x n/8 cols
            ax = ax.ravel()
            plt.subplots_adjust(wspace=0.05, hspace=0.05)
            for i in range(n):
                ax[i].imshow(blocks[i].squeeze())  # cmap='gray'
                ax[i].axis('off')

            print(f'Saving {f}... ({n}/{channels})')
            plt.savefig(f, dpi=300, bbox_inches='tight')
            plt.close()
            np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy())  # npy save


def hist2d(x, y, n=100):
    # 2d histogram used in labels.png and evolve.png
    xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
    hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
    xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
    yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
    return np.log(hist[xidx, yidx])


def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
    from scipy.signal import butter, filtfilt

    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
    def butter_lowpass(cutoff, fs, order):
        nyq = 0.5 * fs
        normal_cutoff = cutoff / nyq
        return butter(order, normal_cutoff, btype='low', analog=False)

    b, a = butter_lowpass(cutoff, fs, order=order)
    return filtfilt(b, a, data)  # forward-backward filter


def output_to_target(output):  # list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2)
    # Convert model output to target format [batch_id, class_id, x, y, l, s, theta, conf]
    targets = []
    for i, o in enumerate(output):
        for *rbox, conf, cls in o.cpu().numpy():
            targets.append([i, cls, *list(*(np.array(rbox)[None])), conf])
    return np.array(targets)


def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=2048, max_subplots=4):
    """
    Args:
        imgs (tensor): (b, 3, height, width)
        targets_train (tensor): (n_targets, [batch_id clsid cx cy l s theta gaussian_θ_labels]) θ∈[-pi/2, pi/2)
        targets_pred (array): (n, [batch_id, class_id, cx, cy, l, s, theta, conf]) θ∈[-pi/2, pi/2)
        paths (list[str,...]): (b)
        fname (str): (1)
        names :

    """
    # Plot image grid with labels
    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()
    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()
    if np.max(images[0]) <= 1:
        images *= 255  # de-normalise (optional)
    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)

    # Build Image
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)  # init
    for i, im in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break
        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
        im = im.transpose(1, 2, 0)
        mosaic[y:y + h, x:x + w, :] = im

    # Resize (optional)
    scale = max_size / ns / max(h, w)
    if scale < 1:
        h = math.ceil(scale * h)
        w = math.ceil(scale * w)
        mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))

    # Annotate
    fs = int((h + w) * ns * 0.01)  # font size
    annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True)
    for i in range(i + 1):
        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
        annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2)  # borders
        if paths:
            annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames
        if len(targets) > 0:
            ti = targets[targets[:, 0] == i]  # image targets, (n, [img_index clsid cx cy l s theta gaussian_θ_labels])
            # boxes = xywh2xyxy(ti[:, 2:6]).T
            rboxes = ti[:, 2:7]
            classes = ti[:, 1].astype('int')
            # labels = ti.shape[1] == 6  # labels if no conf column
            labels = ti.shape[1] == 187  # labels if no conf column
            # conf = None if labels else ti[:, 6]  # check for confidence presence (label vs pred)
            conf = None if labels else ti[:, 7]  # check for confidence presence (label vs pred)

            # if boxes.shape[1]:
            #     if boxes.max() <= 1.01:  # if normalized with tolerance 0.01
            #         boxes[[0, 2]] *= w  # scale to pixels
            #         boxes[[1, 3]] *= h
            #     elif scale < 1:  # absolute coords need scale if image scales
            #         boxes *= scale
            polys = rbox2poly(rboxes)
            if scale < 1:
                polys *= scale
            # boxes[[0, 2]] += x
            # boxes[[1, 3]] += y
            polys[:, [0, 2, 4, 6]] += x
            polys[:, [1, 3, 5, 7]] += y
            # for j, box in enumerate(boxes.T.tolist()):
            #     cls = classes[j]
            #     color = colors(cls)
            #     cls = names[cls] if names else cls
            #     if labels or conf[j] > 0.25:  # 0.25 conf thresh
            #         label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'
            #         annotator.box_label(box, label, color=color)
            for j, poly in enumerate(polys.tolist()):
                cls = classes[j]
                color = colors(cls)
                cls = names[cls] if names else cls
                if labels or conf[j] > 0.25:  # 0.25 conf thresh
                    label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'
                    annotator.poly_label(poly, label, color=color)
    annotator.im.save(fname)  # save


def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()
        y.append(optimizer.param_groups[0]['lr'])
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
    plt.close()


def plot_val_txt():  # from utils.plots import *; plot_val()
    # Plot val.txt histograms
    x = np.loadtxt('val.txt', dtype=np.float32)
    box = xyxy2xywh(x[:, :4])
    cx, cy = box[:, 0], box[:, 1]

    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    ax[0].hist(cx, bins=600)
    ax[1].hist(cy, bins=600)
    plt.savefig('hist1d.png', dpi=200)


def plot_targets_txt():  # from utils.plots import *; plot_targets_txt()
    # Plot targets.txt histograms
    x = np.loadtxt('targets.txt', dtype=np.float32).T
    s = ['x targets', 'y targets', 'width targets', 'height targets']
    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    for i in range(4):
        ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}')
        ax[i].legend()
        ax[i].set_title(s[i])
    plt.savefig('targets.jpg', dpi=200)


def plot_val_study(file='', dir='', x=None):  # from utils.plots import *; plot_val_study()
    # Plot file=study.txt generated by val.py (or plot all study*.txt in dir)
    save_dir = Path(file).parent if file else Path(dir)
    plot2 = False  # plot additional results
    if plot2:
        ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()

    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
    # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
    for f in sorted(save_dir.glob('study*.txt')):
        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
        x = np.arange(y.shape[1]) if x is None else np.array(x)
        if plot2:
            s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']
            for i in range(7):
                ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
                ax[i].set_title(s[i])

        j = y[3].argmax() + 1
        ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
                 label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))

    ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
             'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')

    ax2.grid(alpha=0.2)
    ax2.set_yticks(np.arange(20, 60, 5))
    ax2.set_xlim(0, 57)
    ax2.set_ylim(25, 55)
    ax2.set_xlabel('GPU Speed (ms/img)')
    ax2.set_ylabel('COCO AP val')
    ax2.legend(loc='lower right')
    f = save_dir / 'study.png'
    print(f'Saving {f}...')
    plt.savefig(f, dpi=300)


@try_except  # known issue https://github.com/ultralytics/yolov5/issues/5395
@Timeout(30)  # known issue https://github.com/ultralytics/yolov5/issues/5611
def plot_labels(labels, names=(), save_dir=Path(''), img_size=1024):
    rboxes = poly2rbox(labels[:, 1:])
    labels = np.concatenate((labels[:, :1], rboxes[:, :-1]), axis=1)  # [cls xyls]

    # plot dataset labels
    LOGGER.info(f"Plotting labels to {save_dir / 'labels_xyls.jpg'}... ")
    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, hboxes(xyls)
    nc = int(c.max() + 1)  # number of classes
    x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'long_edge', 'short_edge'])

    # seaborn correlogram
    sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
    plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
    plt.close()

    # matplotlib labels
    matplotlib.use('svg')  # faster
    ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
    y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
    # [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)]  # update colors bug #3195
    ax[0].set_ylabel('instances')
    if 0 < len(names) < 30:
        ax[0].set_xticks(range(len(names)))
        ax[0].set_xticklabels(names, rotation=90, fontsize=10)
    else:
        ax[0].set_xlabel('classes')
    sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
    sn.histplot(x, x='long_edge', y='short_edge', ax=ax[3], bins=50, pmax=0.9)

    # rectangles
    # labels[:, 1:3] = 0.5  # center
    labels[:, 1:3] = 0.5 * img_size  # center
    # labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
    labels[:, 1:] = xywh2xyxy(labels[:, 1:])
    # img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
    img = Image.fromarray(np.ones((img_size, img_size, 3), dtype=np.uint8) * 255)
    for cls, *box in labels[:1000]:
        ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls))  # plot
    ax[1].imshow(img)
    ax[1].axis('off')

    for a in [0, 1, 2, 3]:
        for s in ['top', 'right', 'left', 'bottom']:
            ax[a].spines[s].set_visible(False)

    plt.savefig(save_dir / 'labels_xyls.jpg', dpi=200)
    matplotlib.use('Agg')
    plt.close()


def plot_evolve(evolve_csv='path/to/evolve.csv'):  # from utils.plots import *; plot_evolve()
    # Plot evolve.csv hyp evolution results
    evolve_csv = Path(evolve_csv)
    data = pd.read_csv(evolve_csv)
    keys = [x.strip() for x in data.columns]
    x = data.values
    f = fitness(x)
    j = np.argmax(f)  # max fitness index
    plt.figure(figsize=(10, 12), tight_layout=True)
    matplotlib.rc('font', **{'size': 8})
    for i, k in enumerate(keys[7:]):
        v = x[:, 7 + i]
        mu = v[j]  # best single result
        plt.subplot(6, 5, i + 1)
        plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
        plt.plot(mu, f.max(), 'k+', markersize=15)
        plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9})  # limit to 40 characters
        if i % 5 != 0:
            plt.yticks([])
        print(f'{k:>15}: {mu:.3g}')
    f = evolve_csv.with_suffix('.png')  # filename
    plt.savefig(f, dpi=200)
    plt.close()
    print(f'Saved {f}')


def plot_results(file='path/to/results.csv', dir=''):
    # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')
    save_dir = Path(file).parent if file else Path(dir)
    # fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
    fig, ax = plt.subplots(2, 6, figsize=(18, 6), tight_layout=True)
    ax = ax.ravel()
    files = list(save_dir.glob('results*.csv'))
    assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.'
    for fi, f in enumerate(files):
        try:
            data = pd.read_csv(f)
            s = [x.strip() for x in data.columns]
            x = data.values[:, 0]
            # for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
            for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 7, 8]):
                y = data.values[:, j]
                # y[y == 0] = np.nan  # don't show zero values
                ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8)
                ax[i].set_title(s[j], fontsize=12)
                # if j in [8, 9, 10]:  # share train and val loss y axes
                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
        except Exception as e:
            print(f'Warning: Plotting error for {f}: {e}')
    ax[1].legend()
    fig.savefig(save_dir / 'results.png', dpi=200)
    plt.close()


def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
    # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
    ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
    s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
    files = list(Path(save_dir).glob('frames*.txt'))
    for fi, f in enumerate(files):
        try:
            results = np.loadtxt(f, ndmin=2).T[:, 90:-30]  # clip first and last rows
            n = results.shape[1]  # number of rows
            x = np.arange(start, min(stop, n) if stop else n)
            results = results[:, x]
            t = (results[0] - results[0].min())  # set t0=0s
            results[0] = x
            for i, a in enumerate(ax):
                if i < len(results):
                    label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
                    a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
                    a.set_title(s[i])
                    a.set_xlabel('time (s)')
                    # if fi == len(files) - 1:
                    #     a.set_ylim(bottom=0)
                    for side in ['top', 'right']:
                        a.spines[side].set_visible(False)
                else:
                    a.remove()
        except Exception as e:
            print(f'Warning: Plotting error for {f}; {e}')
    ax[1].legend()
    plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)


def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):
    # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop
    xyxy = torch.tensor(xyxy).view(-1, 4)
    b = xyxy2xywh(xyxy)  # boxes
    if square:
        b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # attempt rectangle to square
    b[:, 2:] = b[:, 2:] * gain + pad  # box wh * gain + pad
    xyxy = xywh2xyxy(b).long()
    clip_coords(xyxy, im.shape)
    crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
    if save:
        file.parent.mkdir(parents=True, exist_ok=True)  # make directory
        cv2.imwrite(str(increment_path(file).with_suffix('.jpg')), crop)
    return crop
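

# The block below is an illustrative usage sketch, not part of the upstream module: it exercises the Annotator and
# colors utilities above on a synthetic image. The coordinates, class labels, and output filename are arbitrary
# example values, and running it assumes the rest of the repository (utils.general, utils.rboxs_utils) is importable.
if __name__ == '__main__':
    canvas = np.ascontiguousarray(np.full((640, 640, 3), 114, dtype=np.uint8))  # plain grey test image
    annotator = Annotator(canvas, line_width=2, example='abc')  # ASCII example string selects the cv2 backend
    annotator.box_label([100, 100, 300, 240], 'car 0.91', color=colors(2, bgr=True))  # axis-aligned xyxy box
    # Draw the rotated box last: poly_label converts the working image to PIL internally
    annotator.poly_label([350, 350, 500, 380, 470, 520, 320, 490], 'plane 0.88', color=colors(0, bgr=True))
    cv2.imwrite('annotator_demo.jpg', annotator.result())  # result() returns the annotated image as a numpy array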