Path: blob/master/val.py
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Validate a trained YOLOv5 model's accuracy on a custom dataset

Usage:
    $ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640
"""

import argparse
import json
import os
import sys
from pathlib import Path
from threading import Thread

import numpy as np
import torch
from tqdm import tqdm

from utils.rboxs_utils import poly2hbb, rbox2poly

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.callbacks import Callbacks
from utils.datasets import create_dataloader
from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml,
                           coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
                           scale_coords, scale_polys, xywh2xyxy, xyxy2xywh, non_max_suppression_obb)
from utils.metrics import ConfusionMatrix, ap_per_class
from utils.plots import output_to_target, plot_images, plot_val_study
from utils.torch_utils import select_device, time_sync


def save_one_txt(predn, save_conf, shape, file):
    # Save one txt result
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
    for *xyxy, conf, cls in predn.tolist():
        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
        with open(file, 'a') as f:
            f.write(('%g ' * len(line)).rstrip() % line + '\n')


# def save_one_json(predn, jdict, path, class_map):
def save_one_json(pred_hbbn, pred_polyn, jdict, path, class_map):
    """
    Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236, "poly": [...]}
    Args:
        pred_hbbn (tensor): (n, [xyxy, conf, cls])
        pred_polyn (tensor): (n, [poly, conf, cls])
    """
    image_id = int(path.stem) if path.stem.isnumeric() else path.stem
    box = xyxy2xywh(pred_hbbn[:, :4])  # xywh
    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
    for p, b in zip(pred_polyn.tolist(), box.tolist()):
        jdict.append({'image_id': image_id,
                      'category_id': class_map[int(p[-1]) + 1],  # COCO's category_id starts from 1, not 0
                      'bbox': [round(x, 1) for x in b],
                      'score': round(p[-2], 5),
                      'poly': [round(x, 1) for x in p[:8]],
                      'file_name': path.stem})

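# --- Illustrative sketch, not part of the upstream file: the xyxy -> normalized
# xywh conversion that save_one_txt applies, assuming a 480x640 (h, w) image.
# The helper name is hypothetical; it is never invoked by this module.
def _demo_label_format():
    shape = (480, 640)  # (h_raw, w_raw), as passed to save_one_txt
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain [w, h, w, h]
    xyxy = torch.tensor([160., 120., 480., 360.])  # pixel-space box corners
    xywh = (xyxy2xywh(xyxy.view(1, 4)) / gn).view(-1).tolist()
    print(xywh)  # [0.5, 0.5, 0.5, 0.5] -> a centered box spanning half the image
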
def process_batch(detections, labels, iouv):
    """
    Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
    Arguments:
        detections (Array[N, 6]), x1, y1, x2, y2, conf, class
        labels (Array[M, 5]), class, x1, y1, x2, y2
    Returns:
        correct (Array[N, 10]), for 10 IoU levels
    """
    correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device)
    iou = box_iou(labels[:, 1:], detections[:, :4])
    x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5]))  # IoU above threshold and classes match
    if x[0].shape[0]:
        matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detection, iou]
        if x[0].shape[0] > 1:
            matches = matches[matches[:, 2].argsort()[::-1]]
            matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
            # matches = matches[matches[:, 2].argsort()[::-1]]
            matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
        matches = torch.Tensor(matches).to(iouv.device)
        correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv
    return correct

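# --- Illustrative sketch, not part of the upstream file: process_batch on a toy
# detection/label pair. A single class-0 detection overlapping its label with
# IoU 0.81 is marked correct at thresholds 0.5-0.8 and incorrect at 0.85-0.95.
# The helper name is hypothetical; it is never invoked by this module.
def _demo_process_batch():
    iouv = torch.linspace(0.5, 0.95, 10)  # the same thresholds run() uses
    labels = torch.tensor([[0., 0., 0., 100., 100.]])  # [cls, x1, y1, x2, y2]
    detections = torch.tensor([[10., 10., 100., 100., 0.9, 0.]])  # [xyxy, conf, cls]
    print(process_batch(detections, labels, iouv))  # 7 True then 3 False
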
@torch.no_grad()
def run(data,
        weights=None,  # model.pt path(s)
        batch_size=32,  # batch size
        imgsz=640,  # inference size (pixels)
        conf_thres=0.01,  # confidence threshold
        iou_thres=0.4,  # NMS IoU threshold
        task='val',  # train, val, test, speed or study
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        single_cls=False,  # treat as single-class dataset
        augment=False,  # augmented inference
        verbose=False,  # verbose output
        save_txt=False,  # save results to *.txt
        save_hybrid=False,  # save label+prediction hybrid results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / 'runs/val',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        model=None,
        dataloader=None,
        save_dir=Path(''),
        plots=True,
        callbacks=Callbacks(),
        compute_loss=None,
        ):
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half &= (pt or jit or engine) and device.type != 'cpu'  # half precision only supported by PyTorch on CUDA
        if pt or jit:
            model.model.half() if half else model.model.float()
        elif engine:
            batch_size = model.batch_size
        else:
            half = False
            batch_size = 1  # export.py models default to batch-size 1
            device = torch.device('cpu')
            LOGGER.info(f'Forcing --batch-size 1 square inference shape(1,3,{imgsz},{imgsz}) for non-PyTorch backends')

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
    # Dataloader
    if not training:
        model.warmup(imgsz=(1, 3, imgsz, imgsz), half=half)  # warmup
        pad = 0.0 if task == 'speed' else 0.5
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task], imgsz, batch_size, stride, names, single_cls, pad=pad, rect=pt,
                                       workers=workers, prefix=colorstr(f'{task}: '))[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    # names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
    dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    # loss = torch.zeros(3, device=device)
    loss = torch.zeros(4, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
    for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
        # targets (tensor): (n_gt_all_batch, [img_index clsid cx cy l s theta gaussian_θ_labels]) θ ∈ [-pi/2, pi/2)
        # shapes (tensor): (b, [(h_raw, w_raw), (hw_ratios, wh_paddings)])
        t1 = time_sync()
        if pt or jit or engine:
            im = im.to(device, non_blocking=True)
            targets = targets.to(device)
        im = im.half() if half else im.float()  # uint8 to fp16/32
        im /= 255  # 0 - 255 to 0.0 - 1.0
        nb, _, height, width = im.shape  # batch size, channels, height, width
        t2 = time_sync()
        dt[0] += t2 - t1

        # Inference
        out, train_out = model(im) if training else model(im, augment=augment, val=True)  # inference, loss outputs
        dt[1] += time_sync() - t2

        # Loss
        if compute_loss:
            loss += compute_loss([x.float() for x in train_out], targets)[1]  # box, obj, cls, theta

        # NMS
        # targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        t3 = time_sync()
        # out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
        out = non_max_suppression_obb(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)  # list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2)
        dt[2] += time_sync() - t3

        # Metrics
        for si, pred in enumerate(out):  # pred (tensor): (n, [xylsθ, conf, cls])
            labels = targets[targets[:, 0] == si, 1:7]  # labels (tensor): (n_gt, [clsid cx cy l s theta]) θ ∈ [-pi/2, pi/2)
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            path, shape = Path(paths[si]), shapes[si][0]  # shape (tensor): (h_raw, w_raw)
            seen += 1

            if len(pred) == 0:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Predictions
            if single_cls:
                # pred[:, 5] = 0
                pred[:, 6] = 0
            poly = rbox2poly(pred[:, :5])  # (n, 8)
            pred_poly = torch.cat((poly, pred[:, -2:]), dim=1)  # (n, [poly, conf, cls])
            hbbox = xywh2xyxy(poly2hbb(pred_poly[:, :8]))  # (n, [x1 y1 x2 y2])
            pred_hbb = torch.cat((hbbox, pred_poly[:, -2:]), dim=1)  # (n, [xyxy, conf, cls])

            pred_polyn = pred_poly.clone()  # predn (tensor): (n, [poly, conf, cls])
            scale_polys(im[si].shape[1:], pred_polyn[:, :8], shape, shapes[si][1])  # native-space pred
            hbboxn = xywh2xyxy(poly2hbb(pred_polyn[:, :8]))  # (n, [x1 y1 x2 y2])
            pred_hbbn = torch.cat((hbboxn, pred_polyn[:, -2:]), dim=1)  # (n, [xyxy, conf, cls]) native-space pred
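            # Note (added comment, not in the upstream file): the rotated boxes
            # [cx cy l s θ] are expanded to 8-point polygons by rbox2poly, then
            # wrapped by poly2hbb into axis-aligned xyxy rectangles, so that the
            # standard IoU-based matching in process_batch can be reused for OBBs.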
            # Evaluate
            if nl:
                # tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
                tpoly = rbox2poly(labels[:, 1:6])  # target poly
                tbox = xywh2xyxy(poly2hbb(tpoly))  # target hbb boxes [xyxy]
                scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                labels_hbbn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels (n, [cls xyxy])
                correct = process_batch(pred_hbbn, labels_hbbn, iouv)
                if plots:
                    confusion_matrix.process_batch(pred_hbbn, labels_hbbn)
            else:
                correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)
            # stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))  # (correct, conf, pcls, tcls)
            stats.append((correct.cpu(), pred_poly[:, 8].cpu(), pred_poly[:, 9].cpu(), tcls))  # (correct, conf, pcls, tcls)

            # Save/log
            if save_txt:  # just save hbb pred results!
                save_one_txt(pred_hbbn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
                # LOGGER.info('The horizontal prediction results have been saved in txt, with format [cls cx cy w h /conf/]')
            if save_json:  # save hbb pred results and poly pred results.
                save_one_json(pred_hbbn, pred_polyn, jdict, path, class_map)  # append to COCO-JSON dictionary
                # LOGGER.info('The hbb and obb results have been saved in the json file')
            callbacks.run('on_val_image_end', pred_hbb, pred_hbbn, path, names, im[si])

        # Plot images
        if plots and batch_i < 3:
            f = save_dir / f'val_batch{batch_i}_labels.jpg'  # labels
            Thread(target=plot_images, args=(im, targets, paths, f, names), daemon=True).start()
            f = save_dir / f'val_batch{batch_i}_pred.jpg'  # predictions
            Thread(target=plot_images, args=(im, output_to_target(out), paths, f, names), daemon=True).start()

    # Compute metrics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64), minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%11i' * 2 + '%11.3g' * 4  # print format
    LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x / seen * 1E3 for x in dt)  # speeds per image
    if not training:
        shape = (batch_size, 3, imgsz, imgsz)
        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        callbacks.run('on_val_end')

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json')  # annotations json
        pred_json = str(save_dir / f"{w}_obb_predictions.json")  # predictions json
        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)
        LOGGER.info('---------------------The hbb and obb results have been saved in the json file-----------------------')

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            check_requirements(['pycocotools'])
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files]  # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            LOGGER.info(f'pycocotools unable to run: {e}')

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t

def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default=ROOT / 'data/DroneVehicle_poly.yaml', help='dataset.yaml path')
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs/train/yolov5n_DroneVehicle/weights/best.pt', help='model.pt path(s)')
    parser.add_argument('--batch-size', type=int, default=8, help='batch size')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=1024, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.01, help='confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.4, help='NMS IoU threshold')
    parser.add_argument('--task', default='val', help='train, val, test, speed or study')
    parser.add_argument('--device', default='1', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
    parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    opt = parser.parse_args()
    opt.data = check_yaml(opt.data)  # check YAML
    opt.save_json |= opt.data.endswith('coco.yaml')
    opt.save_txt |= opt.save_hybrid
    print_args(FILE.stem, opt)
    return opt


def main(opt):
    check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))

    if opt.task in ('train', 'val', 'test'):  # run normally
        # if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
        if opt.conf_thres > 0.01:
            LOGGER.info(f'WARNING: In oriented detection, confidence threshold {opt.conf_thres} >> 0.01 will produce invalid mAP values.')
        run(**vars(opt))

    else:
        weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
        opt.half = True  # FP16 for fastest results
        if opt.task == 'speed':  # speed benchmarks
            # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
            opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
            for opt.weights in weights:
                run(**vars(opt), plots=False)

        elif opt.task == 'study':  # speed vs mAP benchmarks
            # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
            for opt.weights in weights:
                f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt'  # filename to save to
                x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis
                for opt.imgsz in x:  # img-size
                    LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
                    r, _, t = run(**vars(opt), plots=False)
                    y.append(r + t)  # results and times
                np.savetxt(f, y, fmt='%10.4g')  # save
            os.system('zip -r study.zip study_*.txt')
            plot_val_study(x=x)  # plot


if __name__ == "__main__":
    opt = parse_opt()
    main(opt)
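
# --- Hedged usage sketch, not part of the upstream file. The dataset yaml and
# checkpoint paths below are placeholders; substitute your own:
#   from val import run
#   (mp, mr, map50, map, *losses), maps, t = run(data='data/DroneVehicle_poly.yaml',
#                                                weights='best.pt', imgsz=1024,
#                                                conf_thres=0.01, iou_thres=0.4)
#   # mp/mr are mean precision/recall; map50 and map are mAP@.5 and mAP@.5:.95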