# Source: github.com/hukaixuan19970627/yolov5_obb, val.py
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Validate a trained YOLOv5 model accuracy on a custom dataset

Usage:
    $ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640
"""

import argparse
import json
import os
import sys
from pathlib import Path
from threading import Thread

import numpy as np
import torch
from tqdm import tqdm

from utils.rboxs_utils import poly2hbb, rbox2poly

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.callbacks import Callbacks
from utils.datasets import create_dataloader
from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml,
                           coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
                           scale_coords, scale_polys, xywh2xyxy, xyxy2xywh, non_max_suppression_obb)
from utils.metrics import ConfusionMatrix, ap_per_class
from utils.plots import output_to_target, plot_images, plot_val_study
from utils.torch_utils import select_device, time_sync


def save_one_txt(predn, save_conf, shape, file):
    # Save one txt result
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
    for *xyxy, conf, cls in predn.tolist():
        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
        with open(file, 'a') as f:
            f.write(('%g ' * len(line)).rstrip() % line + '\n')
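    # With save_conf=True a saved line looks like "0 0.512 0.437 0.103 0.086 0.92"
    # (cls, normalized cx cy w h, conf); the numbers here are illustrative only.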


# def save_one_json(predn, jdict, path, class_map):
def save_one_json(pred_hbbn, pred_polyn, jdict, path, class_map):
    """
    Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236, "poly": [...]}
    Args:
        pred_hbbn (tensor): (n, [xyxy, conf, cls]), horizontal boxes in native image space
        pred_polyn (tensor): (n, [poly, conf, cls]), polygons in native image space
    """
    image_id = int(path.stem) if path.stem.isnumeric() else path.stem
    box = xyxy2xywh(pred_hbbn[:, :4])  # xywh
    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
    for p, b in zip(pred_polyn.tolist(), box.tolist()):
        jdict.append({'image_id': image_id,
                      'category_id': class_map[int(p[-1]) + 1],  # COCO category_id starts at 1, not 0
                      'bbox': [round(x, 1) for x in b],
                      'score': round(p[-2], 5),
                      'poly': [round(x, 1) for x in p[:8]],
                      'file_name': path.stem})
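    # 'poly' holds the rotated box's four corner points [x1, y1, ..., x4, y4] in native
    # image space; 'file_name' is a non-standard COCO field, presumably kept so results
    # can be traced back to their source image by OBB tooling outside pycocotools.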


def process_batch(detections, labels, iouv):
    """
    Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
    Arguments:
        detections (Array[N, 6]), x1, y1, x2, y2, conf, class
        labels (Array[M, 5]), class, x1, y1, x2, y2
    Returns:
        correct (Array[N, 10]), for 10 IoU levels
    """
    correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device)
    iou = box_iou(labels[:, 1:], detections[:, :4])
    x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5]))  # IoU above threshold and classes match
    if x[0].shape[0]:
        matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detection, iou]
        if x[0].shape[0] > 1:
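            # Greedy one-to-one matching: sort candidate pairs by IoU (descending), then
            # keep only the first occurrence of each detection and of each label so
            # neither is matched more than once.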
            matches = matches[matches[:, 2].argsort()[::-1]]
            matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
            # matches = matches[matches[:, 2].argsort()[::-1]]
            matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
        matches = torch.Tensor(matches).to(iouv.device)
        correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv
    return correct


@torch.no_grad()
def run(data,
        weights=None,  # model.pt path(s)
        batch_size=32,  # batch size
        imgsz=640,  # inference size (pixels)
        conf_thres=0.01,  # confidence threshold
        iou_thres=0.4,  # NMS IoU threshold
        task='val',  # train, val, test, speed or study
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        single_cls=False,  # treat as single-class dataset
        augment=False,  # augmented inference
        verbose=False,  # verbose output
        save_txt=False,  # save results to *.txt
        save_hybrid=False,  # save label+prediction hybrid results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / 'runs/val',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        model=None,
        dataloader=None,
        save_dir=Path(''),
        plots=True,
        callbacks=Callbacks(),
        compute_loss=None,
        ):
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model

        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half &= (pt or jit or engine) and device.type != 'cpu'  # half precision only supported by PyTorch on CUDA
        if pt or jit:
            model.model.half() if half else model.model.float()
        elif engine:
            batch_size = model.batch_size
        else:
            half = False
            batch_size = 1  # export.py models default to batch-size 1
            device = torch.device('cpu')
            LOGGER.info(f'Forcing --batch-size 1 square inference shape(1,3,{imgsz},{imgsz}) for non-PyTorch backends')

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
    # Dataloader
    if not training:
        model.warmup(imgsz=(1, 3, imgsz, imgsz), half=half)  # warmup
        pad = 0.0 if task == 'speed' else 0.5
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task], imgsz, batch_size, stride, names, single_cls, pad=pad, rect=pt,
                                       workers=workers, prefix=colorstr(f'{task}: '))[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    # names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
    dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    # loss = torch.zeros(3, device=device)
    loss = torch.zeros(4, device=device)
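    # 4 loss terms (box, obj, cls, theta) versus upstream YOLOv5's 3: the extra slot is
    # the angle (theta) loss of the oriented head.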
    jdict, stats, ap, ap_class = [], [], [], []
    pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
    for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
        # targets (tensor): (n_gt_all_batch, [img_index clsid cx cy l s theta gaussian_θ_labels]) θ ∈ [-pi/2, pi/2)
        # shapes (tensor): (b, [(h_raw, w_raw), (hw_ratios, wh_paddings)])
        t1 = time_sync()
        if pt or jit or engine:
            im = im.to(device, non_blocking=True)
            targets = targets.to(device)
        im = im.half() if half else im.float()  # uint8 to fp16/32
        im /= 255  # 0 - 255 to 0.0 - 1.0
        nb, _, height, width = im.shape  # batch size, channels, height, width
        t2 = time_sync()
        dt[0] += t2 - t1

        # Inference
        out, train_out = model(im) if training else model(im, augment=augment, val=True)  # inference, loss outputs
        dt[1] += time_sync() - t2

        # Loss
        if compute_loss:
            loss += compute_loss([x.float() for x in train_out], targets)[1]  # box, obj, cls, theta

        # NMS
        # targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        t3 = time_sync()
        # out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
        out = non_max_suppression_obb(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)  # list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2)
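        # Rotated NMS: overlap is measured between rotated boxes rather than their
        # axis-aligned envelopes, which would overestimate the overlap of tilted,
        # elongated objects.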
        dt[2] += time_sync() - t3

        # Metrics
        for si, pred in enumerate(out):  # pred (tensor): (n, [xylsθ, conf, cls])
            labels = targets[targets[:, 0] == si, 1:7]  # labels (tensor): (n_gt, [clsid cx cy l s theta]) θ ∈ [-pi/2, pi/2)
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            path, shape = Path(paths[si]), shapes[si][0]  # shape (tensor): (h_raw, w_raw)
            seen += 1

            if len(pred) == 0:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Predictions
            if single_cls:
                # pred[:, 5] = 0
                pred[:, 6] = 0
            poly = rbox2poly(pred[:, :5])  # (n, 8)
            pred_poly = torch.cat((poly, pred[:, -2:]), dim=1)  # (n, [poly, conf, cls])
            hbbox = xywh2xyxy(poly2hbb(pred_poly[:, :8]))  # (n, [x1 y1 x2 y2])
            pred_hbb = torch.cat((hbbox, pred_poly[:, -2:]), dim=1)  # (n, [xyxy, conf, cls])

            pred_polyn = pred_poly.clone()  # predn (tensor): (n, [poly, conf, cls])
            scale_polys(im[si].shape[1:], pred_polyn[:, :8], shape, shapes[si][1])  # native-space pred
            hbboxn = xywh2xyxy(poly2hbb(pred_polyn[:, :8]))  # (n, [x1 y1 x2 y2])
            pred_hbbn = torch.cat((hbboxn, pred_polyn[:, -2:]), dim=1)  # (n, [xyxy, conf, cls]) native-space pred
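            # Each prediction now exists in two forms: the 8-point polygon (saved to the
            # OBB JSON) and its horizontal envelope (fed to the hbb metric, txt and
            # plotting code), in both network (letterboxed) and native image space.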

            # Evaluate
            if nl:
                # tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
                tpoly = rbox2poly(labels[:, 1:6])  # target poly
                tbox = xywh2xyxy(poly2hbb(tpoly))  # target hbb boxes [xyxy]
                scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                labels_hbbn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels (n, [cls xyxy])
                correct = process_batch(pred_hbbn, labels_hbbn, iouv)
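                # Correctness is judged on horizontal envelopes (xyxy) at IoU 0.5:0.95,
                # not on rotated-box IoU, so the mAP reported here is an hbb metric.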
                if plots:
                    confusion_matrix.process_batch(pred_hbbn, labels_hbbn)
            else:
                correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)
            # stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))  # (correct, conf, pcls, tcls)
            stats.append((correct.cpu(), pred_poly[:, 8].cpu(), pred_poly[:, 9].cpu(), tcls))  # (correct, conf, pcls, tcls)

            # Save/log
            if save_txt:  # only hbb pred results are saved as txt!
                save_one_txt(pred_hbbn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
                # LOGGER.info('The horizontal prediction results have been saved as txt, in the format [cls cx cy w h /conf/]')
            if save_json:  # save hbb pred results and poly pred results
                save_one_json(pred_hbbn, pred_polyn, jdict, path, class_map)  # append to COCO-JSON dictionary
                # LOGGER.info('The hbb and obb results have been saved to a json file')
            callbacks.run('on_val_image_end', pred_hbb, pred_hbbn, path, names, im[si])

        # Plot images
        if plots and batch_i < 3:
            f = save_dir / f'val_batch{batch_i}_labels.jpg'  # labels
            Thread(target=plot_images, args=(im, targets, paths, f, names), daemon=True).start()
            f = save_dir / f'val_batch{batch_i}_pred.jpg'  # predictions
            Thread(target=plot_images, args=(im, output_to_target(out), paths, f, names), daemon=True).start()

    # Compute metrics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64), minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%11i' * 2 + '%11.3g' * 4  # print format
    LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x / seen * 1E3 for x in dt)  # speeds per image
    if not training:
        shape = (batch_size, 3, imgsz, imgsz)
        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        callbacks.run('on_val_end')

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json')  # annotations json
        pred_json = str(save_dir / f"{w}_obb_predictions.json")  # predictions json
        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)
        LOGGER.info('--------------------- The hbb and obb results have been saved to a JSON file -----------------------')

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            check_requirements(['pycocotools'])
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files]  # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
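            # COCOeval runs in 'bbox' mode, so only the horizontal 'bbox' entries are
            # scored; the 'poly' entries ride along in the JSON for external OBB tools.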
        except Exception as e:
            LOGGER.info(f'pycocotools unable to run: {e}')

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default=ROOT / 'data/DroneVehicle_poly.yaml', help='dataset.yaml path')
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs/train/yolov5n_DroneVehicle/weights/best.pt', help='model.pt path(s)')
    parser.add_argument('--batch-size', type=int, default=8, help='batch size')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=1024, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.01, help='confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.4, help='NMS IoU threshold')
    parser.add_argument('--task', default='val', help='train, val, test, speed or study')
    parser.add_argument('--device', default='1', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
    parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    opt = parser.parse_args()
    opt.data = check_yaml(opt.data)  # check YAML
    opt.save_json |= opt.data.endswith('coco.yaml')
    opt.save_txt |= opt.save_hybrid
    print_args(FILE.stem, opt)
    return opt


def main(opt):
    check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))

    if opt.task in ('train', 'val', 'test'):  # run normally
        # if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
        if opt.conf_thres > 0.01:
            LOGGER.info(f'WARNING: In oriented detection, confidence threshold {opt.conf_thres} >> 0.01 will produce invalid mAP values.')
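            # mAP integrates precision over the full recall range; filtering predictions
            # at a high confidence threshold truncates the PR curve and skews the score.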
        run(**vars(opt))

    else:
        weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
        opt.half = True  # FP16 for fastest results
        if opt.task == 'speed':  # speed benchmarks
            # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
            opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
            for opt.weights in weights:
                run(**vars(opt), plots=False)

        elif opt.task == 'study':  # speed vs mAP benchmarks
            # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
            for opt.weights in weights:
                f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt'  # filename to save to
                x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis
                for opt.imgsz in x:  # img-size
                    LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
                    r, _, t = run(**vars(opt), plots=False)
                    y.append(r + t)  # results and times
                np.savetxt(f, y, fmt='%10.4g')  # save
            os.system('zip -r study.zip study_*.txt')
            plot_val_study(x=x)  # plot


if __name__ == "__main__":
    opt = parse_opt()
    main(opt)