GitHub Repository: hukaixuan19970627/yolov5_obb
Path: blob/master/DOTA_devkit/ucasaod_evaluation.py
# --------------------------------------------------------
# dota_evaluation_task1
# Licensed under The MIT License [see LICENSE for details]
# Written by Jian Ding, based on code from Bharath Hariharan
# --------------------------------------------------------

"""
To use the code, users should configure detpath, annopath and imagesetfile.
detpath is the path to the per-class result files; for the format, refer to "http://captain.whu.edu.cn/DOTAweb/tasks.html".
Search for PATH_TO_BE_CONFIGURED to configure the paths.
Note: the evaluation is done on the large-scale images.
"""
import xml.etree.ElementTree as ET
import os
#import cPickle
import numpy as np
import matplotlib.pyplot as plt
import polyiou
from functools import partial

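# Note on file formats (added for clarity; inferred from parse_gt() and
# voc_eval() below, not an authoritative spec). Fields are space-separated:
#   ground-truth file, one object per line:
#       x1 y1 x2 y2 x3 y3 x4 y4 category [difficult]
#   detection file detpath.format(classname), one detection per line:
#       image_id confidence x1 y1 x2 y2 x3 y3 x4 y4
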
def parse_gt(filename):
    """
    :param filename: ground truth file to parse
    :return: all instances in a picture
    """
    objects = []
    with open(filename, 'r') as f:
        while True:
            line = f.readline()
            if line:
                splitlines = line.strip().split(' ')
                object_struct = {}
                if (len(splitlines) < 9):
                    continue
                object_struct['name'] = splitlines[8]

                if (len(splitlines) == 9):
                    object_struct['difficult'] = 0
                elif (len(splitlines) == 10):
                    object_struct['difficult'] = int(splitlines[9])
                object_struct['bbox'] = [float(splitlines[0]),
                                         float(splitlines[1]),
                                         float(splitlines[2]),
                                         float(splitlines[3]),
                                         float(splitlines[4]),
                                         float(splitlines[5]),
                                         float(splitlines[6]),
                                         float(splitlines[7])]
                objects.append(object_struct)
            else:
                break
    return objects

def voc_ap(rec, prec, use_07_metric=False):
    """ ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the
    VOC 07 11 point method (default: False).
    """
    if use_07_metric:
        # 11 point metric
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.
    else:
        # correct AP calculation
        # first append sentinel values at the end
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))

        # compute the precision envelope
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

        # to calculate area under PR curve, look for points
        # where X axis (recall) changes value
        i = np.where(mrec[1:] != mrec[:-1])[0]

        # and sum (\Delta recall) * prec
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap

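# Illustrative check (added; not part of the original devkit): for
# rec = np.array([0.5, 1.0]) and prec = np.array([1.0, 0.5]),
# voc_ap(rec, prec) gives 0.75 (area under the interpolated PR curve),
# while voc_ap(rec, prec, use_07_metric=True) gives
# (6 * 1.0 + 5 * 0.5) / 11 ≈ 0.773, since recall thresholds 0.0-0.5 see a
# best precision of 1.0 and thresholds 0.6-1.0 see 0.5.
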
def voc_eval(detpath,
             annopath,
             imagesetfile,
             classname,
             # cachedir,
             ovthresh=0.5,
             use_07_metric=False):
    """rec, prec, ap = voc_eval(detpath,
                                annopath,
                                imagesetfile,
                                classname,
                                [ovthresh],
                                [use_07_metric])
    Top level function that does the PASCAL VOC evaluation.
    detpath: Path to detections
        detpath.format(classname) should produce the detection results file.
    annopath: Path to annotations
        annopath.format(imagename) should be the annotation txt file.
    imagesetfile: Text file containing the list of images, one image per line.
    classname: Category name (duh)
    cachedir: Directory for caching the annotations
    [ovthresh]: Overlap threshold (default = 0.5)
    [use_07_metric]: Whether to use VOC07's 11 point AP computation
        (default False)
    """
    # assumes detections are in detpath.format(classname)
    # assumes annotations are in annopath.format(imagename)
    # assumes imagesetfile is a text file with each line an image name
    # cachedir caches the annotations in a pickle file

    # first load gt
    #if not os.path.isdir(cachedir):
    #    os.mkdir(cachedir)
    #cachefile = os.path.join(cachedir, 'annots.pkl')
    # read list of images
    with open(imagesetfile, 'r') as f:
        lines = f.readlines()
    imagenames = [x.strip() for x in lines]
    #print('imagenames: ', imagenames)
    #if not os.path.isfile(cachefile):
    # load annots
    recs = {}
    for i, imagename in enumerate(imagenames):
        #print('parse_files name: ', annopath.format(imagename))
        recs[imagename] = parse_gt(annopath.format(imagename))
        #if i % 100 == 0:
        #    print('Reading annotation for {:d}/{:d}'.format(
        #        i + 1, len(imagenames)))
    # save
    #print('Saving cached annotations to {:s}'.format(cachefile))
    #with open(cachefile, 'w') as f:
    #    cPickle.dump(recs, f)
    #else:
    #    # load
    #    with open(cachefile, 'r') as f:
    #        recs = cPickle.load(f)

    # extract gt objects for this class
    class_recs = {}
    npos = 0
    for imagename in imagenames:
        R = [obj for obj in recs[imagename] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in R])
        # np.bool was removed in NumPy >= 1.24; the built-in bool is equivalent here
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
        det = [False] * len(R)
        npos = npos + sum(~difficult)
        class_recs[imagename] = {'bbox': bbox,
                                 'difficult': difficult,
                                 'det': det}

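    # Note (added for clarity): npos counts only the non-difficult ground-truth
    # boxes of this class; it is the recall denominator below, so 'difficult'
    # objects never penalize a detector that misses them.
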
    # read dets from Task1* files
    detfile = detpath.format(classname)
    with open(detfile, 'r') as f:
        lines = f.readlines()

    splitlines = [x.strip().split(' ') for x in lines]
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])

    #print('check confidence: ', confidence)

    BB = np.array([[float(z) for z in x[2:]] for x in splitlines])

    # sort by confidence
    sorted_ind = np.argsort(-confidence)
    sorted_scores = np.sort(-confidence)

    #print('check sorted_scores: ', sorted_scores)
    #print('check sorted_ind: ', sorted_ind)

    ## note: this fancy indexing works on numpy arrays, not on plain lists
    BB = BB[sorted_ind, :]
    image_ids = [image_ids[x] for x in sorted_ind]
    #print('check imge_ids: ', image_ids)
    #print('imge_ids len:', len(image_ids))
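
    # Note (added for clarity): detections are processed in descending score
    # order, and each ground-truth box can be claimed at most once (tracked
    # via R['det']); any later detection matching an already-claimed box is
    # counted as a false positive.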
    # go down dets and mark TPs and FPs
    nd = len(image_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        R = class_recs[image_ids[d]]
        bb = BB[d, :].astype(float)
        ovmax = -np.inf
        BBGT = R['bbox'].astype(float)

        ## compare the detected bb with each BBGT

        if BBGT.size > 0:
            # compute overlaps
            # intersection

            # 1. calculate the overlaps between hbbs; if the IoU between the hbbs
            #    is 0, the IoU between the corresponding obbs is 0 too.
            # pdb.set_trace()
            BBGT_xmin = np.min(BBGT[:, 0::2], axis=1)
            BBGT_ymin = np.min(BBGT[:, 1::2], axis=1)
            BBGT_xmax = np.max(BBGT[:, 0::2], axis=1)
            BBGT_ymax = np.max(BBGT[:, 1::2], axis=1)
            bb_xmin = np.min(bb[0::2])
            bb_ymin = np.min(bb[1::2])
            bb_xmax = np.max(bb[0::2])
            bb_ymax = np.max(bb[1::2])

            ixmin = np.maximum(BBGT_xmin, bb_xmin)
            iymin = np.maximum(BBGT_ymin, bb_ymin)
            ixmax = np.minimum(BBGT_xmax, bb_xmax)
            iymax = np.minimum(BBGT_ymax, bb_ymax)
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih

            # union
            uni = ((bb_xmax - bb_xmin + 1.) * (bb_ymax - bb_ymin + 1.) +
                   (BBGT_xmax - BBGT_xmin + 1.) *
                   (BBGT_ymax - BBGT_ymin + 1.) - inters)

            overlaps = inters / uni

            BBGT_keep_mask = overlaps > 0
            BBGT_keep = BBGT[BBGT_keep_mask, :]
            BBGT_keep_index = np.where(overlaps > 0)[0]
            # pdb.set_trace()
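
            # 2. for the surviving candidates, compute the polygon (OBB) IoU via
            #    the polyiou extension; iou_poly takes two flat coordinate
            #    vectors (x1, y1, ..., x4, y4) wrapped in polyiou.VectorDouble,
            #    as the call below shows. The horizontal-box pass above is only
            #    a cheap prefilter.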
            def calcoverlaps(BBGT_keep, bb):
                overlaps = []
                for index, GT in enumerate(BBGT_keep):
                    overlap = polyiou.iou_poly(polyiou.VectorDouble(BBGT_keep[index]), polyiou.VectorDouble(bb))
                    overlaps.append(overlap)
                return overlaps

            if len(BBGT_keep) > 0:
                overlaps = calcoverlaps(BBGT_keep, bb)

                ovmax = np.max(overlaps)
                jmax = np.argmax(overlaps)
                # pdb.set_trace()
                jmax = BBGT_keep_index[jmax]

        if ovmax > ovthresh:
            if not R['difficult'][jmax]:
                if not R['det'][jmax]:
                    tp[d] = 1.
                    R['det'][jmax] = 1
                else:
                    fp[d] = 1.
        else:
            fp[d] = 1.

    # compute precision recall

    print('check fp:', fp)
    print('check tp', tp)

    print('npos num:', npos)
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)

    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, use_07_metric)

    return rec, prec, ap


def main():

    detpath = r'/mnt/SSD/lwt_workdir/BeyondBoundingBox/ucasaod_pkl/ucas_angle/result_raw_retinanet/Task1_{:s}.txt'
    annopath = r'/mnt/SSD/lwt_workdir/BeyondBoundingBox/data/ucas_aod/Test/labelTxt/{:s}.txt'  # change the directory to the path of val/labelTxt if you want to evaluate on the val set
    imagesetfile = r'/mnt/SSD/lwt_workdir/BeyondBoundingBox/data/ucas_aod/Test/test.txt'

    # For UCAS-AOD
    classnames = ['car', 'airplane']
    classaps = []
    map = 0
    for classname in classnames:
        print('classname:', classname)
        rec, prec, ap = voc_eval(detpath,
                                 annopath,
                                 imagesetfile,
                                 classname,
                                 ovthresh=0.7,
                                 use_07_metric=True)
        map = map + ap
        #print('rec: ', rec, 'prec: ', prec, 'ap: ', ap)
        print('ap: ', ap)
        classaps.append(ap)

        # uncomment to show the p-r curve of each category
        # plt.figure(figsize=(8,4))
        # plt.xlabel('recall')
        # plt.ylabel('precision')
        # plt.plot(rec, prec)
        # plt.show()
    map = map / len(classnames)
    print('map:', map)
    classaps = 100 * np.array(classaps)
    print('classaps: ', classaps)


if __name__ == '__main__':
    main()
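
# Usage note (added; not part of the original file): edit detpath, annopath and
# imagesetfile in main() to point at your own detection results, labelTxt
# annotations and image-set list, then run
#     python ucasaod_evaluation.py
# The compiled polyiou module imported above must be importable (it is built
# from the DOTA_devkit sources). The script prints the per-class AP and the mAP
# over the 'car' and 'airplane' categories at an OBB IoU threshold of 0.7 using
# the VOC07 11-point metric.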