Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
yt-project
GitHub Repository: yt-project/yt
Path: blob/main/tests/report_failed_answers.py
928 views
1
"""
2
Script to generate report of failed answer tests or to generate golden answers
3
on cloud platforms like Travis
4
5
"""
6
7
import argparse
8
import base64
9
import collections
10
import datetime
11
import logging
12
import os
13
import re
14
import shutil
15
import sys
16
import tempfile
17
import xml.etree.ElementTree as ET
18
19
import nose
20
import numpy
21
import requests
22
23
from yt.config import ytcfg
24
from yt.utilities.answer_testing.framework import AnswerTesting
25
from yt.utilities.command_line import FileStreamer
26
27
logging.basicConfig(level=logging.INFO)
28
log = logging.getLogger("yt_report_failed_answers")
29
numpy.set_printoptions(threshold=5, edgeitems=1, precision=4)
30
31
32
def generate_failed_answers_html(failed_answers):
    """Render a single html page for the failed answer tests.

    The actual, expected and difference images of every failed answer
    test are inlined into the page as base64-encoded pngs.

    Parameters
    ----------
    failed_answers : dict mapping string to dict
        the key is a string denoting the test name, the value is a
        dictionary that stores the actual, expected and difference plot
        file locations of the test.

    Returns
    -------
    string
        a html page

    """

    html_template = """
    <html><head>
    <style media="screen" type="text/css">
    img{{
      width:100%;
      max-width:800px;
    }}
    </style>
    <h1 style="text-align: center;">Failed Answer Tests</h1>
    <p>
      This report shows images of answer tests that failed when running
      the answer tests.
    </p>
    <p>
      <strong>Actual Image:</strong> plot generated while running the test<br/>
      <strong>Expected Image:</strong> golden answer image<br/>
      <strong>Difference Image:</strong> difference in the "actual"
      and "expected" image
    </p>
    <hr/>
    </head><body>
    <table>{rows}</table>
    </body></html>
    """

    row_template = """
    <tr>
    <td align="center">Actual</td>
    <td align="center">Expected</td>
    <td align="center">Difference</td>
    </tr>
    <tr>
    <td><img src="data:image/png;base64,{0}"></td>
    <td><img src="data:image/png;base64,{1}"></td>
    <td><img src="data:image/png;base64,{2}"></td>
    </tr>
    <tr><td align="center" colspan="3"><b>Test: {3}</b><hr/></td></tr>
    """

    rows = []
    for tests in failed_answers.values():
        for test_name, plots in tests.items():
            # Inline each of the three plots as a base64-encoded png.
            encoded = {}
            for kind, path in plots.items():
                with open(path, "rb") as stream:
                    encoded[kind] = base64.b64encode(stream.read()).decode()
            rows.append(
                row_template.format(
                    encoded["Actual"],
                    encoded["Expected"],
                    encoded["Difference"],
                    test_name,
                )
            )
    return html_template.format(rows="\n".join(rows))
111
112
113
def upload_to_curldrop(data, filename):
    """Upload ``data`` to yt's curldrop server under the name ``filename``.

    ``filename`` is a format template; its placeholder is filled with a
    Travis job identifier when running on Travis, otherwise with the
    current timestamp.

    Parameters
    ----------
    data : bytes
        Content to be uploaded

    filename : string
        Name of file at curldrop's upload server

    Returns
    -------
    requests.models.Response
        Response returned by curldrop server

    """
    if "TRAVIS" in os.environ:
        # Make the upload name traceable to the CI job that produced it.
        file_id = "Travis_Job_Num_" + os.environ["TRAVIS_JOB_NUMBER"].replace(".", "_")
    else:
        file_id = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

    target_name = os.path.basename(filename.format(file_id))
    upload_url = "/".join([ytcfg.get("yt", "curldrop_upload_url"), target_name])
    return requests.put(upload_url, data=data)
143
144
145
def upload_failed_answers(failed_answers):
    """Upload an html comparison report of the failed answer tests.

    Parameters
    ----------
    failed_answers : dict mapping string to dict
        the key is a string denoting the test name, the value is a
        dictionary that stores the actual, expected and difference plot
        file locations of the test.

    Returns
    -------
    requests.models.Response
        Response as returned by the upload service

    """
    # curldrop expects raw bytes, so encode the generated page.
    page = generate_failed_answers_html(failed_answers).encode()
    return upload_to_curldrop(data=page, filename="failed_answers_{}.html")
169
170
171
def generate_answers(answer_dir, answers):
    """Generate golden answers for the given tests.

    Runs nose with yt's answer-testing plugin in "store" mode for every
    test in ``answers``, saving the generated answers under ``answer_dir``.

    Parameters
    ----------
    answer_dir : string
        directory location to save the generated answers

    answers : list of string
        Collection of missing answer tests specifying full name of the test.
        eg. ['yt.visualization.tests.test_line_plots:test_multi_line_plot']

    Returns
    -------
    bool
        True, if all the missing answers are successfully generated
        False, otherwise

    """
    base_argv = [
        os.path.basename(__file__),
        "--with-answer-testing",
        "--nologcapture",
        "-s",
        "-d",
        "-v",
        "--local",
        f"--local-dir={answer_dir}",
        "--answer-store",
    ]

    # Run every job even if an earlier one failed, then report overall success.
    results = []
    for job in answers:
        log.info("\n Generating answers for %s", job)
        results.append(
            nose.run(argv=base_argv + [job], addplugins=[AnswerTesting()], exit=False)
        )
    return all(results)
212
213
214
def upload_answers(answers):
    """Generate and upload answers not present in answer-store.

    This function generates the answers for tests that are not present in
    answer store and uploads a zip file of the same.

    Parameters
    ----------
    answers : list of string
        Collection of missing answer tests specifying full name of the test.
        eg. ['yt.visualization.tests.test_line_plots:test_multi_line_plot']

    Returns
    -------
    requests.models.Response
        Response as returned by the upload service when answers are
        successfully uploaded

    None
        for the case when there was some error while generating the missing
        golden-answers

    """
    # Temporary workspace for the freshly generated answer store.
    tmpdir = tempfile.mkdtemp()
    try:
        answer_dir = os.path.join(tmpdir, "answer-store")
        if not os.path.exists(answer_dir):
            os.mkdir(answer_dir)

        if not generate_answers(answer_dir, answers):
            # Some answer failed to generate; nothing to upload.
            return None

        zip_file = shutil.make_archive(
            os.path.join(tmpdir, "new-answers"), "zip", answer_dir
        )
        # Keep the archive open for the duration of the streamed upload.
        # (The original opened the file without ever closing it.)
        with open(zip_file, "rb") as archive:
            data = iter(FileStreamer(archive))
            response = upload_to_curldrop(data=data, filename="new_answers_{}.zip")
        return response
    finally:
        # Always clean up; the original leaked tmpdir when generation failed.
        shutil.rmtree(tmpdir)
252
253
254
def extract_image_locations(error_string):
    """Extract the Actual/Expected/Difference plot paths from an error message.

    Parameters
    ----------
    error_string : String
        The input string having file locations of 'Actual', 'Expected' and
        'Difference' plots. This string is generated by yt's answer-testing
        plugin, when the plot generated in the test does not match to its
        golden answer image.

    Returns
    -------
    dict
        If the `error_string` is successfully parsed to extract plot locations,
        then a dictionary with the keys 'Actual', 'Expected','Difference' and
        values having corresponding plot file locations is returned.
        eg. {'Actual': '/usr/tmp/tmp43la9b0w.png',
             'Expected': '/usr/tmp/tmpbpaqbgi3.png',
             'Difference': '/usr/tmp/tmp43la9b0w-failed-diff.png'}
    None
        When `error_string` does not conform to yt's answer-testing error
        message, which has the information for plot file locations on disk.

    """
    # A label is followed by optional whitespace, a newline, then the png path.
    path_pattern = r"\s*\n\s*(.*?.png)"
    locations = {}
    for label in ("Actual", "Expected", "Difference"):
        match = re.search(label + ":" + path_pattern, error_string, re.MULTILINE)
        if match is None:
            # Message does not follow the answer-testing format.
            return None
        locations[label] = match.group(1)
    return locations
298
299
300
def parse_nose_xml(nose_xml):
    """Parse xml file generated by nosetests.

    Parse nose xml file to find following details:
    Failed tests: These could be due to difference in golden answer image
    and corresponding test plot.

    Missing tests: These errors occur when a corresponding golden answer
    image is not found.

    Parameters
    ----------
    nose_xml : string
        full path of xml file to be parsed

    Returns
    -------
    tuple : (failed_answers, missing_answers)

    failed_answers : dict mapping string to dict
        Maps a failed test's full name to a dictionary keyed by the
        failure message; each value holds the 'Actual', 'Expected' and
        'Difference' plot file locations for that failure.
        eg. {'yt.visualization.tests.test_line_plots:test_line_plot':
                {'Items are not equal...':
                    {'Actual': '/usr/tmp/tmp43la9b0w.png',
                     'Expected': '/usr/tmp/tmpbpaqbgi3.png',
                     'Difference': '/usr/tmp/tmp43la9b0w-failed-diff.png'}}}

    missing_answers : set of string
        Collection of missing answer tests specifying full name of the test.
        eg. {'yt.visualization.tests.test_line_plots:test_multi_line_plot'}

    """
    missing_answers = set()
    failed_answers = collections.defaultdict(dict)
    # Substrings that identify a "golden answer not found" message.
    missing_errors = ["No such file or directory", "There is no old answer available"]
    testsuite = ET.parse(nose_xml).getroot()

    for testcase in testsuite:
        # nose reports problems either as <error> or as <failure> elements.
        for tag in ("error", "failure"):
            for problem in testcase.iter(tag):
                handle_error(
                    problem, testcase, missing_errors, missing_answers, failed_answers
                )
    return failed_answers, missing_answers
350
351
352
def handle_error(error, testcase, missing_errors, missing_answers, failed_answers):
    """Classify one nose <error>/<failure> element as missing or failed.

    Mutates ``missing_answers`` (adds the test's full name when its golden
    answer is absent) or ``failed_answers`` (stores the plot locations when
    the test image differs from the golden answer). Elements matching
    neither category are ignored.
    """
    test_name = "{}:{}".format(testcase.attrib["classname"], testcase.attrib["name"])
    message = error.attrib["message"]
    if missing_errors[0] in message or missing_errors[1] in message:
        # No golden answer was ever stored for this test.
        missing_answers.add(test_name)
    elif "Items are not equal" in message:
        # Image-comparison failure; try to pull the plot paths out of it.
        locations = extract_image_locations(message)
        if locations:
            failed_answers[test_name][message] = locations
365
366
367
if __name__ == "__main__":
    """Report failed answer tests of cloud platforms like Travis, Appveyor

    This script parses the nosetests xml file generated after answer tests are
    executed. If the test fail due to difference in actual and expected images,
    this function uploads a html page having all the plots which got failed
    (if executed with `-f` command line argument).
    In case, answer store does not has a golden answer and if executed with
    `-m` argument, it uploads missing answers zip file to yt's curldrop server.

    """
    # Command-line interface: choose which artifacts (if any) to upload.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-f",
        "--upload-failed-tests",
        action="store_true",
        help="Upload a comparison report of failed answer tests"
        " to yt's curldrop server.",
    )
    parser.add_argument(
        "-m",
        "--upload-missing-answers",
        action="store_true",
        help="Upload tests' answers that are not found in answer-store.",
    )
    parser.add_argument(
        "--xunit-file",
        action="store",
        dest="nosetest_xml",
        required=True,
        help="Name of the nosetests xml file to parse for failed answer tests.",
    )
    args = parser.parse_args()

    # ANSI color codes
    COLOR_PURPLE = "\x1b[35;1m"
    COLOR_CYAN = "\x1b[36;1m"
    COLOR_RESET = "\x1b[0m"
    FLAG_EMOJI = " \U0001f6a9 "

    # Parse the xml report only when at least one upload was requested.
    failed_answers = missing_answers = None
    if args.upload_failed_tests or args.upload_missing_answers:
        failed_answers, missing_answers = parse_nose_xml(args.nosetest_xml)

    if args.upload_failed_tests and failed_answers:
        # First upload the html comparison report of the failed tests ...
        response = upload_failed_answers(failed_answers)
        msg = ""
        if response.ok:
            # curldrop's reply carries the download URL on its second line.
            msg += (
                "\n"
                + FLAG_EMOJI
                + COLOR_PURPLE
                + "Successfully uploaded failed answer test(s) result."
                " More details about the test failure can be found at the"
                " URL: "
                + response.text.split("\n")[1]
                + COLOR_RESET
                + FLAG_EMOJI
                + "\n"
            )
        # ... then regenerate and upload fresh answers for those tests.
        response = upload_answers(failed_answers)
        if response is None:
            log.error("Failed to upload answers for failed tests !")
            sys.exit(1)
        if response.ok:
            msg += (
                FLAG_EMOJI
                + COLOR_CYAN
                + "Successfully uploaded answer(s) for failed test at URL: "
                + response.text.split("\n")[1]
                + " . Please commit these "
                "answers in the repository's answer-store." + COLOR_RESET + FLAG_EMOJI
            )
        log.info(msg)

    if args.upload_missing_answers and missing_answers:
        # Generate and upload answers that are absent from the answer-store.
        response = upload_answers(missing_answers)
        if response is None:
            log.error("Failed to upload missing answers !")
            sys.exit(1)
        # Initialize msg so a not-ok response cannot raise NameError below
        # (the original only assigned msg inside the response.ok branch).
        msg = ""
        if response.ok:
            msg = (
                FLAG_EMOJI
                + COLOR_CYAN
                + "Successfully uploaded missing answer(s) at URL: "
                + response.text.split("\n")[1]
                + " . Please commit these "
                "answers in the repository's answer-store." + COLOR_RESET + FLAG_EMOJI
            )
        log.info(msg)
457
458