Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
sagemath
GitHub Repository: sagemath/sagesmc
Path: blob/master/src/sage/doctest/reporting.py
8817 views
1
"""
2
Reporting doctest results
3
4
This module determines how doctest results are reported to the user.
5
6
It also computes the exit status in the ``error_status`` attribute of
7
:class:DocTestReporter. This is a bitwise OR of the following bits:
8
9
- 1: Doctest failure
10
- 2: Bad command line syntax or invalid options
11
- 4: Test timed out
12
- 8: Test exited with non-zero status
13
- 16: Test crashed with a signal (e.g. segmentation fault)
14
- 32: TAB character found
15
- 64: Internal error in the doctesting framework
16
- 128: Testing interrupted, not all tests run
17
18
19
AUTHORS:
20
21
- David Roe (2012-03-27) -- initial version, based on Robert Bradshaw's code.
22
"""
23
24
#*****************************************************************************
25
# Copyright (C) 2012 David Roe <[email protected]>
26
# Robert Bradshaw <[email protected]>
27
# William Stein <[email protected]>
28
# Copyright (C) 2013 Jeroen Demeyer <[email protected]>
29
#
30
# Distributed under the terms of the GNU General Public License (GPL)
31
# as published by the Free Software Foundation; either version 2 of
32
# the License, or (at your option) any later version.
33
# http://www.gnu.org/licenses/
34
#*****************************************************************************
35
36
37
import sys, signal
38
from sage.structure.sage_object import SageObject
39
from sage.doctest.util import count_noun
40
from sage.doctest.sources import DictAsObject
41
42
def signal_name(sig):
43
"""
44
Return a string describing a signal number.
45
46
EXAMPLES::
47
48
sage: import signal
49
sage: from sage.doctest.reporting import signal_name
50
sage: signal_name(signal.SIGSEGV)
51
'segmentation fault'
52
sage: signal_name(9)
53
'kill signal'
54
sage: signal_name(12345)
55
'signal 12345'
56
"""
57
if sig == signal.SIGHUP:
58
return "hangup"
59
if sig == signal.SIGINT:
60
return "interrupt"
61
if sig == signal.SIGQUIT:
62
return "quit"
63
if sig == signal.SIGILL:
64
return "illegal instruction"
65
if sig == signal.SIGABRT:
66
return "abort"
67
if sig == signal.SIGFPE:
68
return "floating point exception"
69
if sig == signal.SIGKILL:
70
return "kill signal"
71
if sig == signal.SIGSEGV:
72
return "segmentation fault"
73
if sig == signal.SIGPIPE:
74
return "broken pipe"
75
if sig == signal.SIGALRM:
76
return "alarm"
77
if sig == signal.SIGTERM:
78
return "terminate"
79
if sig == signal.SIGBUS:
80
return "bus error"
81
return "signal %s"%sig
82
83
class DocTestReporter(SageObject):
84
"""
85
This class reports to the users on the results of doctests.
86
"""
87
def __init__(self, controller):
88
"""
89
Initialize the reporter.
90
91
INPUT:
92
93
- ``controller`` -- a
94
:class:`sage.doctest.control.DocTestController` instance.
95
Note that some methods assume that appropriate tests have
96
been run by the controller.
97
98
EXAMPLES::
99
100
sage: from sage.doctest.reporting import DocTestReporter
101
sage: from sage.doctest.control import DocTestController, DocTestDefaults
102
sage: from sage.env import SAGE_SRC
103
sage: import os
104
sage: filename = os.path.join(SAGE_SRC,'sage','doctest','reporting.py')
105
sage: DC = DocTestController(DocTestDefaults(),[filename])
106
sage: DTR = DocTestReporter(DC)
107
"""
108
self.controller = controller
109
self.postscript = dict(lines=[], cputime=0, walltime=0)
110
self.sources_completed = 0
111
self.stats = {}
112
self.error_status = 0
113
114
def report_head(self, source):
115
"""
116
Return the "sage -t [options] file.py" line as string.
117
118
INPUT:
119
120
- ``source`` -- a source from :mod:`sage.doctest.sources`
121
122
EXAMPLES::
123
124
sage: from sage.doctest.reporting import DocTestReporter
125
sage: from sage.doctest.control import DocTestController, DocTestDefaults
126
sage: from sage.doctest.sources import FileDocTestSource
127
sage: from sage.doctest.forker import SageDocTestRunner
128
sage: from sage.env import SAGE_SRC
129
sage: filename = os.path.join(SAGE_SRC,'sage','doctest','reporting.py')
130
sage: DD = DocTestDefaults()
131
sage: FDS = FileDocTestSource(filename,DD)
132
sage: DC = DocTestController(DD, [filename])
133
sage: DTR = DocTestReporter(DC)
134
sage: print DTR.report_head(FDS)
135
sage -t .../sage/doctest/reporting.py
136
137
The same with various options::
138
139
sage: DD.long = True
140
sage: print DTR.report_head(FDS)
141
sage -t --long .../sage/doctest/reporting.py
142
"""
143
cmd = "sage -t"
144
if self.controller.options.long:
145
cmd += " --long"
146
warnlong = self.controller.options.warn_long
147
if warnlong is not None:
148
cmd += " --warn-long"
149
if warnlong != 1.0:
150
cmd += " %.1f"%(warnlong)
151
cmd += " " + source.printpath
152
return cmd
153
154
def report(self, source, timeout, return_code, results, output, pid=None):
155
"""
156
Report on the result of running doctests on a given source.
157
158
This doesn't print the :meth:`report_head`, which is assumed
159
to be printed already.
160
161
INPUT:
162
163
- ``source`` -- a source from :mod:`sage.doctest.sources`
164
165
- ``timeout`` -- a boolean, whether doctests timed out
166
167
- ``return_code`` -- an int, the return code of the process
168
running doctests on that file.
169
170
- ``results`` -- (irrelevant if ``timeout`` or
171
``return_code``), a tuple
172
173
- ``ntests`` -- the number of doctests
174
175
- ``timings`` -- a
176
:class:`sage.doctest.sources.DictAsObject` instance
177
storing timing data.
178
179
- ``output`` -- a string, printed if there was some kind of
180
failure
181
182
- ``pid`` -- optional integer (default: ``None``). The pid of
183
the worker process.
184
185
EXAMPLES::
186
187
sage: from sage.doctest.reporting import DocTestReporter
188
sage: from sage.doctest.control import DocTestController, DocTestDefaults
189
sage: from sage.doctest.sources import FileDocTestSource, DictAsObject
190
sage: from sage.doctest.forker import SageDocTestRunner
191
sage: from sage.doctest.parsing import SageOutputChecker
192
sage: from sage.doctest.util import Timer
193
sage: from sage.env import SAGE_SRC
194
sage: import os, sys, doctest
195
sage: filename = os.path.join(SAGE_SRC,'sage','doctest','reporting.py')
196
sage: DD = DocTestDefaults()
197
sage: FDS = FileDocTestSource(filename,DD)
198
sage: DC = DocTestController(DD,[filename])
199
sage: DTR = DocTestReporter(DC)
200
201
You can report a timeout::
202
203
sage: DTR.report(FDS, True, 0, None, "Output so far...", pid=1234)
204
Timed out
205
**********************************************************************
206
Tests run before process (pid=1234) timed out:
207
Output so far...
208
**********************************************************************
209
sage: DTR.stats
210
{'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}
211
212
Or a process that returned a bad exit code::
213
214
sage: DTR.report(FDS, False, 3, None, "Output before trouble")
215
Bad exit: 3
216
**********************************************************************
217
Tests run before process failed:
218
Output before trouble
219
**********************************************************************
220
sage: DTR.stats
221
{'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}
222
223
Or a process that segfaulted::
224
225
sage: import signal
226
sage: DTR.report(FDS, False, -signal.SIGSEGV, None, "Output before trouble")
227
Killed due to segmentation fault
228
**********************************************************************
229
Tests run before process failed:
230
Output before trouble
231
**********************************************************************
232
sage: DTR.stats
233
{'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}
234
235
Report a timeout with results and a ``SIGKILL``::
236
237
sage: DTR.report(FDS, True, -signal.SIGKILL, (1,None), "Output before trouble")
238
Timed out after testing finished (and interrupt failed)
239
**********************************************************************
240
Tests run before process timed out:
241
Output before trouble
242
**********************************************************************
243
sage: DTR.stats
244
{'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}
245
246
This is an internal error since results is None::
247
248
sage: DTR.report(FDS, False, 0, None, "All output")
249
Error in doctesting framework (bad result returned)
250
**********************************************************************
251
Tests run before error:
252
All output
253
**********************************************************************
254
sage: DTR.stats
255
{'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}
256
257
Or tell the user that everything succeeded::
258
259
sage: doctests, extras = FDS.create_doctests(globals())
260
sage: runner = SageDocTestRunner(SageOutputChecker(), verbose=False, sage_options=DD, optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS)
261
sage: Timer().start().stop().annotate(runner)
262
sage: D = DictAsObject({'err':None})
263
sage: runner.update_results(D)
264
0
265
sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), "Good tests")
266
[... tests, 0.00 s]
267
sage: DTR.stats
268
{'sage.doctest.reporting': {'walltime': ...}}
269
270
Or inform the user that some doctests failed::
271
272
sage: runner.failures = 1
273
sage: runner.update_results(D)
274
1
275
sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), "Doctest output including the failure...")
276
[... tests, 1 failure, 0.00 s]
277
278
If the user has requested that we report on skipped doctests,
279
we do so::
280
281
sage: DC.options = DocTestDefaults(show_skipped=True)
282
sage: import collections
283
sage: optionals = collections.defaultdict(int)
284
sage: optionals['magma'] = 5; optionals['long time'] = 4; optionals[''] = 1; optionals['not tested'] = 2
285
sage: D = DictAsObject(dict(err=None,optionals=optionals))
286
sage: runner.failures = 0
287
sage: runner.update_results(D)
288
0
289
sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), "Good tests")
290
1 unlabeled test not run
291
4 long tests not run
292
5 magma tests not run
293
2 other tests skipped
294
[... tests, 0.00 s]
295
296
Test an internal error in the reporter::
297
298
sage: DTR.report(None, None, None, None, None)
299
Traceback (most recent call last):
300
...
301
AttributeError: 'NoneType' object has no attribute 'basename'
302
"""
303
log = self.controller.log
304
process_name = 'process (pid={0})'.format(pid) if pid else 'process'
305
try:
306
postscript = self.postscript
307
stats = self.stats
308
basename = source.basename
309
cmd = self.report_head(source)
310
311
try:
312
ntests, result_dict = results
313
except (TypeError, ValueError):
314
ntests = 0
315
result_dict = DictAsObject(dict(err='badresult'))
316
317
if timeout:
318
fail_msg = "Timed out"
319
if ntests > 0:
320
fail_msg += " after testing finished"
321
if return_code > 0:
322
fail_msg += " (with error after interrupt)"
323
elif return_code < 0:
324
sig = -return_code
325
if sig == signal.SIGKILL:
326
fail_msg += " (and interrupt failed)"
327
else:
328
fail_msg += " (with %s after interrupt)"%signal_name(sig)
329
log(" %s\n%s\nTests run before %s timed out:"%(fail_msg, "*"*70, process_name))
330
log(output)
331
log("*"*70)
332
postscript['lines'].append(cmd + " # %s"%fail_msg)
333
stats[basename] = dict(failed=True, walltime=1e6)
334
self.error_status |= 4
335
elif return_code:
336
if return_code > 0:
337
fail_msg = "Bad exit: %s"%return_code
338
else:
339
fail_msg = "Killed due to %s"%signal_name(-return_code)
340
if ntests > 0:
341
fail_msg += " after testing finished"
342
log(" %s\n%s\nTests run before %s failed:"%(fail_msg,"*"*70, process_name))
343
log(output)
344
log("*"*70)
345
postscript['lines'].append(cmd + " # %s" % fail_msg)
346
stats[basename] = dict(failed=True, walltime=1e6)
347
self.error_status |= (8 if return_code > 0 else 16)
348
else:
349
if hasattr(result_dict, 'walltime') and hasattr(result_dict.walltime, '__len__') and len(result_dict.walltime) > 0:
350
wall = sum(result_dict.walltime) / len(result_dict.walltime)
351
else:
352
wall = 1e6
353
if hasattr(result_dict, 'cputime') and hasattr(result_dict.cputime, '__len__') and len(result_dict.cputime) > 0:
354
cpu = sum(result_dict.cputime) / len(result_dict.cputime)
355
else:
356
cpu = 1e6
357
if result_dict.err == 'badresult':
358
log(" Error in doctesting framework (bad result returned)\n%s\nTests run before error:"%("*"*70))
359
log(output)
360
log("*"*70)
361
postscript['lines'].append(cmd + " # Testing error: bad result")
362
self.error_status |= 64
363
elif result_dict.err == 'noresult':
364
log(" Error in doctesting framework (no result returned)\n%s\nTests run before error:"%("*"*70))
365
log(output)
366
log("*"*70)
367
postscript['lines'].append(cmd + " # Testing error: no result")
368
self.error_status |= 64
369
elif result_dict.err == 'tab':
370
if len(result_dict.tab_linenos) > 5:
371
result_dict.tab_linenos[3:-1] = "..."
372
tabs = " " + ",".join(result_dict.tab_linenos)
373
if len(result_dict.tab_linenos) > 1:
374
tabs = "s" + tabs
375
log(" Error: TAB character found at line%s"%(tabs))
376
postscript['lines'].append(cmd + " # Tab character found")
377
self.error_status |= 32
378
elif result_dict.err is not None:
379
# This case should not occur
380
if result_dict.err is True:
381
fail_msg = "Error in doctesting framework"
382
else:
383
if hasattr(result_dict.err, '__name__'):
384
err = result_dict.err.__name__
385
else:
386
err = repr(result_dict.err)
387
fail_msg = "%s in doctesting framework"%err
388
389
log(" %s\n%s"%(fail_msg, "*"*70))
390
if output:
391
log("Tests run before doctest exception:\n" + output)
392
log("*"*70)
393
postscript['lines'].append(cmd + " # %s"%fail_msg)
394
if hasattr(result_dict, 'tb'):
395
log(result_dict.tb)
396
if hasattr(result_dict, 'walltime'):
397
stats[basename] = dict(failed=True, walltime=wall)
398
else:
399
stats[basename] = dict(failed=True, walltime=1e6)
400
self.error_status |= 64
401
if result_dict.err is None or result_dict.err == 'tab':
402
f = result_dict.failures
403
if f:
404
postscript['lines'].append(cmd + " # %s failed" % (count_noun(f, "doctest")))
405
self.error_status |= 1
406
if f or result_dict.err == 'tab':
407
stats[basename] = dict(failed=True, walltime=wall)
408
else:
409
stats[basename] = dict(walltime=wall)
410
postscript['cputime'] += cpu
411
postscript['walltime'] += wall
412
413
if self.controller.options.show_skipped:
414
try:
415
optionals = result_dict.optionals
416
except AttributeError:
417
optionals = dict()
418
if self.controller.options.optional is not True: # if True we test all optional tags
419
untested = 0 # Report not tested/implemented tests at the end
420
seen_other = False
421
for tag in sorted(optionals.keys()):
422
nskipped = optionals[tag]
423
if tag == "long time":
424
if not self.controller.options.long:
425
seen_other = True
426
log(" %s not run"%(count_noun(nskipped, "long test")))
427
elif tag in ("not tested", "not implemented"):
428
untested += nskipped
429
else:
430
if tag not in self.controller.options.optional:
431
seen_other = True
432
if tag == "bug":
433
log(" %s not run due to known bugs"%(count_noun(nskipped, "test")))
434
elif tag == "":
435
log(" %s not run"%(count_noun(nskipped, "unlabeled test")))
436
else:
437
log(" %s not run"%(count_noun(nskipped, tag + " test")))
438
if untested:
439
log (" %s skipped"%(count_noun(untested, "%stest"%("other " if seen_other else ""))))
440
log(" [%s, %s%.2f s]" % (count_noun(ntests, "test"), "%s, "%(count_noun(f, "failure")) if f else "", wall))
441
self.sources_completed += 1
442
443
except Exception:
444
import traceback
445
log(traceback.format_exc(), end="")
446
447
448
def finalize(self):
449
"""
450
Print out the postcript that summarizes the doctests that were run.
451
452
EXAMPLES:
453
454
First we have to set up a bunch of stuff::
455
456
sage: from sage.doctest.reporting import DocTestReporter
457
sage: from sage.doctest.control import DocTestController, DocTestDefaults
458
sage: from sage.doctest.sources import FileDocTestSource, DictAsObject
459
sage: from sage.doctest.forker import SageDocTestRunner
460
sage: from sage.doctest.parsing import SageOutputChecker
461
sage: from sage.doctest.util import Timer
462
sage: from sage.env import SAGE_SRC
463
sage: import os, sys, doctest
464
sage: filename = os.path.join(SAGE_SRC,'sage','doctest','reporting.py')
465
sage: DD = DocTestDefaults()
466
sage: FDS = FileDocTestSource(filename,DD)
467
sage: DC = DocTestController(DD,[filename])
468
sage: DTR = DocTestReporter(DC)
469
470
Now we pretend to run some doctests::
471
472
sage: DTR.report(FDS, True, 0, None, "Output so far...", pid=1234)
473
Timed out
474
**********************************************************************
475
Tests run before process (pid=1234) timed out:
476
Output so far...
477
**********************************************************************
478
sage: DTR.report(FDS, False, 3, None, "Output before bad exit")
479
Bad exit: 3
480
**********************************************************************
481
Tests run before process failed:
482
Output before bad exit
483
**********************************************************************
484
sage: doctests, extras = FDS.create_doctests(globals())
485
sage: runner = SageDocTestRunner(SageOutputChecker(), verbose=False, sage_options=DD,optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS)
486
sage: t = Timer().start().stop()
487
sage: t.annotate(runner)
488
sage: DC.timer = t
489
sage: D = DictAsObject({'err':None})
490
sage: runner.update_results(D)
491
0
492
sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), "Good tests")
493
[... tests, 0.00 s]
494
sage: runner.failures = 1
495
sage: runner.update_results(D)
496
1
497
sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), "Doctest output including the failure...")
498
[... tests, 1 failure, 0.00 s]
499
500
Now we can show the output of finalize::
501
502
sage: DC.sources = [None] * 4 # to fool the finalize method
503
sage: DTR.finalize()
504
----------------------------------------------------------------------
505
sage -t .../sage/doctest/reporting.py # Timed out
506
sage -t .../sage/doctest/reporting.py # Bad exit: 3
507
sage -t .../sage/doctest/reporting.py # 1 doctest failed
508
----------------------------------------------------------------------
509
Total time for all tests: 0.0 seconds
510
cpu time: 0.0 seconds
511
cumulative wall time: 0.0 seconds
512
513
If we interrupted doctests, then the number of files tested
514
will not match the number of sources on the controller::
515
516
sage: DC.sources = [None] * 6
517
sage: DTR.finalize()
518
<BLANKLINE>
519
----------------------------------------------------------------------
520
sage -t .../sage/doctest/reporting.py # Timed out
521
sage -t .../sage/doctest/reporting.py # Bad exit: 3
522
sage -t .../sage/doctest/reporting.py # 1 doctest failed
523
Doctests interrupted: 4/6 files tested
524
----------------------------------------------------------------------
525
Total time for all tests: 0.0 seconds
526
cpu time: 0.0 seconds
527
cumulative wall time: 0.0 seconds
528
"""
529
log = self.controller.log
530
postscript = self.postscript
531
if self.sources_completed < len(self.controller.sources) * self.controller.options.global_iterations:
532
postscript['lines'].append("Doctests interrupted: %s/%s files tested"%(self.sources_completed, len(self.controller.sources)))
533
self.error_status |= 128
534
elif not postscript['lines']:
535
postscript['lines'].append("All tests passed!")
536
log('-' * 70)
537
log("\n".join(postscript['lines']))
538
log('-' * 70)
539
log("Total time for all tests: %.1f seconds" % self.controller.timer.walltime)
540
log(" cpu time: %.1f seconds" % postscript['cputime'])
541
log(" cumulative wall time: %.1f seconds" % postscript['walltime'])
542
sys.stdout.flush()
543
544