repo:   rliterman/csp2
file:   CSP2/CSP2_env/env-d9b9114564458d9d-741b3de822f2aaca6c6caa4325c4afce/lib/python3.8/unittest/runner.py @ 68:5028fdace37b
commit: planemo upload commit 2e9511a184a1ca667c7be0c6321a36dc4e3d116d
author: jpayne
date:   Tue, 18 Mar 2025 16:23:26 -0400

"""Running tests"""

import sys
import time
import warnings

from . import result
from .signals import registerResult

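# __unittest marks this module for unittest's traceback filtering, so frames
# from this file are omitted from the tracebacks shown in failure reports.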
__unittest = True


class _WritelnDecorator(object):
    """Used to decorate file-like objects with a handy 'writeln' method"""
    def __init__(self,stream):
        self.stream = stream

    def __getattr__(self, attr):
        if attr in ('stream', '__getstate__'):
            raise AttributeError(attr)
        return getattr(self.stream,attr)

    def writeln(self, arg=None):
        if arg:
            self.write(arg)
        self.write('\n') # text-mode streams translate to \r\n if needed


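# Illustrative sketch (not part of the original module): _WritelnDecorator
# forwards every other attribute to the wrapped stream via __getattr__ and
# only adds writeln(), e.g.
#
#     out = _WritelnDecorator(sys.stderr)
#     out.writeln("hello")   # write("hello") then write("\n") on sys.stderr
#     out.flush()            # delegated to sys.stderr.flush
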
class TextTestResult(result.TestResult):
    """A test result class that can print formatted text results to a stream.

    Used by TextTestRunner.
    """
    separator1 = '=' * 70
    separator2 = '-' * 70

    def __init__(self, stream, descriptions, verbosity):
        super(TextTestResult, self).__init__(stream, descriptions, verbosity)
        self.stream = stream
        self.showAll = verbosity > 1
        self.dots = verbosity == 1
        self.descriptions = descriptions

    def getDescription(self, test):
        doc_first_line = test.shortDescription()
        if self.descriptions and doc_first_line:
            return '\n'.join((str(test), doc_first_line))
        else:
            return str(test)

    def startTest(self, test):
        super(TextTestResult, self).startTest(test)
        if self.showAll:
            self.stream.write(self.getDescription(test))
            self.stream.write(" ... ")
            self.stream.flush()

    def addSuccess(self, test):
        super(TextTestResult, self).addSuccess(test)
        if self.showAll:
            self.stream.writeln("ok")
        elif self.dots:
            self.stream.write('.')
            self.stream.flush()

    def addError(self, test, err):
        super(TextTestResult, self).addError(test, err)
        if self.showAll:
            self.stream.writeln("ERROR")
        elif self.dots:
            self.stream.write('E')
            self.stream.flush()

    def addFailure(self, test, err):
        super(TextTestResult, self).addFailure(test, err)
        if self.showAll:
            self.stream.writeln("FAIL")
        elif self.dots:
            self.stream.write('F')
            self.stream.flush()

    def addSkip(self, test, reason):
        super(TextTestResult, self).addSkip(test, reason)
        if self.showAll:
            self.stream.writeln("skipped {0!r}".format(reason))
        elif self.dots:
            self.stream.write("s")
            self.stream.flush()

    def addExpectedFailure(self, test, err):
        super(TextTestResult, self).addExpectedFailure(test, err)
        if self.showAll:
            self.stream.writeln("expected failure")
        elif self.dots:
            self.stream.write("x")
            self.stream.flush()

    def addUnexpectedSuccess(self, test):
        super(TextTestResult, self).addUnexpectedSuccess(test)
        if self.showAll:
            self.stream.writeln("unexpected success")
        elif self.dots:
            self.stream.write("u")
            self.stream.flush()

    def printErrors(self):
        if self.dots or self.showAll:
            self.stream.writeln()
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)

    def printErrorList(self, flavour, errors):
        for test, err in errors:
            self.stream.writeln(self.separator1)
            self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
            self.stream.writeln(self.separator2)
            self.stream.writeln("%s" % err)


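# Illustrative sketch (not part of the original module): the verbosity passed
# to TextTestResult selects the reporting style above (verbosity == 1 prints a
# dot per test, verbosity > 1 prints a "test ... ok" line per test).  A
# subclass can override the same hooks, e.g. to stay silent on successes:
#
#     class QuietTextTestResult(TextTestResult):
#         def addSuccess(self, test):
#             # record the success without writing "ok" or "."
#             result.TestResult.addSuccess(self, test)
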
class TextTestRunner(object):
    """A test runner class that displays results in textual form.

    It prints out the names of tests as they are run, errors as they
    occur, and a summary of the results at the end of the test run.
    """
    resultclass = TextTestResult

    def __init__(self, stream=None, descriptions=True, verbosity=1,
                 failfast=False, buffer=False, resultclass=None, warnings=None,
                 *, tb_locals=False):
        """Construct a TextTestRunner.

        Subclasses should accept **kwargs to ensure compatibility as the
        interface changes.
        """
        if stream is None:
            stream = sys.stderr
        self.stream = _WritelnDecorator(stream)
        self.descriptions = descriptions
        self.verbosity = verbosity
        self.failfast = failfast
        self.buffer = buffer
        self.tb_locals = tb_locals
        self.warnings = warnings
        if resultclass is not None:
            self.resultclass = resultclass

    def _makeResult(self):
        return self.resultclass(self.stream, self.descriptions, self.verbosity)

    def run(self, test):
        "Run the given test case or test suite."
        result = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer
        result.tb_locals = self.tb_locals
        with warnings.catch_warnings():
            if self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is 'default' or 'always', special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy. The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ['default', 'always']:
                    warnings.filterwarnings('module',
                            category=DeprecationWarning,
                            message=r'Please use assert\w+ instead.')
            startTime = time.perf_counter()
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()
            try:
                test(result)
            finally:
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()
            stopTime = time.perf_counter()
        timeTaken = stopTime - startTime
        result.printErrors()
        if hasattr(result, 'separator2'):
            self.stream.writeln(result.separator2)
        run = result.testsRun
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (run, run != 1 and "s" or "", timeTaken))
        self.stream.writeln()

        expectedFails = unexpectedSuccesses = skipped = 0
        try:
            results = map(len, (result.expectedFailures,
                                result.unexpectedSuccesses,
                                result.skipped))
        except AttributeError:
            pass
        else:
            expectedFails, unexpectedSuccesses, skipped = results

        infos = []
        if not result.wasSuccessful():
            self.stream.write("FAILED")
            failed, errored = len(result.failures), len(result.errors)
            if failed:
                infos.append("failures=%d" % failed)
            if errored:
                infos.append("errors=%d" % errored)
        else:
            self.stream.write("OK")
        if skipped:
            infos.append("skipped=%d" % skipped)
        if expectedFails:
            infos.append("expected failures=%d" % expectedFails)
        if unexpectedSuccesses:
            infos.append("unexpected successes=%d" % unexpectedSuccesses)
        if infos:
            self.stream.writeln(" (%s)" % (", ".join(infos),))
        else:
            self.stream.write("\n")
        return result
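
# Illustrative usage sketch (not part of the original module); "test_example"
# is a hypothetical module containing unittest.TestCase subclasses:
#
#     import unittest
#     suite = unittest.defaultTestLoader.loadTestsFromName("test_example")
#     runner = unittest.TextTestRunner(verbosity=2, failfast=False)
#     outcome = runner.run(suite)
#     sys.exit(0 if outcome.wasSuccessful() else 1)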