"""
Digress testing core.
"""
from digress.errors import SkippedTestError, DisabledTestError, NoSuchTestError, \
FailedTestError, AlreadyRunError, SCMError, \
ComparisonError
from digress.constants import *
from digress.cli import dispatchable
import inspect
import operator
import os
import json
import textwrap
from shutil import rmtree
from time import time
from functools import wraps
from itertools import izip_longest
from hashlib import sha1

class depends(object):
    """
    Dependency decorator for a test.
    """
    def __init__(self, *test_names):
        self.test_names = test_names

    def __call__(self, func):
        func.digress_depends = self.test_names
        return func
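
# A minimal usage sketch (hypothetical test case, for illustration only).
# Dependencies are named without the "test_" prefix, matching the lookup
# performed by Case._run_test below:
#
#     class MyCase(Case):
#         def test_build(self):
#             ...
#
#         @depends("build")
#         def test_output(self):
#             ...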

class _skipped(object):
    """
    Internal skipped decorator.
    """
    def __init__(self, reason=""):
        self._reason = reason

    def __call__(self, func):
        @wraps(func)
        def _closure(*args):
            raise SkippedTestError(self._reason)
        return _closure

class disabled(object):
    """
    Disable a test, with reason.
    """
    def __init__(self, reason=""):
        self._reason = reason

    def __call__(self, func):
        @wraps(func)
        def _closure(*args):
            raise DisabledTestError(self._reason)
        return _closure
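
# Usage sketch (hypothetical test method): the decorated test raises
# DisabledTestError with the given reason instead of running.
#
#     class MyCase(Case):
#         @disabled("crashes on big-endian hosts")
#         def test_endianness(self):
#             ...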

class comparer(object):
    """
    Set the comparer for a test.
    """
    def __init__(self, comparer_):
        self._comparer = comparer_

    def __call__(self, func):
        func.digress_comparer = self._comparer
        return func
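
# A minimal comparer sketch (hypothetical): a comparer takes the two test
# values being compared and raises ComparisonError when they differ. It is
# invoked by Fixture.bisect and Fixture.compare below.
#
#     def exact_comparer(value_a, value_b):
#         if value_a != value_b:
#             raise ComparisonError("%r != %r" % (value_a, value_b))
#
#     class MyCase(Case):
#         @comparer(exact_comparer)
#         def test_checksum(self):
#             ...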

class Fixture(object):
    cases = []
    scm = None
    flush_before = False

    def _skip_case(self, case, depend):
        for name, meth in inspect.getmembers(case):
            if name[:5] == "test_":
                setattr(
                    case,
                    name,
                    _skipped("failed dependency: case %s" % depend)(meth)
                )

    def _run_case(self, case, results):
        if case.__name__ in results:
            raise AlreadyRunError

        for depend in case.depends:
            if depend.__name__ in results and results[depend.__name__]["status"] != CASE_PASS:
                self._skip_case(case, depend.__name__)

            try:
                result = self._run_case(depend, results)
            except AlreadyRunError:
                continue

            if result["status"] != CASE_PASS:
                self._skip_case(case, depend.__name__)

        result = case().run()
        results[case.__name__] = result

        return result

    @dispatchable
    def flush(self, revision=None):
        """
        Flush any cached results. Takes an optional revision argument.
        """
        if not revision:
            print "Flushing all cached results...",
            try:
                rmtree(".digress_%s" % self.__class__.__name__)
            except Exception, e:
                print "failed: %s" % e
            else:
                print "done."
        else:
            try:
                rev = self.scm.rev_parse(revision)
            except SCMError, e:
                print e
            else:
                print "Flushing cached results for %s..." % rev,
                try:
                    rmtree(os.path.join(".digress_%s" % self.__class__.__name__, rev))
                except Exception, e:
                    print "failed: %s" % e
                else:
                    print "done."

    @dispatchable
    def run(self, revision=None):
        """
        Run the fixture for a specified revision.
        Takes an optional revision argument; defaults to HEAD, or to the
        dirty working tree if there are uncommitted changes.
        """
        oldrev = None
        oldbranch = None
        dirty = False

        try:
            dirty = self.scm.dirty()

            # if the tree is clean, then we don't need to make an exception
            if not dirty and revision is None: revision = "HEAD"

            if revision:
                oldrev = self.scm.current_rev()
                oldbranch = self.scm.current_branch()

                if dirty:
                    self.scm.stash()

                self.scm.checkout(revision)
                rev = self.scm.current_rev()

                self.datastore = os.path.join(".digress_%s" % self.__class__.__name__, rev)

                if os.path.isdir(self.datastore):
                    if self.flush_before:
                        self.flush(rev)
                else:
                    os.makedirs(self.datastore)
            else:
                rev = "(dirty working tree)"
                self.datastore = None

            print "Running fixture %s on revision %s...\n" % (self.__class__.__name__, rev)

            results = {}

            for case in self.cases:
                try:
                    self._run_case(case, results)
                except AlreadyRunError:
                    continue

            total_time = reduce(operator.add, filter(
                None,
                [
                    result["time"] for result in results.values()
                ]
            ), 0)

            overall_status = (
                CASE_FAIL in [ result["status"] for result in results.values() ]
            ) and FIXTURE_FAIL or FIXTURE_PASS

            print "Fixture %s in %.4f.\n" % (
                (overall_status == FIXTURE_PASS) and "passed" or "failed",
                total_time
            )

            return { "cases" : results, "time" : total_time, "status" : overall_status, "revision" : rev }
        finally:
            if oldrev:
                self.scm.checkout(oldrev)
            if oldbranch:
                self.scm.checkout(oldbranch)
            # only unstash if we actually stashed above (a stash only happens
            # when checking out a revision, i.e. when oldrev was recorded)
            if dirty and oldrev:
                self.scm.unstash()

    @dispatchable
    def bisect(self, good_rev, bad_rev=None):
        """
        Perform a bisection between two revisions.
        First argument is the good revision, second is the bad revision, which
        defaults to the current revision.
        """
        if not bad_rev: bad_rev = self.scm.current_rev()

        dirty = False

        # get a set of results for the good revision
        good_result = self.run(good_rev)
        good_rev = good_result["revision"]

        try:
            dirty = self.scm.dirty()

            if dirty:
                self.scm.stash()

            self.scm.bisect("start")
            self.scm.bisect("bad", bad_rev)
            self.scm.bisect("good", good_rev)

            bisecting = True

            while bisecting:
                # reset the verdict for each revision the bisection visits
                isbad = False

                results = self.run(self.scm.current_rev())
                revision = results["revision"]

                # perform comparisons
                # FIXME: this just uses a lot of self.compare
                for case_name, case_result in good_result["cases"].iteritems():
                    case = filter(lambda case: case.__name__ == case_name, self.cases)[0]

                    for test_name, test_result in case_result["tests"].iteritems():
                        test = filter(
                            lambda pair: pair[0] == "test_%s" % test_name,
                            inspect.getmembers(case)
                        )[0][1]

                        other_result = results["cases"][case_name]["tests"][test_name]

                        # compare against the good revision's result for this
                        # test, not the case's aggregate status
                        if other_result["status"] == TEST_FAIL and test_result["status"] != TEST_FAIL:
                            print "Revision %s failed %s.%s." % (revision, case_name, test_name)
                            isbad = True
                            break
                        elif hasattr(test, "digress_comparer"):
                            try:
                                test.digress_comparer(test_result["value"], other_result["value"])
                            except ComparisonError, e:
                                print "%s differs: %s" % (test_name, e)
                                isbad = True
                                break

                if isbad:
                    output = self.scm.bisect("bad", revision)
                    print "Marking revision %s as bad." % revision
                else:
                    output = self.scm.bisect("good", revision)
                    print "Marking revision %s as good." % revision

                if output.split("\n")[0].endswith("is the first bad commit"):
                    print "\nBisection complete.\n"
                    print output
                    bisecting = False

                print ""
        except SCMError, e:
            print e
        finally:
            self.scm.bisect("reset")
            if dirty:
                self.scm.unstash()

    @dispatchable
    def multicompare(self, rev_a=None, rev_b=None, mode="waterfall"):
        """
        Generate a comparison of tests.
        Takes three optional arguments: the revision to compare from, the
        revision to compare to, and the display mode (defaults to the
        vertical "waterfall"; also accepts "river" for horizontal display).
        """
        if not rev_a: rev_a = self.scm.current_rev()
        if not rev_b: rev_b = self.scm.current_rev()

        revisions = self.scm.revisions(rev_a, rev_b)

        results = []
        for revision in revisions:
            results.append(self.run(revision))

        test_names = reduce(operator.add, [
            [
                (case_name, test_name)
                for test_name, test_result
                in case_result["tests"].iteritems()
            ]
            for case_name, case_result
            in results[0]["cases"].iteritems()
        ], [])

        MAXLEN = 20
        colfmt = "| %s "

        table = []

        if mode not in ("waterfall", "river"):
            mode = "waterfall"
            print "Unknown multicompare mode specified, defaulting to %s." % mode

        if mode == "waterfall":
            header = [ "Test" ]
            for result in results:
                header.append(result["revision"])
            table.append(header)

            for test_name in test_names:
                row_data = [ ".".join(test_name) ]
                for result in results:
                    test_result = result["cases"][test_name[0]]["tests"][test_name[1]]
                    if test_result["status"] != TEST_PASS:
                        value = "did not pass: %s" % (test_result["value"])
                    else:
                        value = "%s (%.4f)" % (test_result["value"], test_result["time"])
                    row_data.append(value)
                table.append(row_data)
        elif mode == "river":
            header = [ "Revision" ]
            for test_name in test_names:
                header.append(".".join(test_name))
            table.append(header)

            for result in results:
                row_data = [ result["revision"] ]
                # iterate in test_names order so the columns line up with the
                # header (dict iteration order is not guaranteed to match it)
                for case_name, test_name in test_names:
                    test_result = result["cases"][case_name]["tests"][test_name]
                    if test_result["status"] != TEST_PASS:
                        value = "did not pass: %s" % (test_result["value"])
                    else:
                        value = "%s (%.4f)" % (test_result["value"], test_result["time"])
                    row_data.append(value)
                table.append(row_data)

        breaker = "=" * (len(colfmt % "".center(MAXLEN)) * len(table[0]) + 1)

        print breaker
        for row in table:
            for row_stuff in izip_longest(*[
                textwrap.wrap(col, MAXLEN, break_on_hyphens=False) for col in row
            ], fillvalue=""):
                row_output = ""
                for col in row_stuff:
                    row_output += colfmt % col.ljust(MAXLEN)
                row_output += "|"
                print row_output
            print breaker

    @dispatchable
    def compare(self, rev_a, rev_b=None):
        """
        Compare two revisions directly.
        Takes two arguments; the second is optional and defaults to the
        current revision.
        """
        results_a = self.run(rev_a)
        results_b = self.run(rev_b)

        for case_name, case_result in results_a["cases"].iteritems():
            case = filter(lambda case: case.__name__ == case_name, self.cases)[0]

            header = "Comparison of case %s" % case_name
            print header
            print "=" * len(header)

            for test_name, test_result in case_result["tests"].iteritems():
                test = filter(
                    lambda pair: pair[0] == "test_%s" % test_name,
                    inspect.getmembers(case)
                )[0][1]

                other_result = results_b["cases"][case_name]["tests"][test_name]

                if test_result["status"] != TEST_PASS or other_result["status"] != TEST_PASS:
                    print "%s cannot be compared as one of the revisions has not passed it." % test_name
                elif hasattr(test, "digress_comparer"):
                    try:
                        test.digress_comparer(test_result["value"], other_result["value"])
                    except ComparisonError, e:
                        print "%s differs: %s" % (test_name, e)
                    else:
                        print "%s does not differ." % test_name
                else:
                    print "%s has no comparer and therefore cannot be compared." % test_name

            print ""

    @dispatchable
    def list(self):
        """
        List all available test cases, excluding dependencies.
        """
        print "\nAvailable Test Cases"
        print "===================="
        for case in self.cases:
            print case.__name__

    def register_case(self, case):
        case.fixture = self
        self.cases.append(case)

class Case(object):
    depends = []
    fixture = None

    def _get_test_by_name(self, test_name):
        if not hasattr(self, "test_%s" % test_name):
            raise NoSuchTestError(test_name)
        return getattr(self, "test_%s" % test_name)

    def _run_test(self, test, results):
        test_name = test.__name__[5:]

        if test_name in results:
            raise AlreadyRunError

        if hasattr(test, "digress_depends"):
            for depend in test.digress_depends:
                if depend in results and results[depend]["status"] != TEST_PASS:
                    test = _skipped("failed dependency: %s" % depend)(test)

                dependtest = self._get_test_by_name(depend)

                try:
                    result = self._run_test(dependtest, results)
                except AlreadyRunError:
                    continue

                if result["status"] != TEST_PASS:
                    test = _skipped("failed dependency: %s" % depend)(test)

        start_time = time()
        run_time = None

        print "Running test %s..." % test_name,

        try:
            if not self.datastore:
                # XXX: this smells funny -- abuse IOError to fall through to
                # the uncached path below
                raise IOError

            with open(os.path.join(
                self.datastore,
                "%s.json" % sha1(test_name).hexdigest()
            ), "r") as f:
                result = json.load(f)

            value = str(result["value"])

            if result["status"] == TEST_DISABLED:
                status = "disabled"
            elif result["status"] == TEST_SKIPPED:
                status = "skipped"
            elif result["status"] == TEST_FAIL:
                status = "failed"
            elif result["status"] == TEST_PASS:
                status = "passed"
                value = "%s (in %.4f)" % (
                    result["value"] or "(no result)",
                    result["time"]
                )
            else:
                status = "???"

            print "%s (cached): %s" % (status, value)
        except IOError:
            try:
                value = test()
            except DisabledTestError, e:
                print "disabled: %s" % e
                status = TEST_DISABLED
                value = str(e)
            except SkippedTestError, e:
                print "skipped: %s" % e
                status = TEST_SKIPPED
                value = str(e)
            except FailedTestError, e:
                print "failed: %s" % e
                status = TEST_FAIL
                value = str(e)
            except Exception, e:
                print "failed with exception: %s" % e
                status = TEST_FAIL
                value = str(e)
            else:
                run_time = time() - start_time
                print "passed: %s (in %.4f)" % (
                    value or "(no result)",
                    run_time
                )
                status = TEST_PASS

            result = { "status" : status, "value" : value, "time" : run_time }

            if self.datastore:
                with open(os.path.join(
                    self.datastore,
                    "%s.json" % sha1(test_name).hexdigest()
                ), "w") as f:
                    json.dump(result, f)

        results[test_name] = result
        return result

    def run(self):
        print "Running case %s..." % self.__class__.__name__

        if self.fixture.datastore:
            self.datastore = os.path.join(
                self.fixture.datastore,
                sha1(self.__class__.__name__).hexdigest()
            )
            if not os.path.isdir(self.datastore):
                os.makedirs(self.datastore)
        else:
            self.datastore = None

        results = {}

        for name, meth in inspect.getmembers(self):
            if name[:5] == "test_":
                try:
                    self._run_test(meth, results)
                except AlreadyRunError:
                    continue

        total_time = reduce(operator.add, filter(
            None, [
                result["time"] for result in results.values()
            ]
        ), 0)

        overall_status = (
            TEST_FAIL in [ result["status"] for result in results.values() ]
        ) and CASE_FAIL or CASE_PASS

        print "Case %s in %.4f.\n" % (
            (overall_status == CASE_PASS) and "passed" or "failed",
            total_time
        )

        return { "tests" : results, "time" : total_time, "status" : overall_status }