author    John MacFarlane <jgm@berkeley.edu>    2015-01-24 22:00:33 -0800
committer John MacFarlane <jgm@berkeley.edu>    2015-01-24 22:00:33 -0800
commit    9584c98612fcd797a67a0a1cbf0dff702dd0114d (patch)
tree      df1ba59eb692a1691beec9c5d52aaadcef424d30 /test/spec_tests.py
parent    d0f985e7f6f05f0e1a293bfb340728a2a8fc9043 (diff)
Added spec tests.
Diffstat (limited to 'test/spec_tests.py')
-rwxr-xr-x  test/spec_tests.py  139
1 file changed, 139 insertions, 0 deletions
diff --git a/test/spec_tests.py b/test/spec_tests.py
new file mode 100755
index 0000000..6c276ca
--- /dev/null
+++ b/test/spec_tests.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import sys
+from difflib import unified_diff
+import argparse
+import re
+import json
+from normalize import normalize_html
+from cmark import CMark
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='Run cmark tests.')
+    parser.add_argument('-p', '--program', dest='program', nargs='?',
+                        default=None, help='program to test')
+    parser.add_argument('-s', '--spec', dest='spec', nargs='?',
+                        default='spec.txt', help='path to spec')
+    parser.add_argument('-P', '--pattern', dest='pattern', nargs='?',
+                        default=None, help='limit to sections matching regex pattern')
+    parser.add_argument('--library-dir', dest='library_dir', nargs='?',
+                        default=None, help='directory containing dynamic library')
+    parser.add_argument('--no-normalize', dest='normalize',
+                        action='store_const', const=False, default=True,
+                        help='do not normalize HTML')
+    parser.add_argument('-d', '--dump-tests', dest='dump_tests',
+                        action='store_const', const=True, default=False,
+                        help='dump tests in JSON format')
+    parser.add_argument('--debug-normalization', dest='debug_normalization',
+                        action='store_const', const=True, default=False,
+                        help='filter stdin through normalizer for testing')
+    parser.add_argument('-n', '--number', type=int, default=None,
+                        help='only consider the test with the given number')
+    args = parser.parse_args(sys.argv[1:])
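+    # Note: args is a module-level global; do_test below also relies on the
+    # cmark instance created in the main block at the bottom of the file.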
+
+def print_test_header(headertext, example_number, start_line, end_line):
+ print("Example %d (lines %d-%d) %s" % (example_number,start_line,end_line,headertext))
+
+def do_test(test, normalize, result_counts):
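+    # Run one spec example through the program under test and record the
+    # outcome (pass/fail/error) in result_counts.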
+    [retcode, actual_html, err] = cmark.to_html(test['markdown'])
+    if retcode == 0:
+        expected_html = test['html']
+        unicode_error = None
+        if normalize:
+            try:
+                passed = normalize_html(actual_html) == normalize_html(expected_html)
+            except UnicodeDecodeError as e:
+                unicode_error = e
+                passed = False
+        else:
+            passed = actual_html == expected_html
+        if passed:
+            result_counts['pass'] += 1
+        else:
+            print_test_header(test['section'], test['example'], test['start_line'], test['end_line'])
+            sys.stdout.write(test['markdown'])
+            if unicode_error:
+                print("Unicode error: " + str(unicode_error))
+                print("Expected: " + repr(expected_html))
+                print("Got: " + repr(actual_html))
+            else:
+                expected_html_lines = expected_html.splitlines(True)
+                actual_html_lines = actual_html.splitlines(True)
+                for diffline in unified_diff(expected_html_lines, actual_html_lines,
+                                             "expected HTML", "actual HTML"):
+                    sys.stdout.write(diffline)
+            sys.stdout.write('\n')
+            result_counts['fail'] += 1
+    else:
+        print_test_header(test['section'], test['example'], test['start_line'], test['end_line'])
+        print("program returned error code %d" % retcode)
+        print(err)
+        result_counts['error'] += 1
+
+def get_tests(specfile):
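+    # Parse the spec into a list of test-case dicts. Examples are delimited
+    # by lines consisting of a single ".": the first opens the markdown, the
+    # second separates markdown from expected HTML, and the third closes the
+    # example. A "→" in the markdown stands for a literal tab.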
+    line_number = 0
+    start_line = 0
+    end_line = 0
+    example_number = 0
+    markdown_lines = []
+    html_lines = []
+    state = 0  # 0 regular text, 1 markdown example, 2 html output
+    headertext = ''
+    tests = []
+
+    header_re = re.compile('#+ ')
+
+    with open(specfile, 'r', encoding='utf-8') as specf:
+        for line in specf:
+            line_number = line_number + 1
+            if state == 0 and re.match(header_re, line):
+                headertext = header_re.sub('', line).strip()
+            if line.strip() == ".":
+                state = (state + 1) % 3
+                if state == 0:
+                    example_number = example_number + 1
+                    end_line = line_number
+                    tests.append({
+                        "markdown": ''.join(markdown_lines).replace('→', "\t"),
+                        "html": ''.join(html_lines),
+                        "example": example_number,
+                        "start_line": start_line,
+                        "end_line": end_line,
+                        "section": headertext})
+                    start_line = 0
+                    markdown_lines = []
+                    html_lines = []
+            elif state == 1:
+                if start_line == 0:
+                    start_line = line_number - 1
+                markdown_lines.append(line)
+            elif state == 2:
+                html_lines.append(line)
+    return tests
+
+if __name__ == "__main__":
+    if args.debug_normalization:
+        print(normalize_html(sys.stdin.read()))
+        exit(0)
+
+    all_tests = get_tests(args.spec)
+    if args.pattern:
+        pattern_re = re.compile(args.pattern, re.IGNORECASE)
+    else:
+        pattern_re = re.compile('.')
+    tests = [test for test in all_tests
+             if re.search(pattern_re, test['section']) and
+                (not args.number or test['example'] == args.number)]
+    if args.dump_tests:
+        print(json.dumps(tests, ensure_ascii=False, indent=2))
+        exit(0)
+    else:
+        skipped = len(all_tests) - len(tests)
+        cmark = CMark(prog=args.program, library_dir=args.library_dir)
+        result_counts = {'pass': 0, 'fail': 0, 'error': 0, 'skip': skipped}
+        for test in tests:
+            do_test(test, args.normalize, result_counts)
+        print("{pass} passed, {fail} failed, {error} errored, {skip} skipped".format(**result_counts))
+        if result_counts['fail'] == 0 and result_counts['error'] == 0:
+            exit(0)
+        else:
+            exit(1)
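
For reference, a few illustrative invocations of the new script (the build
path and section name below are examples, not part of this commit):

    python3 test/spec_tests.py --program build/src/cmark
    python3 test/spec_tests.py --pattern 'Emphasis' --no-normalize
    python3 test/spec_tests.py --dump-tests > tests.json

The exit status is 0 when all selected tests pass and 1 otherwise, so the
script can be used directly as a CI gate.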