| 1 | #!/usr/bin/env python |
| 2 | |
| 3 | from __future__ import print_function |
| 4 | import os, sys, re |
| 5 | import argparse, time |
| 6 | import signal, atexit |
| 7 | |
| 8 | from subprocess import Popen, STDOUT, PIPE |
| 9 | from select import select |
| 10 | |
| 11 | # Pseudo-TTY and terminal manipulation |
| 12 | import pty, array, fcntl, termios |
| 13 | |
| 14 | IS_PY_3 = sys.version_info[0] == 3 |
| 15 | |
| 16 | debug_file = None |
| 17 | log_file = None |
| 18 | |
def debug(data):
    """Mirror raw child-process interaction to the debug log, if one is open."""
    if not debug_file:
        return
    debug_file.write(data)
    debug_file.flush()
| 23 | |
def log(data, end='\n'):
    """Write *data* (plus *end*) to stdout, flushed, mirroring to log_file if set."""
    message = data + end
    if log_file:
        log_file.write(message)
        log_file.flush()
    sys.stdout.write(message)
    sys.stdout.flush()
| 30 | |
# Separator used between expected-output lines and the expected return
# value when building the match regexps.
sep = "\n"
rundir = None

parser = argparse.ArgumentParser(
    description="Run a test file against a Mal implementation")
parser.add_argument('--rundir',
    help="change to the directory before running tests")
parser.add_argument('--start-timeout', default=10, type=int,
    help="default timeout for initial prompt")
parser.add_argument('--test-timeout', default=20, type=int,
    help="default timeout for each individual test action")
parser.add_argument('--pre-eval', default=None, type=str,
    help="Mal code to evaluate prior to running the test")
parser.add_argument('--no-pty', action='store_true',
    help="Use direct pipes instead of pseudo-tty")
parser.add_argument('--log-file', type=str,
    help="Write messages to the named file in addition to the screen")
parser.add_argument('--debug-file', type=str,
    help="Write all test interaction to the named file")
parser.add_argument('--hard', action='store_true',
    help="Turn soft tests (soft, deferrable, optional) into hard failures")

# Control whether deferrable and optional tests are executed
parser.add_argument('--deferrable', dest='deferrable', action='store_true',
    help="Enable deferrable tests that follow a ';>>> deferrable=True'")
parser.add_argument('--no-deferrable', dest='deferrable', action='store_false',
    help="Disable deferrable tests that follow a ';>>> deferrable=True'")
parser.set_defaults(deferrable=True)
parser.add_argument('--optional', dest='optional', action='store_true',
    help="Enable optional tests that follow a ';>>> optional=True'")
parser.add_argument('--no-optional', dest='optional', action='store_false',
    help="Disable optional tests that follow a ';>>> optional=True'")
parser.set_defaults(optional=True)

parser.add_argument('test_file', type=str,
    help="a test file formatted as with mal test data")
parser.add_argument('mal_cmd', nargs="*",
    help="Mal implementation command line. Use '--' to "
         "specify a Mal command line with dashed options.")
parser.add_argument('--crlf', dest='crlf', action='store_true',
    help="Write \\r\\n instead of \\n to the input")
| 72 | |
class Runner():
    """Drive a Mal implementation subprocess and talk to its REPL.

    By default the child runs on a pseudo-tty so 'interactive' readline
    behaves; with no_pty=True plain pipes are used instead.  The child's
    stdout and stderr are merged into one stream.
    """

    def __init__(self, args, no_pty=False, line_break="\n"):
        self.no_pty = no_pty

        # Cleanup child process on exit
        atexit.register(self.cleanup)

        self.p = None
        # Work on a copy so the tweaks below do not leak into this
        # process's own environment (previously os.environ itself was
        # mutated, changing TERM/INPUTRC/PERL_RL for the runner too).
        env = dict(os.environ)
        env['TERM'] = 'dumb'          # discourage ANSI/VT escape output
        env['INPUTRC'] = '/dev/null'  # ignore the user's readline config
        env['PERL_RL'] = 'false'      # disable Perl ReadLine wrappers
        if no_pty:
            self.p = Popen(args, bufsize=0,
                           stdin=PIPE, stdout=PIPE, stderr=STDOUT,
                           preexec_fn=os.setsid,
                           env=env)
            self.stdin = self.p.stdin
            self.stdout = self.p.stdout
        else:
            # provide tty to get 'interactive' readline to work
            master, slave = pty.openpty()

            # Set terminal size large so that readline will not send
            # ANSI/VT escape codes when the lines are long.
            buf = array.array('h', [100, 200, 0, 0])
            fcntl.ioctl(master, termios.TIOCSWINSZ, buf, True)

            self.p = Popen(args, bufsize=0,
                           stdin=slave, stdout=slave, stderr=STDOUT,
                           preexec_fn=os.setsid,
                           env=env)
            # Now close slave so that we will get an exception from
            # read when the child exits early
            # http://stackoverflow.com/questions/11165521
            os.close(slave)
            self.stdin = os.fdopen(master, 'r+b', 0)
            self.stdout = self.stdin

        # Accumulated child output not yet consumed by read_to_prompt().
        self.buf = ""
        self.last_prompt = ""

        self.line_break = line_break

    def read_to_prompt(self, prompts, timeout):
        """Read child output until one of the *prompts* regexps matches.

        Returns the text preceding the matched prompt (the prompt itself
        is consumed), or None if *timeout* seconds pass without a match.
        """
        end_time = time.time() + timeout
        while time.time() < end_time:
            # Poll with a 1-second tick so the deadline is honored even
            # when the child stays silent.
            [outs, _, _] = select([self.stdout], [], [], 1)
            if self.stdout in outs:
                new_data = self.stdout.read(1)
                # NOTE(review): decoding a single byte assumes ASCII-safe
                # output; a multi-byte UTF-8 sequence would raise here.
                new_data = new_data.decode("utf-8") if IS_PY_3 else new_data
                debug(new_data)
                # Perform newline cleanup: treat \r\n and \n the same.
                self.buf += new_data.replace("\r", "")
                for prompt in prompts:
                    regexp = re.compile(prompt)
                    match = regexp.search(self.buf)
                    if match:
                        end = match.end()
                        buf = self.buf[0:match.start()]
                        self.buf = self.buf[end:]
                        self.last_prompt = prompt
                        return buf
        return None

    def writeline(self, str):
        """Send one line to the child, appending the configured line break.

        (Parameter name shadows the builtin 'str'; kept for interface
        compatibility.)
        """
        def _to_bytes(s):
            return bytes(s, "utf-8") if IS_PY_3 else s

        # Prefix embedded carriage returns with ^V (literal-next) so
        # readline passes them through instead of acting on them.
        self.stdin.write(_to_bytes(str.replace('\r', '\x16\r') + self.line_break))

    def cleanup(self):
        """Terminate the child's whole process group; ignore already-dead children."""
        if self.p:
            try:
                # os.setsid in __init__ made the child a group leader, so
                # this also signals any grandchildren it spawned.
                os.killpg(self.p.pid, signal.SIGTERM)
            except OSError:
                pass
            self.p = None
| 155 | |
class TestReader:
    """Parse a Mal test-data file into messages and (form, output, return) tests.

    Test files contain: forms to send, ';=>' expected return values,
    ';/' expected output lines (regexps), ';;' message comments,
    ';;;' ignored comments, and ';>>> ' inline settings assignments.
    """

    def __init__(self, test_file):
        self.line_num = 0
        # newline='' on Python 3 keeps line endings untranslated.
        f = open(test_file, newline='') if IS_PY_3 else open(test_file)
        try:
            # Slurp the whole file; close the handle rather than leak it.
            self.data = f.read().split('\n')
        finally:
            f.close()
        self.soft = False        # when True, subsequent failures are "soft"
        self.deferrable = False  # message set once a deferrable section starts
        self.optional = False    # message set once an optional section starts

    def next(self):
        """Advance to the next message or test.

        Returns a truthy value while input remains: True after a message
        (self.msg) or a section-start settings line, or the form string
        for a test (with self.out / self.ret populated).  Returns a falsy
        value when the file is exhausted.
        """
        self.msg = None
        self.form = None
        self.out = ""
        self.ret = None

        while self.data:
            self.line_num += 1
            line = self.data.pop(0)
            if re.match(r"^\s*$", line): # blank line
                continue
            elif line[0:3] == ";;;":     # ignore comment
                continue
            elif line[0:2] == ";;":      # output comment
                self.msg = line[3:]
                return True
            elif line[0:5] == ";>>> ":   # settings/commands
                settings = {}
                # SECURITY NOTE: executes code embedded in the test file.
                # Test data is trusted input; never run untrusted files.
                exec(line[5:], {}, settings)
                if 'soft' in settings:
                    self.soft = settings['soft']
                if 'deferrable' in settings and settings['deferrable']:
                    self.deferrable = "\nSkipping deferrable and optional tests"
                    return True
                if 'optional' in settings and settings['optional']:
                    self.optional = "\nSkipping optional tests"
                    return True
                continue
            elif line[0:1] == ";":       # unexpected comment
                raise Exception("Test data error at line %d:\n%s" % (self.line_num, line))
            self.form = line             # the line is a form to send

            # Now find the expected output lines and the return value.
            while self.data:
                line = self.data[0]
                if line[0:3] == ";=>":   # expected return value
                    self.ret = line[3:]
                    self.line_num += 1
                    self.data.pop(0)
                    break
                elif line[0:2] == ";/":  # expected output line (regexp)
                    self.out = self.out + line[2:] + sep
                    self.line_num += 1
                    self.data.pop(0)
                else:                    # no ';=>': result is ignored
                    self.ret = ""
                    break
            if self.ret != None: break

        if self.out[-1:] == sep and not self.ret:
            # If there is no return value, output should not end in
            # separator
            self.out = self.out[0:-1]
        return self.form
| 219 | |
| 220 | args = parser.parse_args(sys.argv[1:]) |
| 221 | # Workaround argparse issue with two '--' on command line |
| 222 | if sys.argv.count('--') > 0: |
| 223 | args.mal_cmd = sys.argv[sys.argv.index('--')+1:] |
| 224 | |
| 225 | if args.rundir: os.chdir(args.rundir) |
| 226 | |
| 227 | if args.log_file: log_file = open(args.log_file, "a") |
| 228 | if args.debug_file: debug_file = open(args.debug_file, "a") |
| 229 | |
| 230 | r = Runner(args.mal_cmd, no_pty=args.no_pty, line_break="\r\n" if args.crlf else "\n") |
| 231 | t = TestReader(args.test_file) |
| 232 | |
| 233 | |
def assert_prompt(runner, prompts, timeout):
    """Wait until one of *prompts* appears on *runner*; exit(1) on timeout.

    Any banner text the implementation printed before its first prompt is
    logged.  Returns None on success.
    """
    # Wait for the initial prompt
    header = runner.read_to_prompt(prompts, timeout=timeout)
    if header is not None:
        if header:
            log("Started with:\n%s" % header)
    else:
        log("Did not receive one of following prompt(s): %s" % repr(prompts))
        # Bug fix: report the buffer of the runner we were handed, not the
        # module-level global 'r'.
        log("    Got      : %s" % repr(runner.buf))
        sys.exit(1)
| 244 | |
| 245 | |
# Wait for the initial prompt.  Raw strings: '\s' inside a non-raw literal
# is an invalid escape sequence (SyntaxWarning on newer Pythons); the value
# is unchanged.
try:
    assert_prompt(r, [r'[^\s()<>]+> '], args.start_timeout)
except:
    # Broad except on purpose: any startup failure is reported with the
    # buffered child output before exiting.
    _, exc, _ = sys.exc_info()
    log("\nException: %s" % repr(exc))
    log("Output before exception:\n%s" % r.buf)
    sys.exit(1)

# Send the pre-eval code if any
if args.pre_eval:
    sys.stdout.write("RUNNING pre-eval: %s" % args.pre_eval)
    r.writeline(args.pre_eval)
    assert_prompt(r, [r'[^\s()<>]+> '], args.test_timeout)
| 260 | |
# Counters for the final summary.
test_cnt = 0
pass_cnt = 0
fail_cnt = 0
soft_fail_cnt = 0
failures = []

# Main loop: one iteration per message/settings line or test form.
while t.next():
    # Once a disabled deferrable/optional section starts, stop entirely
    # (these sections come at the end of the test file).
    if args.deferrable == False and t.deferrable:
        log(t.deferrable)
        break

    if args.optional == False and t.optional:
        log(t.optional)
        break

    # ';;' message lines are simply echoed.
    if t.msg != None:
        log(t.msg)
        continue

    if t.form == None: continue

    log("TEST: %s -> [%s,%s]" % (repr(t.form), repr(t.out), t.ret), end='')

    # The repeated form is to get around an occasional OS X issue
    # where the form is repeated.
    # https://github.com/kanaka/mal/issues/30
    # NOTE: t.out is embedded unescaped, so ';/' lines in the test data
    # act as regular expressions; only the return value is escaped.
    expects = [".*%s%s%s" % (sep, t.out, re.escape(t.ret)),
               ".*%s.*%s%s%s" % (sep, sep, t.out, re.escape(t.ret))]

    r.writeline(t.form)
    try:
        test_cnt += 1
        # Read up to the next prompt; res holds everything printed in
        # response to the form (echo, output, return value).
        res = r.read_to_prompt(['\r\n[^\s()<>]+> ', '\n[^\s()<>]+> '],
                                timeout=args.test_timeout)
        if (t.ret == "" and t.out == ""):
            # No expected return value recorded: any result passes.
            log(" -> SUCCESS (result ignored)")
            pass_cnt += 1
        elif (re.search(expects[0], res, re.S) or
                re.search(expects[1], res, re.S)):
            log(" -> SUCCESS")
            pass_cnt += 1
        else:
            # Soft failures (inside a ';>>> soft=True' section) only count
            # as hard failures when --hard was given.
            if t.soft and not args.hard:
                log(" -> SOFT FAIL (line %d):" % t.line_num)
                soft_fail_cnt += 1
                fail_type = "SOFT "
            else:
                log(" -> FAIL (line %d):" % t.line_num)
                fail_cnt += 1
                fail_type = ""
            log("    Expected : %s" % repr(expects[0]))
            log("    Got      : %s" % repr(res))
            failed_test = """%sFAILED TEST (line %d): %s -> [%s,%s]:
    Expected : %s
    Got      : %s""" % (fail_type, t.line_num, t.form, repr(t.out),
                        t.ret, repr(expects[0]), repr(res))
            failures.append(failed_test)
    except:
        # Broad except on purpose: report the exception plus whatever
        # output had been buffered, then abort the run.
        _, exc, _ = sys.exc_info()
        log("\nException: %s" % repr(exc))
        log("Output before exception:\n%s" % r.buf)
        sys.exit(1)
| 324 | |
# Re-list all failures together so they are easy to scan after a long run.
if len(failures) > 0:
    log("\nFAILURES:")
    for f in failures:
        log(f)

results = """
TEST RESULTS (for %s):
%3d: soft failing tests
%3d: failing tests
%3d: passing tests
%3d: total tests
""" % (args.test_file, soft_fail_cnt, fail_cnt,
       pass_cnt, test_cnt)
log(results)

debug("\n") # add some separation to the debug log

# Exit status reflects hard failures only; soft failures do not fail the run.
if fail_cnt > 0:
    sys.exit(1)
sys.exit(0)