#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
The JS Shell Test Harness.
See the adjacent README.txt for more details.
"""
import math
import os
import platform
import posixpath
import re
import shlex
import sys
import tempfile
from contextlib import contextmanager
from copy import copy
from datetime import datetime
from itertools import chain
from os.path import abspath, dirname, isfile, realpath
from subprocess import call, list2cmdline
from lib.adaptor import xdr_annotate
from lib.progressbar import ProgressBar
from lib.results import ResultsSink, TestOutput
from lib.tempfile import TemporaryDirectory
from lib.tests import (
RefTestCase,
change_env,
get_cpu_count,
get_environment_overlay,
get_jitflags,
)
if sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
from lib.tasks_unix import run_all_tests
else:
from lib.tasks_win import run_all_tests
here = dirname(abspath(__file__))
@contextmanager
def changedir(dirname):
pwd = os.getcwd()
os.chdir(dirname)
try:
yield
finally:
os.chdir(pwd)
class PathOptions(object):
def __init__(self, location, requested_paths, excluded_paths):
self.requested_paths = requested_paths
self.excluded_files, self.excluded_dirs = PathOptions._split_files_and_dirs(
location, excluded_paths
)
@staticmethod
def _split_files_and_dirs(location, paths):
"""Split up a set of paths into files and directories"""
files, dirs = set(), set()
for path in paths:
fullpath = os.path.join(location, path)
if path.endswith("/"):
dirs.add(path[:-1])
elif os.path.isdir(fullpath):
dirs.add(path)
elif os.path.exists(fullpath):
files.add(path)
return files, dirs
def should_run(self, filename):
# If any tests are requested by name, skip tests that do not match.
if self.requested_paths and not any(
req in filename for req in self.requested_paths
):
return False
# Skip excluded tests.
if filename in self.excluded_files:
return False
for dir in self.excluded_dirs:
if filename.startswith(dir + "/"):
return False
return True
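# Illustrative semantics (hypothetical paths): with requested_paths={"Date"}
# and excluded_paths={"ecma/"}, should_run("non262/Date/now.js") is True,
# while should_run("ecma/Date/now.js") is False, because the excluded
# directory prefix wins over the substring match on the requested name.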
def parse_args():
"""
Parse command line arguments.
    Returns a tuple of: (options, prefix, requested_paths, excluded_paths)
        options :object: The parsed ArgumentParser namespace; options.js_shell
                         holds the absolute location of the shell to test with.
        prefix :list<str>: The command prefix (debugger wrapper, shell binary,
                           and shell arguments) used to run each test.
        requested_paths :set<str>: Test paths specifically requested on the CLI.
        excluded_paths :set<str>: Test paths specifically excluded by the CLI.
"""
from argparse import ArgumentParser
op = ArgumentParser(
description="Run jstests JS shell tests",
epilog="Shell output format: [ pass | fail | timeout | skip ] progress | time",
)
op.add_argument(
"--xul-info",
dest="xul_info_src",
help="config data for xulRuntime" " (avoids search for config/autoconf.mk)",
)
harness_og = op.add_argument_group("Harness Controls", "Control how tests are run.")
harness_og.add_argument(
"-j",
"--worker-count",
type=int,
default=max(1, get_cpu_count()),
help="Number of tests to run in parallel" " (default %(default)s)",
)
harness_og.add_argument(
"-t",
"--timeout",
type=float,
default=150.0,
help="Set maximum time a test is allows to run" " (in seconds).",
)
harness_og.add_argument(
"--show-slow",
action="store_true",
help="Show tests taking longer than a minimum time" " (in seconds).",
)
harness_og.add_argument(
"--slow-test-threshold",
type=float,
default=5.0,
help="Time in seconds a test can take until it is"
"considered slow (default %(default)s).",
)
harness_og.add_argument(
"-a",
"--args",
dest="shell_args",
default="",
help="Extra args to pass to the JS shell.",
)
harness_og.add_argument(
"--feature-args",
dest="feature_args",
default="",
help="Extra args to pass to the JS shell even when feature-testing.",
)
harness_og.add_argument(
"--jitflags",
dest="jitflags",
default="none",
type=str,
help="IonMonkey option combinations. One of all,"
" debug, ion, and none (default %(default)s).",
)
harness_og.add_argument(
"--tbpl",
action="store_true",
help="Runs each test in all configurations tbpl" " tests.",
)
harness_og.add_argument(
"--tbpl-debug",
action="store_true",
help="Runs each test in some faster configurations" " tbpl tests.",
)
harness_og.add_argument(
"-g", "--debug", action="store_true", help="Run a test in debugger."
)
harness_og.add_argument(
"--debugger", default="gdb -q --args", help="Debugger command."
)
harness_og.add_argument(
"-J", "--jorendb", action="store_true", help="Run under JS debugger."
)
harness_og.add_argument(
"--passthrough",
action="store_true",
help="Run tests with stdin/stdout attached to" " caller.",
)
harness_og.add_argument(
"--test-reflect-stringify",
dest="test_reflect_stringify",
help="instead of running tests, use them to test the "
"Reflect.stringify code in specified file",
)
harness_og.add_argument(
"--valgrind", action="store_true", help="Run tests in valgrind."
)
harness_og.add_argument(
"--valgrind-args", default="", help="Extra args to pass to valgrind."
)
harness_og.add_argument(
"--rr",
action="store_true",
help="Run tests under RR record-and-replay debugger.",
)
harness_og.add_argument(
"-C",
"--check-output",
action="store_true",
help="Run tests to check output for different jit-flags",
)
harness_og.add_argument(
"--remote", action="store_true", help="Run tests on a remote device"
)
harness_og.add_argument(
"--deviceIP",
action="store",
type=str,
dest="device_ip",
help="IP address of remote device to test",
)
harness_og.add_argument(
"--devicePort",
action="store",
type=int,
dest="device_port",
default=20701,
help="port of remote device to test",
)
harness_og.add_argument(
"--deviceSerial",
action="store",
type=str,
dest="device_serial",
default=None,
help="ADB device serial number of remote device to test",
)
harness_og.add_argument(
"--remoteTestRoot",
dest="remote_test_root",
action="store",
type=str,
default="/data/local/tmp/test_root",
help="The remote directory to use as test root" " (e.g. %(default)s)",
)
harness_og.add_argument(
"--localLib",
dest="local_lib",
action="store",
type=str,
help="The location of libraries to push -- preferably" " stripped",
)
harness_og.add_argument(
"--no-xdr",
dest="use_xdr",
action="store_false",
help="Whether to disable caching of self-hosted parsed content in XDR format.",
)
input_og = op.add_argument_group("Inputs", "Change what tests are run.")
input_og.add_argument(
"-f",
"--file",
dest="test_file",
action="append",
help="Get tests from the given file.",
)
input_og.add_argument(
"-x",
"--exclude-file",
action="append",
help="Exclude tests from the given file.",
)
input_og.add_argument(
"--wpt",
dest="wpt",
choices=["enabled", "disabled", "if-running-everything"],
default="if-running-everything",
help="Enable or disable shell web-platform-tests "
"(default: enable if no test paths are specified).",
)
input_og.add_argument(
"--include",
action="append",
dest="requested_paths",
default=[],
help="Include the given test file or directory.",
)
input_og.add_argument(
"--exclude",
action="append",
dest="excluded_paths",
default=[],
help="Exclude the given test file or directory.",
)
input_og.add_argument(
"-d",
"--exclude-random",
dest="random",
action="store_false",
help='Exclude tests marked as "random."',
)
input_og.add_argument(
"--run-skipped", action="store_true", help='Run tests marked as "skip."'
)
input_og.add_argument(
"--run-only-skipped",
action="store_true",
help='Run only tests marked as "skip."',
)
input_og.add_argument(
"--run-slow-tests",
action="store_true",
help='Do not skip tests marked as "slow."',
)
input_og.add_argument(
"--no-extensions",
action="store_true",
help="Run only tests conforming to the ECMAScript 5" " standard.",
)
input_og.add_argument(
"--repeat", type=int, default=1, help="Repeat tests the given number of times."
)
output_og = op.add_argument_group("Output", "Modify the harness and tests output.")
output_og.add_argument(
"-s",
"--show-cmd",
action="store_true",
help="Show exact commandline used to run each test.",
)
output_og.add_argument(
"-o",
"--show-output",
action="store_true",
help="Print each test's output to the file given by" " --output-file.",
)
output_og.add_argument(
"-F",
"--failed-only",
action="store_true",
help="If a --show-* option is given, only print" " output for failed tests.",
)
output_og.add_argument(
"--no-show-failed",
action="store_true",
help="Don't print output for failed tests" " (no-op with --show-output).",
)
output_og.add_argument(
"-O",
"--output-file",
help="Write all output to the given file" " (default: stdout).",
)
output_og.add_argument(
"--failure-file", help="Write all not-passed tests to the given file."
)
output_og.add_argument(
"--no-progress",
dest="hide_progress",
action="store_true",
help="Do not show the progress bar.",
)
output_og.add_argument(
"--tinderbox",
dest="format",
action="store_const",
const="automation",
help="Use automation-parseable output format.",
)
output_og.add_argument(
"--format",
dest="format",
default="none",
choices=["automation", "none"],
help="Output format. Either automation or none" " (default %(default)s).",
)
output_og.add_argument(
"--log-wptreport",
dest="wptreport",
action="store",
help="Path to write a Web Platform Tests report (wptreport)",
)
output_og.add_argument(
"--this-chunk", type=int, default=1, help="The test chunk to run."
)
output_og.add_argument(
"--total-chunks", type=int, default=1, help="The total number of test chunks."
)
special_og = op.add_argument_group(
"Special", "Special modes that do not run tests."
)
special_og.add_argument(
"--make-manifests",
metavar="BASE_TEST_PATH",
help="Generate reftest manifest files.",
)
op.add_argument("--js-shell", metavar="JS_SHELL", help="JS shell to run tests with")
op.add_argument(
"-z", "--gc-zeal", help="GC zeal mode to use when running the shell"
)
options, args = op.parse_known_args()
# Need a shell unless in a special mode.
if not options.make_manifests:
if not args:
op.error("missing JS_SHELL argument")
options.js_shell = os.path.abspath(args.pop(0))
requested_paths = set(args)
# Valgrind, gdb, and rr are mutually exclusive.
if sum(map(bool, (options.valgrind, options.debug, options.rr))) > 1:
op.error("--valgrind, --debug, and --rr are mutually exclusive.")
# Fill the debugger field, as needed.
if options.debug:
if options.debugger == "lldb":
debugger_prefix = ["lldb", "--"]
else:
debugger_prefix = options.debugger.split()
else:
debugger_prefix = []
if options.valgrind:
debugger_prefix = ["valgrind"] + options.valgrind_args.split()
if os.uname()[0] == "Darwin":
debugger_prefix.append("--dsymutil=yes")
options.show_output = True
if options.rr:
debugger_prefix = ["rr", "record"]
js_cmd_args = shlex.split(options.shell_args) + shlex.split(options.feature_args)
if options.jorendb:
options.passthrough = True
options.hide_progress = True
options.worker_count = 1
debugger_path = realpath(
os.path.join(
abspath(dirname(abspath(__file__))),
"..",
"..",
"examples",
"jorendb.js",
)
)
js_cmd_args.extend(["-d", "-f", debugger_path, "--"])
prefix = RefTestCase.build_js_cmd_prefix(
options.js_shell, js_cmd_args, debugger_prefix
)
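    # prefix is now the per-test command skeleton: roughly the debugger
    # wrapper (if any) followed by the shell binary and the shell/feature
    # arguments assembled above; each test appends its own file arguments.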
# If files with lists of tests to run were specified, add them to the
# requested tests set.
    if options.test_file:
        for test_file in options.test_file:
            with open(test_file) as fp:
                requested_paths |= set(line.strip() for line in fp)
excluded_paths = set(options.excluded_paths)
# If files with lists of tests to exclude were specified, add them to the
# excluded tests set.
if options.exclude_file:
for filename in options.exclude_file:
with open(filename, "r") as fp:
for line in fp:
if line.startswith("#"):
continue
line = line.strip()
if not line:
continue
excluded_paths.add(line)
# Handle output redirection, if requested and relevant.
options.output_fp = sys.stdout
if options.output_file:
if not options.show_cmd:
options.show_output = True
try:
options.output_fp = open(options.output_file, "w")
except IOError as ex:
raise SystemExit("Failed to open output file: " + str(ex))
# Hide the progress bar if it will get in the way of other output.
options.hide_progress = (
options.format == "automation"
or not ProgressBar.conservative_isatty()
or options.hide_progress
)
return (options, prefix, requested_paths, excluded_paths)
def load_wpt_tests(xul_tester, requested_paths, excluded_paths, update_manifest=True):
"""Return a list of `RefTestCase` objects for the jsshell testharness.js
    tests filtered by the given paths and the build's debug status."""
repo_root = abspath(os.path.join(here, "..", "..", ".."))
wp = os.path.join(repo_root, "testing", "web-platform")
wpt = os.path.join(wp, "tests")
sys_paths = [
"python/mozterm",
"python/mozboot",
"testing/mozbase/mozcrash",
"testing/mozbase/mozdevice",
"testing/mozbase/mozfile",
"testing/mozbase/mozinfo",
"testing/mozbase/mozleak",
"testing/mozbase/mozlog",
"testing/mozbase/mozprocess",
"testing/mozbase/mozprofile",
"testing/mozbase/mozrunner",
"testing/mozbase/mozversion",
"testing/web-platform/",
"testing/web-platform/tests/tools",
"testing/web-platform/tests/tools/third_party/html5lib",
"testing/web-platform/tests/tools/third_party/webencodings",
"testing/web-platform/tests/tools/wptrunner",
"testing/web-platform/tests/tools/wptserve",
"third_party/python/requests",
]
abs_sys_paths = [os.path.join(repo_root, path) for path in sys_paths]
failed = False
for path in abs_sys_paths:
if not os.path.isdir(path):
failed = True
print("Could not add '%s' to the path")
if failed:
return []
sys.path[0:0] = abs_sys_paths
import manifestupdate
from wptrunner import products, testloader, wptcommandline, wptlogging, wpttest
manifest_root = tempfile.gettempdir()
(maybe_dist, maybe_bin) = os.path.split(os.path.dirname(xul_tester.js_bin))
if maybe_bin == "bin":
(maybe_root, maybe_dist) = os.path.split(maybe_dist)
if maybe_dist == "dist":
if os.path.exists(os.path.join(maybe_root, "_tests")):
# Assume this is a gecko objdir.
manifest_root = maybe_root
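    # e.g. a js_bin of "<objdir>/dist/bin/js" walks up to "<objdir>", which is
    # only used as the manifest root when "<objdir>/_tests" actually exists;
    # otherwise the manifest stays in the system temp directory.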
logger = wptlogging.setup({}, {})
test_manifests = manifestupdate.run(
repo_root, manifest_root, logger, update=update_manifest
)
kwargs = vars(wptcommandline.create_parser().parse_args([]))
kwargs.update(
{
"config": os.path.join(
manifest_root, "_tests", "web-platform", "wptrunner.local.ini"
),
"gecko_e10s": False,
"product": "firefox",
"verify": False,
"wasm": xul_tester.test("wasmIsSupported()"),
}
)
wptcommandline.set_from_config(kwargs)
def filter_jsshell_tests(it):
for item_type, path, tests in it:
tests = set(item for item in tests if item.jsshell)
if tests:
yield item_type, path, tests
run_info_extras = products.Product(kwargs["config"], "firefox").run_info_extras(
logger, **kwargs
)
run_info = wpttest.get_run_info(
kwargs["run_info"],
"firefox",
debug=xul_tester.test("isDebugBuild"),
extras=run_info_extras,
)
release_or_beta = xul_tester.test("getBuildConfiguration('release_or_beta')")
run_info["release_or_beta"] = release_or_beta
run_info["nightly_build"] = not release_or_beta
early_beta_or_earlier = xul_tester.test(
"getBuildConfiguration('early_beta_or_earlier')"
)
run_info["early_beta_or_earlier"] = early_beta_or_earlier
path_filter = testloader.TestFilter(
test_manifests, include=requested_paths, exclude=excluded_paths
)
subsuites = testloader.load_subsuites(logger, run_info, None, set())
loader = testloader.TestLoader(
test_manifests,
["testharness"],
run_info,
subsuites=subsuites,
manifest_filters=[path_filter, filter_jsshell_tests],
)
extra_helper_paths = [
os.path.join(here, "web-platform-test-shims.js"),
os.path.join(wpt, "resources", "testharness.js"),
os.path.join(here, "testharnessreport.js"),
]
def resolve(test_path, script):
if script.startswith("/"):
return os.path.join(wpt, script[1:])
return os.path.join(wpt, os.path.dirname(test_path), script)
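    # e.g. resolve("dom/a.html", "/resources/testharness.js") yields
    # "<wpt>/resources/testharness.js", while a relative script such as
    # "helper.js" resolves next to the test file itself.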
tests = []
for test in loader.tests[""]["testharness"]:
test_path = os.path.relpath(test.path, wpt)
scripts = [resolve(test_path, s) for s in test.scripts]
extra_helper_paths_for_test = extra_helper_paths + scripts
# We must create at least one test with the default options, along with
# one test for each option given in a test-also annotation.
options = [None]
for m in test.itermeta():
if m.has_key("test-also"): # NOQA: W601
options += m.get("test-also").split()
for option in options:
test_case = RefTestCase(
wpt,
test_path,
extra_helper_paths=extra_helper_paths_for_test[:],
wpt=test,
)
if option:
test_case.options.append(option)
tests.append(test_case)
return tests
def load_tests(options, requested_paths, excluded_paths):
"""
Returns a tuple: (test_count, test_gen)
test_count: [int] Number of tests that will be in test_gen
test_gen: [iterable<Test>] Tests found that should be run.
"""
import lib.manifest as manifest
if options.js_shell is None:
xul_tester = manifest.NullXULInfoTester()
else:
if options.xul_info_src is None:
xul_info = manifest.XULInfo.create(options.js_shell)
else:
xul_abi, xul_os, xul_debug = options.xul_info_src.split(r":")
xul_debug = xul_debug.lower() == "true"
xul_info = manifest.XULInfo(xul_abi, xul_os, xul_debug)
feature_args = shlex.split(options.feature_args)
xul_tester = manifest.XULInfoTester(xul_info, options, feature_args)
test_dir = dirname(abspath(__file__))
path_options = PathOptions(test_dir, requested_paths, excluded_paths)
test_count = manifest.count_tests(test_dir, path_options)
test_gen = manifest.load_reftests(test_dir, path_options, xul_tester)
# WPT tests are already run in the browser in their own harness.
wpt_enabled = options.wpt == "enabled" or (
options.wpt == "if-running-everything"
and len(requested_paths) == 0
and not options.make_manifests
)
if wpt_enabled:
wpt_tests = load_wpt_tests(xul_tester, requested_paths, excluded_paths)
test_count += len(wpt_tests)
test_gen = chain(test_gen, wpt_tests)
if options.test_reflect_stringify is not None:
def trs_gen(tests):
for test in tests:
test.test_reflect_stringify = options.test_reflect_stringify
# Even if the test is not normally expected to pass, we still
# expect reflect-stringify to be able to handle it.
test.expect = True
test.random = False
test.slow = False
yield test
test_gen = trs_gen(test_gen)
if options.make_manifests:
manifest.make_manifests(options.make_manifests, test_gen)
sys.exit()
# Create a new test list. Apply each TBPL configuration to every test.
flags_list = None
if options.tbpl:
flags_list = get_jitflags("all")
elif options.tbpl_debug:
flags_list = get_jitflags("debug")
else:
flags_list = get_jitflags(options.jitflags, none=None)
if flags_list:
def flag_gen(tests):
for test in tests:
for jitflags in flags_list:
tmp_test = copy(test)
tmp_test.jitflags = copy(test.jitflags)
tmp_test.jitflags.extend(jitflags)
yield tmp_test
test_count = test_count * len(flags_list)
test_gen = flag_gen(test_gen)
if options.test_file:
paths = set()
        for test_file in options.test_file:
            with open(test_file) as fp:
                paths |= set(line.strip() for line in fp)
test_gen = (_ for _ in test_gen if _.path in paths)
if options.no_extensions:
pattern = os.sep + "extensions" + os.sep
test_gen = (_ for _ in test_gen if pattern not in _.path)
if not options.random:
test_gen = (_ for _ in test_gen if not _.random)
if options.run_only_skipped:
options.run_skipped = True
test_gen = (_ for _ in test_gen if not _.enable)
if not options.run_slow_tests:
test_gen = (_ for _ in test_gen if not _.slow)
if options.repeat:
test_gen = (test for test in test_gen for i in range(options.repeat))
test_count *= options.repeat
return test_count, test_gen
def main():
options, prefix, requested_paths, excluded_paths = parse_args()
if options.js_shell is not None and not (
isfile(options.js_shell) and os.access(options.js_shell, os.X_OK)
):
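        # On Windows, a bare "js" path may really be "js.exe"; only report an
        # error when no executable ".exe" variant exists either.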
if (
platform.system() != "Windows"
or isfile(options.js_shell)
or not isfile(options.js_shell + ".exe")
or not os.access(options.js_shell + ".exe", os.X_OK)
):
print("Could not find executable shell: " + options.js_shell)
return 1
test_count, test_gen = load_tests(options, requested_paths, excluded_paths)
test_environment = get_environment_overlay(options.js_shell, options.gc_zeal)
if test_count == 0:
print("no tests selected")
return 1
test_dir = dirname(abspath(__file__))
if options.debug:
if test_count > 1:
print(
"Multiple tests match command line arguments,"
" debugger can only run one"
)
for tc in test_gen:
print(" {}".format(tc.path))
return 2
with changedir(test_dir), change_env(
test_environment
), TemporaryDirectory() as tempdir:
cmd = next(test_gen).get_command(prefix, tempdir)
if options.show_cmd:
print(list2cmdline(cmd))
call(cmd)
return 0
    # The test_gen generator is converted into a list inside run_all_tests
    # anyway, so materialize it here when we need to slice it.
    #
    # If chunking is enabled, determine which tests are part of this chunk.
    # This code was adapted from testing/mochitest/runtestsremote.py.
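    # Worked example: 100 tests with --total-chunks=3 gives
    # tests_per_chunk = ceil(100 / 3) = 34 and slices [0:34], [34:68],
    # [68:102] (the last slice is clamped to the list length).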
if options.total_chunks > 1:
tests_per_chunk = math.ceil(test_count / float(options.total_chunks))
start = int(round((options.this_chunk - 1) * tests_per_chunk))
end = int(round(options.this_chunk * tests_per_chunk))
test_gen = list(test_gen)[start:end]
if options.remote:
results = ResultsSink("jstests", options, test_count)
try:
from lib.remote import init_device, init_remote_dir
device = init_device(options)
tempdir = posixpath.join(options.remote_test_root, "tmp")
jtd_tests = posixpath.join(options.remote_test_root, "tests", "tests")
init_remote_dir(device, jtd_tests)
device.push(test_dir, jtd_tests, timeout=600)
device.chmod(jtd_tests, recursive=True)
prefix[0] = options.js_shell
if options.use_xdr:
test_gen = xdr_annotate(test_gen, options)
for test in test_gen:
out = run_test_remote(test, device, prefix, tempdir, options)
results.push(out)
results.finish(True)
except KeyboardInterrupt:
results.finish(False)
return 0 if results.all_passed() else 1
with changedir(test_dir), change_env(
test_environment
), TemporaryDirectory() as tempdir:
results = ResultsSink("jstests", options, test_count)
try:
for out in run_all_tests(test_gen, prefix, tempdir, results.pb, options):
results.push(out)
results.finish(True)
except KeyboardInterrupt:
results.finish(False)
return 0 if results.all_passed() else 1
return 0
def run_test_remote(test, device, prefix, tempdir, options):
from mozdevice import ADBDevice, ADBProcessError
cmd = test.get_command(prefix, tempdir)
test_root_parent = os.path.dirname(test.root)
jtd_tests = posixpath.join(options.remote_test_root, "tests")
cmd = [_.replace(test_root_parent, jtd_tests) for _ in cmd]
env = {"TZ": "PST8PDT", "LD_LIBRARY_PATH": os.path.dirname(prefix[0])}
adb_cmd = ADBDevice._escape_command_line(cmd)
start = datetime.now()
try:
# Allow ADBError or ADBTimeoutError to terminate the test run,
# but handle ADBProcessError in order to support the use of
# non-zero exit codes in the JavaScript shell tests.
out = device.shell_output(
adb_cmd, env=env, cwd=options.remote_test_root, timeout=int(options.timeout)
)
returncode = 0
except ADBProcessError as e:
# Treat ignorable intermittent adb communication errors as
# skipped tests.
out = str(e.adb_process.stdout)
returncode = e.adb_process.exitcode
re_ignore = re.compile(r"error: (closed|device .* not found)")
if returncode == 1 and re_ignore.search(out):
print("Skipping {} due to ignorable adb error {}".format(test.path, out))
test.skip_if_cond = "true"
returncode = test.SKIPPED_EXIT_STATUS
elapsed = (datetime.now() - start).total_seconds()
# We can't distinguish between stdout and stderr so we pass
# the same buffer to both.
return TestOutput(test, cmd, out, out, returncode, elapsed, False)
if __name__ == "__main__":
sys.exit(main())