# Library for JSTest tests.
#
# This contains classes that represent an individual test, including
# metadata, and know how to run the tests and determine failures.
import os
import sys
from contextlib import contextmanager
# When run on tbpl, we run each test multiple times with the following
# arguments.
#
# Each variant name maps to a list of flag sets; every inner list is one
# complete set of command-line flags for a separate run of the shell, so a
# variant with N entries runs the suite N times.
JITFLAGS = {
    "all": [
        [],  # no flags, normal baseline and ion
        [
            "--ion-eager",
            "--ion-offthread-compile=off",  # implies --baseline-eager
            "--more-compartments",
        ],
        [
            "--ion-eager",
            "--ion-offthread-compile=off",
            "--ion-check-range-analysis",
            "--ion-extra-checks",
            "--no-sse3",
            "--no-threads",
        ],
        ["--baseline-eager"],
        ["--no-blinterp", "--no-baseline", "--no-ion", "--more-compartments"],
        ["--blinterp-eager"],
    ],
    # Like 'all' above but for jstests. This has fewer jit-specific
    # configurations.
    "jstests": [
        [],  # no flags, normal baseline and ion
        [
            "--ion-eager",
            "--ion-offthread-compile=off",  # implies --baseline-eager
            "--more-compartments",
        ],
        ["--baseline-eager"],
        ["--no-blinterp", "--no-baseline", "--no-ion", "--more-compartments"],
    ],
    # used by jit_test.py
    "ion": [
        ["--baseline-eager"],
        ["--ion-eager", "--ion-offthread-compile=off", "--more-compartments"],
    ],
    # Run reduced variants on debug builds, since they take longer time.
    "debug": [
        [],  # no flags, normal baseline and ion
        [
            "--ion-eager",
            "--ion-offthread-compile=off",  # implies --baseline-eager
            "--more-compartments",
        ],
        ["--baseline-eager"],
    ],
    # Cover cases useful for tsan. Note that we test --ion-eager without
    # --ion-offthread-compile=off here, because it helps catch races.
    "tsan": [
        [],
        [
            "--ion-eager",
            "--ion-check-range-analysis",
            "--ion-extra-checks",
            "--no-sse3",
        ],
        ["--no-blinterp", "--no-baseline", "--no-ion"],
    ],
    "baseline": [
        ["--no-ion"],
    ],
    # Interpreter-only, for tools that cannot handle binary code generation.
    "interp": [
        [
            "--no-blinterp",
            "--no-baseline",
            "--no-asmjs",
            "--wasm-compiler=none",
            "--no-native-regexp",
        ]
    ],
    "none": [[]],  # no flags, normal baseline and ion
}
def get_jitflags(variant, **kwargs):
    """Look up the flag sets to use for the given jit-flag variant.

    For the "none" variant, a |none=...| keyword argument overrides the
    table entry.  Exits the process with status 1 on an unknown variant.
    """
    try:
        flags = JITFLAGS[variant]
    except KeyError:
        print('Invalid jitflag: "{}"'.format(variant))
        sys.exit(1)
    if variant == "none":
        return kwargs.get("none", flags)
    return flags
def valid_jitflags():
    """Return a view of all known jit-flag variant names."""
    return JITFLAGS.keys()
def get_environment_overlay(js_shell):
    """
    Build a dict of additional environment variables that must be set to run
    tests successfully.
    """
    # When updating this also update |buildBrowserEnv| in layout/tools/reftest/runreftest.py.
    overlay = {
        # Force Pacific time zone to avoid failures in Date tests.
        "TZ": "PST8PDT",
        # Force date strings to English.
        "LC_ALL": "en_US.UTF-8",
        # Tell the shell to disable crash dialogs on windows.
        "XRE_NO_WINDOWS_CRASH_DIALOG": "1",
    }

    # Add the binary's directory to the library search path so that we find the
    # nspr and icu we built, instead of the platform supplied ones (or none at
    # all on windows).
    shell_dir = os.path.dirname(js_shell)
    platform = sys.platform
    if platform.startswith("linux"):
        overlay["LD_LIBRARY_PATH"] = shell_dir
    elif platform.startswith("darwin"):
        overlay["DYLD_LIBRARY_PATH"] = shell_dir
    elif platform.startswith("win"):
        overlay["PATH"] = shell_dir
    return overlay
@contextmanager
def change_env(env_overlay):
    """Context manager that applies |env_overlay| to os.environ and restores
    the previous state on exit.

    Keys whose name contains "PATH" and that already exist are prepended to
    (with os.pathsep) rather than overwritten.
    """
    # Snapshot the current values (None means "was not set") before mutating.
    saved = {key: os.environ.get(key) for key in env_overlay}

    for key, new_value in env_overlay.items():
        if "PATH" in key and key in os.environ:
            # Prepend to any existing PATH-like variable rather than clobber it.
            os.environ[key] = "{}{}{}".format(new_value, os.pathsep, os.environ[key])
        else:
            os.environ[key] = new_value

    try:
        # Execute with the new environment.
        yield
    finally:
        # Restore the prior environment.
        for key, old_value in saved.items():
            if old_value is None:
                del os.environ[key]
            else:
                os.environ[key] = old_value
def get_cpu_count():
    """
    Guess at a reasonable parallelism count to set as the default for the
    current machine and run.

    Returns an int >= 1; falls back to 1 when the count cannot be determined.
    """
    # os.cpu_count() (Python 3.4+) subsumes the old multiprocessing /
    # os.sysconf / NUMBER_OF_PROCESSORS fallback chain this function used to
    # carry for Python 2; it returns None when the count is unknown.
    return os.cpu_count() or 1
class RefTestCase(object):
    """A test case consisting of a test and an expected result.

    Instances compare (equality and ordering) by their |path| relative to
    the tests root, consistent with __hash__.
    """

    def __init__(self, root, path, extra_helper_paths=None, wpt=None):
        # str: path of the tests root dir
        self.root = root
        # str: path of JS file relative to tests root dir
        self.path = path
        # [str]: Extra options to pass to the shell
        self.options = []
        # [str]: JIT flags to pass to the shell
        self.jitflags = []
        # [str]: flags to never pass to the shell for this test
        self.ignoredflags = []
        # str or None: path to reflect-stringify.js file to test
        # instead of actually running tests
        self.test_reflect_stringify = None
        # bool: True => test is module code
        self.is_module = False
        # bool: True => test is asynchronous and runs additional code after completing the first
        # turn of the event loop.
        self.is_async = False
        # bool: True => run test, False => don't run
        self.enable = True
        # str?: Optional error type
        self.error = None
        # bool: expected result, True => pass
        self.expect = True
        # bool: True => ignore output as 'random'
        self.random = False
        # bool: True => test may run slowly
        self.slow = False
        # Use self-hosted XDR instead of parsing the source stored in the binary.
        # str?: Path computed when generating the command
        self.selfhosted_xdr_path = None
        # str: XDR mode (= "off", "encode", "decode") to use with the
        # self-hosted code.
        self.selfhosted_xdr_mode = "off"
        # The terms parsed to produce the above properties.
        self.terms = None
        # The tag between |...| in the test header.
        self.tag = None
        # Anything occurring after -- in the test header.
        self.comment = None
        self.extra_helper_paths = extra_helper_paths or []
        self.wpt = wpt

    def prefix_command(self):
        """Return the '-f' options needed to run a test with the given path."""
        path = self.path
        prefix = []
        # Collect every shell.js found on the way up from the test's directory
        # to the root; reversed so outermost shell.js loads first.
        while path != "":
            assert path != "/"
            path = os.path.dirname(path)
            shell_path = os.path.join(self.root, path, "shell.js")
            if os.path.exists(shell_path):
                prefix.append(shell_path)
                prefix.append("-f")
        prefix.reverse()

        for extra_path in self.extra_helper_paths:
            prefix.append("-f")
            prefix.append(extra_path)
        return prefix

    def abs_path(self):
        """Return the absolute path of the test file."""
        return os.path.join(self.root, self.path)

    def get_command(self, prefix, tempdir):
        """Build the complete shell command line (as a list) for this test.

        |prefix| is the shell invocation prefix (path, debugger, args).
        |tempdir| is managed by the caller and should remain alive as long as
        the test harness, so the XDR content of the self-hosted code is
        accessible to all JS shell instances.
        """
        cmd = prefix + self.jitflags + self.options + self.prefix_command()
        if self.selfhosted_xdr_mode != "off":
            self.selfhosted_xdr_path = os.path.join(tempdir, "shell.xdr")
            cmd += [
                "--selfhosted-xdr-path",
                self.selfhosted_xdr_path,
                "--selfhosted-xdr-mode",
                self.selfhosted_xdr_mode,
            ]
        if self.test_reflect_stringify is not None:
            cmd += [self.test_reflect_stringify, "--check", self.abs_path()]
        elif self.is_module:
            cmd += ["--module", self.abs_path()]
        else:
            cmd += ["-f", self.abs_path()]
        # Strip any flags this particular test must never receive.
        for flag in self.ignoredflags:
            if flag in cmd:
                cmd.remove(flag)
        return cmd

    def __str__(self):
        ans = self.path
        if not self.enable:
            ans += ", skip"
        if self.error is not None:
            ans += ", error=" + self.error
        if not self.expect:
            ans += ", fails"
        if self.random:
            ans += ", random"
        if self.slow:
            ans += ", slow"
        if "-d" in self.options:
            ans += ", debugMode"
        return ans

    @staticmethod
    def build_js_cmd_prefix(js_path, js_args, debugger_prefix):
        """Return [*debugger_prefix, js_path, *js_args], skipping empty parts."""
        parts = []
        if debugger_prefix:
            parts += debugger_prefix
        parts.append(js_path)
        if js_args:
            parts += js_args
        return parts

    # NOTE: __cmp__ is a Python 2 protocol that Python 3 ignores entirely, so
    # sorting/equality must go through the rich-comparison methods below.
    # All of them compare by |path|, consistent with __hash__.
    def __eq__(self, other):
        return self.path == other.path

    def __ne__(self, other):
        return self.path != other.path

    def __lt__(self, other):
        return self.path < other.path

    def __le__(self, other):
        return self.path <= other.path

    def __gt__(self, other):
        return self.path > other.path

    def __ge__(self, other):
        return self.path >= other.path

    def __cmp__(self, other):
        # Kept only for any legacy Python 2 callers; Python 3 never calls this.
        if self.path == other.path:
            return 0
        elif self.path < other.path:
            return -1
        return 1

    def __hash__(self):
        return self.path.__hash__()

    def __repr__(self):
        return "<lib.tests.RefTestCase %s>" % (self.path,)