/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_CacheIR_h
#define jit_CacheIR_h
#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"

#include <algorithm>
#include <stddef.h>
#include <stdint.h>

#include "jstypes.h"

#include "jit/CacheIROpsGenerated.h"
#include "js/GCAnnotations.h"
#include "js/Value.h"
struct JS_PUBLIC_API JSContext;
namespace js {
namespace jit {
// [SMDOC] CacheIR
//
// CacheIR is an (extremely simple) linear IR language for inline caches.
// From this IR, we can generate machine code for Baseline or Ion IC stubs.
//
// IRWriter
// --------
// CacheIR bytecode is written using IRWriter. This class also records some
// metadata that's used by the Baseline and Ion code generators to generate
// (efficient) machine code.
//
// Sharing Baseline stub code
// --------------------------
// Baseline stores data (like Shape* and fixed slot offsets) inside the ICStub
// structure, instead of embedding them directly in the JitCode. This makes
// Baseline IC code slightly slower, but allows us to share IC code between
// caches. CacheIR makes it easy to share code between stubs: stubs that have
// the same CacheIR (and CacheKind), will have the same Baseline stub code.
//
// Baseline stubs that share JitCode also share a CacheIRStubInfo structure.
// This class stores the CacheIR and the location of GC things stored in the
// stub, for the GC.
//
// JitZone has a CacheIRStubInfo* -> JitCode* weak map that's used to share both
// the IR and JitCode between Baseline CacheIR stubs. This HashMap owns the
// stubInfo (it uses UniquePtr), so once there are no references left to the
// shared stub code, we can also free the CacheIRStubInfo.
//
// Ion stubs
// ---------
// Unlike Baseline stubs, Ion stubs do not share stub code, and data stored in
// the IonICStub is baked into JIT code. This is one of the reasons Ion stubs
// are faster than Baseline stubs. Also note that Ion ICs contain more state
// (see IonGetPropertyIC for example) and use dynamic input/output registers,
// so sharing stub code for Ion would be much more difficult.
// An OperandId represents either a cache input or a value returned by a
// CacheIR instruction. Most code should use the ValOperandId and ObjOperandId
// classes below. The ObjOperandId class represents an operand that's known to
// be an object, just as StringOperandId represents a known string, etc.
// Identifies a single CacheIR operand: either an IC input or the result of a
// CacheIR instruction. The typed subclasses below carry the type the operand
// is known to have.
class OperandId {
 protected:
  // Sentinel meaning "no operand assigned yet".
  static const uint16_t InvalidId = UINT16_MAX;

  uint16_t id_ = InvalidId;

  explicit OperandId(uint16_t id) : id_(id) {}

 public:
  // Default-constructed ids are invalid until assigned.
  OperandId() = default;

  uint16_t id() const { return id_; }
  bool valid() const { return InvalidId != id_; }
};
// Operand known to hold a boxed Value.
class ValOperandId : public OperandId {
 public:
  ValOperandId() = default;
  explicit ValOperandId(uint16_t id) : OperandId(id) {}
};

// Operand holding a Value's extracted type tag.
class ValueTagOperandId : public OperandId {
 public:
  ValueTagOperandId() = default;
  explicit ValueTagOperandId(uint16_t id) : OperandId(id) {}
};

// Operand known to be a pointer-sized integer.
class IntPtrOperandId : public OperandId {
 public:
  IntPtrOperandId() = default;
  explicit IntPtrOperandId(uint16_t id) : OperandId(id) {}
};

// Operand known to be an object.
class ObjOperandId : public OperandId {
 public:
  ObjOperandId() = default;
  explicit ObjOperandId(uint16_t id) : OperandId(id) {}

  bool operator==(const ObjOperandId& other) const { return id_ == other.id_; }
  bool operator!=(const ObjOperandId& other) const { return id_ != other.id_; }
};

// Boxed Value refined to a number. Note: derives from ValOperandId (it is
// still a Value), unlike the other typed ids which derive from OperandId.
class NumberOperandId : public ValOperandId {
 public:
  NumberOperandId() = default;
  explicit NumberOperandId(uint16_t id) : ValOperandId(id) {}
};

// Operand known to be a string.
class StringOperandId : public OperandId {
 public:
  StringOperandId() = default;
  explicit StringOperandId(uint16_t id) : OperandId(id) {}
};

// Operand known to be a symbol.
class SymbolOperandId : public OperandId {
 public:
  SymbolOperandId() = default;
  explicit SymbolOperandId(uint16_t id) : OperandId(id) {}
};

// Operand known to be a BigInt.
class BigIntOperandId : public OperandId {
 public:
  BigIntOperandId() = default;
  explicit BigIntOperandId(uint16_t id) : OperandId(id) {}
};

// Operand known to be a boolean.
class BooleanOperandId : public OperandId {
 public:
  BooleanOperandId() = default;
  explicit BooleanOperandId(uint16_t id) : OperandId(id) {}
};

// Operand known to be an int32.
class Int32OperandId : public OperandId {
 public:
  Int32OperandId() = default;
  explicit Int32OperandId(uint16_t id) : OperandId(id) {}
};
// An operand id together with the JSValueType its payload is known to have.
// Implicitly constructible from each typed operand id class; operands whose
// payload is not a boxed-value type (value tags, intptr) are tagged
// JSVAL_TYPE_UNKNOWN.
class TypedOperandId : public OperandId {
  JSValueType type_;

 public:
  MOZ_IMPLICIT TypedOperandId(ObjOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_OBJECT) {}
  MOZ_IMPLICIT TypedOperandId(StringOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_STRING) {}
  MOZ_IMPLICIT TypedOperandId(SymbolOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_SYMBOL) {}
  MOZ_IMPLICIT TypedOperandId(BigIntOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_BIGINT) {}
  MOZ_IMPLICIT TypedOperandId(BooleanOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_BOOLEAN) {}
  MOZ_IMPLICIT TypedOperandId(Int32OperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_INT32) {}
  MOZ_IMPLICIT TypedOperandId(ValueTagOperandId val)
      : OperandId(val.id()), type_(JSVAL_TYPE_UNKNOWN) {}
  MOZ_IMPLICIT TypedOperandId(IntPtrOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_UNKNOWN) {}

  // Pair an untyped Value operand with an externally-determined type.
  TypedOperandId(ValOperandId val, JSValueType type)
      : OperandId(val.id()), type_(type) {}

  JSValueType type() const { return type_; }
};
// The set of IC kinds CacheIR can be generated for; each entry expands to an
// enumerator of CacheKind below.
#define CACHE_IR_KINDS(_) \
  _(GetProp)              \
  _(GetElem)              \
  _(GetName)              \
  _(GetPropSuper)         \
  _(GetElemSuper)         \
  _(GetIntrinsic)         \
  _(SetProp)              \
  _(SetElem)              \
  _(BindName)             \
  _(In)                   \
  _(HasOwn)               \
  _(CheckPrivateField)    \
  _(TypeOf)               \
  _(TypeOfEq)             \
  _(ToPropertyKey)        \
  _(InstanceOf)           \
  _(GetIterator)          \
  _(CloseIter)            \
  _(OptimizeGetIterator)  \
  _(OptimizeSpreadCall)   \
  _(Compare)              \
  _(ToBool)               \
  _(Call)                 \
  _(UnaryArith)           \
  _(BinaryArith)          \
  _(NewObject)            \
  _(NewArray)

// The kind of IC a piece of CacheIR was generated for.
enum class CacheKind : uint8_t {
#define DEFINE_KIND(kind) kind,
  CACHE_IR_KINDS(DEFINE_KIND)
#undef DEFINE_KIND
};
// Printable names for each CacheKind, indexed by CacheKind value.
extern const char* const CacheKindNames[];

// Number of IC inputs for the given kind.
extern size_t NumInputsForCacheKind(CacheKind kind);

// One opcode per CacheIR op, expanded from CacheIROpsGenerated.h.
enum class CacheOp : uint16_t {
#define DEFINE_OP(op, ...) op,
  CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP
  NumOpcodes,
};

// CacheIR opcode info that's read in performance-sensitive code. Stored as a
// single byte per op for better cache locality.
struct CacheIROpInfo {
  uint8_t argLength : 7;
  bool transpile : 1;
};
static_assert(sizeof(CacheIROpInfo) == 1);

// Per-opcode info and printable names, indexed by CacheOp value.
extern const CacheIROpInfo CacheIROpInfos[];
extern const char* const CacheIROpNames[];

// Returns the printable name of |op|.
inline const char* CacheIRCodeName(CacheOp op) {
  return CacheIROpNames[static_cast<size_t>(op)];
}

extern const uint32_t CacheIROpHealth[];
// A datum embedded in a CacheIR stub. The Type tag records both the storage
// size (machine word vs. 64 bits) and, for GC things, how the field must be
// traced.
class StubField {
 public:
  enum class Type : uint8_t {
    // These fields take up a single word.
    RawInt32,
    RawPointer,
    Shape,
    WeakShape,
    WeakGetterSetter,
    JSObject,
    WeakObject,
    Symbol,
    String,
    WeakBaseScript,
    JitCode,
    Id,
    AllocSite,

    // These fields take up 64 bits on all platforms.
    RawInt64,
    First64BitType = RawInt64,
    Value,
    Double,

    Limit
  };

  // True if |type| is stored as a single machine word.
  static bool sizeIsWord(Type type) {
    MOZ_ASSERT(type != Type::Limit);
    return type < Type::First64BitType;
  }

  // True if |type| is stored as 64 bits on all platforms.
  static bool sizeIsInt64(Type type) {
    MOZ_ASSERT(type != Type::Limit);
    return type >= Type::First64BitType;
  }

  // Storage size in bytes for a field of the given type.
  static size_t sizeInBytes(Type type) {
    if (sizeIsWord(type)) {
      return sizeof(uintptr_t);
    }
    MOZ_ASSERT(sizeIsInt64(type));
    return sizeof(int64_t);
  }

 private:
  uint64_t data_;
  Type type_;

 public:
  StubField(uint64_t data, Type type) : data_(data), type_(type) {
    // Call the static overload with the explicit |type| argument. The
    // original code called the argument-less non-static sizeIsWord(), which
    // invokes a member function on the object while it is still under
    // construction — legal here, but fragile and it obscures intent.
    MOZ_ASSERT_IF(sizeIsWord(type), data <= UINTPTR_MAX);
  }

  Type type() const { return type_; }

  bool sizeIsWord() const { return sizeIsWord(type_); }
  bool sizeIsInt64() const { return sizeIsInt64(type_); }
  size_t sizeInBytes() const { return sizeInBytes(type_); }

  // The field value for word-sized types.
  uintptr_t asWord() const {
    MOZ_ASSERT(sizeIsWord());
    return uintptr_t(data_);
  }

  // The field value for 64-bit types.
  uint64_t asInt64() const {
    MOZ_ASSERT(sizeIsInt64());
    return data_;
  }
} JS_HAZ_GC_POINTER;
// This class is used to wrap up information about a call to make it
// easier to convey from one function to another. (In particular,
// CacheIRWriter encodes the CallFlags in CacheIR, and CacheIRReader
// decodes them and uses them for compilation.)
class CallFlags {
 public:
  // How the arguments of the call are laid out / passed.
  enum ArgFormat : uint8_t {
    Unknown,
    Standard,
    Spread,
    FunCall,
    FunApplyArgsObj,
    FunApplyArray,
    FunApplyNullUndefined,
    LastArgFormat = FunApplyNullUndefined
  };

  CallFlags() = default;
  explicit CallFlags(ArgFormat format) : argFormat_(format) {}
  CallFlags(ArgFormat format, bool isConstructing, bool isSameRealm,
            bool needsUninitializedThis)
      : argFormat_(format),
        isConstructing_(isConstructing),
        isSameRealm_(isSameRealm),
        needsUninitializedThis_(needsUninitializedThis) {}
  CallFlags(bool isConstructing, bool isSpread, bool isSameRealm = false,
            bool needsUninitializedThis = false)
      : argFormat_(isSpread ? Spread : Standard),
        isConstructing_(isConstructing),
        isSameRealm_(isSameRealm),
        needsUninitializedThis_(needsUninitializedThis) {}

  ArgFormat getArgFormat() const { return argFormat_; }

  bool isConstructing() const {
    // Constructing calls are only supported for Standard and Spread formats.
    MOZ_ASSERT_IF(isConstructing_,
                  argFormat_ == Standard || argFormat_ == Spread);
    return isConstructing_;
  }

  bool isSameRealm() const { return isSameRealm_; }
  void setIsSameRealm() { isSameRealm_ = true; }

  bool needsUninitializedThis() const { return needsUninitializedThis_; }
  void setNeedsUninitializedThis() { needsUninitializedThis_ = true; }

  // Packs the flags into one byte: the ArgFormat in the low bits, the three
  // boolean flags in bits 5-7 (see the static constants below).
  uint8_t toByte() const {
    // See CacheIRReader::callFlags()
    MOZ_ASSERT(argFormat_ != ArgFormat::Unknown);
    uint8_t value = getArgFormat();
    if (isConstructing()) {
      value |= CallFlags::IsConstructing;
    }
    if (isSameRealm()) {
      value |= CallFlags::IsSameRealm;
    }
    if (needsUninitializedThis()) {
      value |= CallFlags::NeedsUninitializedThis;
    }
    return value;
  }

 private:
  ArgFormat argFormat_ = ArgFormat::Unknown;
  bool isConstructing_ = false;
  bool isSameRealm_ = false;
  bool needsUninitializedThis_ = false;

  // Used for encoding/decoding
  static const uint8_t ArgFormatBits = 4;
  static const uint8_t ArgFormatMask = (1 << ArgFormatBits) - 1;
  static_assert(LastArgFormat <= ArgFormatMask, "Not enough arg format bits");
  static const uint8_t IsConstructing = 1 << 5;
  static const uint8_t IsSameRealm = 1 << 6;
  static const uint8_t NeedsUninitializedThis = 1 << 7;

  friend class CacheIRReader;
  friend class CacheIRWriter;
};
// In baseline, we have to copy args onto the stack. Below this threshold, we
// will unroll the arg copy loop. We need to clamp this before providing it as
// an arg to a CacheIR op so that everything 5 or greater can share an IC.
constexpr uint32_t MaxUnrolledArgCopy = 5;

// Clamp |argc| to MaxUnrolledArgCopy so all calls with argc >= 5 share one IC.
// Note: uses std::min, so this header must include <algorithm> itself rather
// than rely on a transitive include.
inline uint32_t ClampFixedArgc(uint32_t argc) {
  return std::min(argc, MaxUnrolledArgCopy);
}
// Result of an attempt to attach an IC stub.
enum class AttachDecision {
  // We cannot attach a stub.
  NoAction,

  // We can attach a stub.
  Attach,

  // We cannot currently attach a stub, but we expect to be able to do so in
  // the future. In this case, we do not call trackNotAttached().
  TemporarilyUnoptimizable,

  // We want to attach a stub, but the result of the operation is
  // needed to generate that stub. For example, AddSlot needs to know
  // the resulting shape. Note: the attached stub will inspect the
  // inputs to the operation, so most input checks should be done
  // before the actual operation, with only minimal checks remaining
  // for the deferred portion. This prevents arbitrary scripted code
  // run by the operation from interfering with the conditions being
  // checked.
  Deferred
};

// If the input expression evaluates to an AttachDecision other than NoAction,
// return that AttachDecision. If it is NoAction, do nothing.
#define TRY_ATTACH(expr)                                    \
  do {                                                      \
    AttachDecision tryAttachTempResult_ = expr;             \
    if (tryAttachTempResult_ != AttachDecision::NoAction) { \
      return tryAttachTempResult_;                          \
    }                                                       \
  } while (0)
// Set of arguments supported by GetIndexOfArgument.
// Support for higher argument indices can be added easily, but is currently
// unneeded.
enum class ArgumentKind : uint8_t {
  Callee,
  This,
  NewTarget,
  Arg0,
  Arg1,
  Arg2,
  Arg3,
  Arg4,
  Arg5,
  Arg6,
  Arg7,
  NumKinds
};

// Number of contiguous ArgN enumerators (Arg0..Arg7).
const uint8_t ArgumentKindArgIndexLimit =
    uint8_t(ArgumentKind::NumKinds) - uint8_t(ArgumentKind::Arg0);

// Maps a zero-based argument index to the matching ArgN kind.
inline ArgumentKind ArgumentKindForArgIndex(uint32_t idx) {
  MOZ_ASSERT(idx < ArgumentKindArgIndexLimit);
  return ArgumentKind(uint32_t(ArgumentKind::Arg0) + idx);
}
// This function calculates the index of an argument based on the call flags.
// addArgc is an out-parameter, indicating whether the value of argc should
// be added to the return value to find the actual index.
inline int32_t GetIndexOfArgument(ArgumentKind kind, CallFlags flags,
                                  bool* addArgc) {
  // *** STACK LAYOUT (bottom to top) ***        ******** INDEX ********
  //   Callee                                    <-- argc+1 + isConstructing
  //   ThisValue                                 <-- argc   + isConstructing
  //   Args: | Arg0 |        | ArgArray |        <-- argc-1 + isConstructing
  //         | Arg1 | --or-- |          |        <-- argc-2 + isConstructing
  //         | ...  |        | (if spread        <-- ...
  //         | ArgN |        |  call)    |       <-- 0      + isConstructing
  //   NewTarget (only if constructing)          <-- 0 (if it exists)
  //
  // If this is a spread call, then argc is always 1, and we can calculate the
  // index directly. If this is not a spread call, then the index of any
  // argument other than NewTarget depends on argc.

  // First, determine whether the caller must add argc to our return value.
  switch (flags.getArgFormat()) {
    case CallFlags::Standard:
      *addArgc = true;
      break;
    case CallFlags::Spread:
      // Spread calls do not have Arg1 or higher.
      MOZ_ASSERT(kind <= ArgumentKind::Arg0);
      *addArgc = false;
      break;
    case CallFlags::Unknown:
    case CallFlags::FunCall:
    case CallFlags::FunApplyArgsObj:
    case CallFlags::FunApplyArray:
    case CallFlags::FunApplyNullUndefined:
      MOZ_CRASH("Currently unreachable");
      break;
  }

  // Second, compute the offset relative to argc. Callee and This sit just
  // above the (possibly collapsed) argument area; Arg0..Arg7 descend from
  // there, one slot per argument index.
  const bool hasArgumentArray = !*addArgc;
  const int32_t base =
      int32_t(flags.isConstructing()) + int32_t(hasArgumentArray);

  if (kind == ArgumentKind::NewTarget) {
    MOZ_ASSERT(flags.isConstructing());
    *addArgc = false;
    return 0;
  }
  if (kind == ArgumentKind::Callee) {
    return base + 1;
  }
  if (kind == ArgumentKind::This) {
    return base;
  }
  if (kind >= ArgumentKind::Arg0 && kind <= ArgumentKind::Arg7) {
    // ArgK is at base - 1 - K, matching the per-case constants in the
    // original switch.
    return base - 1 - (int32_t(kind) - int32_t(ArgumentKind::Arg0));
  }
  MOZ_CRASH("Invalid argument kind");
}
// We use this enum as GuardClass operand, instead of storing Class* pointers
// in the IR, to keep the IR compact and the same size on all platforms.
enum class GuardClassKind : uint8_t {
  Array,
  PlainObject,
  FixedLengthArrayBuffer,
  ResizableArrayBuffer,
  FixedLengthSharedArrayBuffer,
  GrowableSharedArrayBuffer,
  FixedLengthDataView,
  ResizableDataView,
  MappedArguments,
  UnmappedArguments,
  WindowProxy,
  JSFunction,
  BoundFunction,
  Set,
  Map,
};

// Maps a GuardClassKind back to its JSClass. (Declaration only; the
// definition lives outside this header.)
const JSClass* ClassFor(GuardClassKind kind);

// Distinguishes fixed-length from resizable array-buffer views.
enum class ArrayBufferViewKind : uint8_t {
  FixedLength,
  Resizable,
};
} // namespace jit
} // namespace js
#endif /* jit_CacheIR_h */