1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
|
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/RangeAnalysis.h"
#include "mozilla/MathAlgorithms.h"
#include <algorithm>
#include "jsmath.h"
#include "jit/CompileInfo.h"
#include "jit/IonAnalysis.h"
#include "jit/JitSpewer.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "js/Conversions.h"
#include "js/ScalarType.h" // js::Scalar::Type
#include "util/CheckedArithmetic.h"
#include "vm/ArgumentsObject.h"
#include "vm/TypedArrayObject.h"
#include "vm/Uint8Clamped.h"
#include "vm/BytecodeUtil-inl.h"
using namespace js;
using namespace js::jit;
using JS::GenericNaN;
using JS::ToInt32;
using mozilla::Abs;
using mozilla::CountLeadingZeroes32;
using mozilla::ExponentComponent;
using mozilla::FloorLog2;
using mozilla::IsNegativeZero;
using mozilla::NegativeInfinity;
using mozilla::NumberEqualsInt32;
using mozilla::PositiveInfinity;
// [SMDOC] IonMonkey Range Analysis
//
// This algorithm is based on the paper "Eliminating Range Checks Using
// Static Single Assignment Form" by Gough and Klaeren.
//
// We associate a range object with each SSA name, and the ranges are consulted
// in order to determine whether overflow is possible for arithmetic
// computations.
//
// An important source of range information that requires care to take
// advantage of is conditional control flow. Consider the code below:
//
// if (x < 0) {
// y = x + 2000000000;
// } else {
// if (x < 1000000000) {
// y = x * 2;
// } else {
// y = x - 3000000000;
// }
// }
//
// The arithmetic operations in this code cannot overflow, but it is not
// sufficient to simply associate each name with a range, since the information
// differs between basic blocks. The traditional dataflow approach would be
// associate ranges with (name, basic block) pairs. This solution is not
// satisfying, since we lose the benefit of SSA form: in SSA form, each
// definition has a unique name, so there is no need to track information about
// the control flow of the program.
//
// The approach used here is to add a new form of pseudo operation called a
// beta node, which associates range information with a value. These beta
// instructions take one argument and additionally have an auxiliary constant
// range associated with them. Operationally, beta nodes are just copies, but
// the invariant expressed by beta node copies is that the output will fall
// inside the range given by the beta node. Gough and Klaeren refer to SSA
// extended with these beta nodes as XSA form. The following shows the example
// code transformed into XSA form:
//
// if (x < 0) {
// x1 = Beta(x, [INT_MIN, -1]);
// y1 = x1 + 2000000000;
// } else {
// x2 = Beta(x, [0, INT_MAX]);
// if (x2 < 1000000000) {
// x3 = Beta(x2, [INT_MIN, 999999999]);
// y2 = x3*2;
// } else {
// x4 = Beta(x2, [1000000000, INT_MAX]);
// y3 = x4 - 3000000000;
// }
// y4 = Phi(y2, y3);
// }
// y = Phi(y1, y4);
//
// We insert beta nodes for the purposes of range analysis (they might also be
// usefully used for other forms of bounds check elimination) and remove them
// after range analysis is performed. The remaining compiler phases do not ever
// encounter beta nodes.
// Test whether |use| is dominated by |block|. A use that feeds a phi is
// attributed to the corresponding predecessor edge, not to the phi's own
// block, since that is where the value actually flows in.
static bool IsDominatedUse(MBasicBlock* block, MUse* use) {
  MNode* consumer = use->consumer();
  if (consumer->isDefinition() && consumer->toDefinition()->isPhi()) {
    MPhi* phi = consumer->toDefinition()->toPhi();
    return block->dominates(phi->block()->getPredecessor(phi->indexOf(use)));
  }
  return block->dominates(consumer->block());
}
// Spew the computed range of |def|, if any. Compiles to a no-op when spew
// support is not built in.
static inline void SpewRange(MDefinition* def) {
#ifdef JS_JITSPEW
  // Nothing to print for untyped definitions or ones without a range.
  if (!JitSpewEnabled(JitSpew_Range) || def->type() == MIRType::None ||
      !def->range()) {
    return;
  }
  JitSpewHeader(JitSpew_Range);
  Fprinter& out = JitSpewPrinter();
  out.printf(" ");
  def->printName(out);
  out.printf(" has range ");
  def->range()->dump(out);
  out.printf("\n");
#endif
}
#ifdef JS_JITSPEW
// Map a TruncateKind to its human-readable name for spew output.
static const char* TruncateKindString(TruncateKind kind) {
  switch (kind) {
    case TruncateKind::Truncate:
      return "Truncate";
    case TruncateKind::IndirectTruncate:
      return "IndirectTruncate";
    case TruncateKind::TruncateAfterBailouts:
      return "TruncateAfterBailouts";
    case TruncateKind::NoTruncate:
      return "NoTruncate";
    default:
      MOZ_CRASH("Unknown truncate kind.");
  }
}
// Trace a truncation decision for |def|: the kind applied and whether the
// definition is being cloned for the truncated use.
static inline void SpewTruncate(MDefinition* def, TruncateKind kind,
                                bool shouldClone) {
  if (!JitSpewEnabled(JitSpew_Range)) {
    return;
  }
  JitSpewHeader(JitSpew_Range);
  Fprinter& out = JitSpewPrinter();
  out.printf(" ");
  out.printf("truncating ");
  def->printName(out);
  out.printf(" (kind: %s, clone: %d)\n", TruncateKindString(kind),
             shouldClone);
}
#else
// Non-spew builds: truncation tracing compiles away to nothing.
static inline void SpewTruncate(MDefinition* def, TruncateKind kind,
                                bool shouldClone) {}
#endif
TempAllocator& RangeAnalysis::alloc() const { return graph_.alloc(); }
// Redirect every use of |orig| dominated by |block| to |dom|, skipping
// |dom|'s own use of |orig| (which must keep consuming the original value).
void RangeAnalysis::replaceDominatedUsesWith(MDefinition* orig,
                                             MDefinition* dom,
                                             MBasicBlock* block) {
  MUseIterator iter(orig->usesBegin());
  while (iter != orig->usesEnd()) {
    MUse* use = *iter;
    // Advance before rewriting: replaceProducer mutates the use list.
    ++iter;
    if (use->consumer() == dom) {
      continue;
    }
    if (IsDominatedUse(block, use)) {
      use->replaceProducer(dom);
    }
  }
}
// Insert MBeta nodes below conditional branches so that the range implied by
// a numeric comparison (e.g. |x < c|) is attached to the dominated uses of
// the compared value. See the [SMDOC] comment above for background. Returns
// false only on OOM.
bool RangeAnalysis::addBetaNodes() {
  JitSpew(JitSpew_Range, "Adding beta nodes");
  for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
    MBasicBlock* block = *i;
    JitSpew(JitSpew_Range, "Looking at block %u", block->id());
    // Only blocks whose immediate dominator ends in a numeric comparison
    // give us new range information.
    BranchDirection branch_dir;
    MTest* test = block->immediateDominatorBranch(&branch_dir);
    if (!test || !test->getOperand(0)->isCompare()) {
      continue;
    }
    MCompare* compare = test->getOperand(0)->toCompare();
    if (!compare->isNumericComparison()) {
      continue;
    }
    // TODO: support unsigned comparisons
    if (compare->compareType() == MCompare::Compare_UInt32) {
      continue;
    }
    // isNumericComparison should return false for UIntPtr.
    MOZ_ASSERT(compare->compareType() != MCompare::Compare_UIntPtr);
    MDefinition* left = compare->getOperand(0);
    MDefinition* right = compare->getOperand(1);
    double bound;
    double conservativeLower = NegativeInfinity<double>();
    double conservativeUpper = PositiveInfinity<double>();
    MDefinition* val = nullptr;
    JSOp jsop = compare->jsop();
    if (branch_dir == FALSE_BRANCH) {
      // On the false branch the negated comparison holds; a negated
      // comparison is also satisfied by NaN, so widen the conservative
      // bounds to admit NaN.
      jsop = NegateCompareOp(jsop);
      conservativeLower = GenericNaN();
      conservativeUpper = GenericNaN();
    }
    MConstant* leftConst = left->maybeConstantValue();
    MConstant* rightConst = right->maybeConstantValue();
    if (leftConst && leftConst->isTypeRepresentableAsDouble()) {
      // Normalize |c OP x| to |x OP' c| so the constant is always on the
      // right.
      bound = leftConst->numberToDouble();
      val = right;
      jsop = ReverseCompareOp(jsop);
    } else if (rightConst && rightConst->isTypeRepresentableAsDouble()) {
      bound = rightConst->numberToDouble();
      val = left;
    } else if (left->type() == MIRType::Int32 &&
               right->type() == MIRType::Int32) {
      // Neither side is constant, but for an int32 |<| or |>| we can still
      // refine both operands: the smaller one cannot be INT_MAX and the
      // greater one cannot be INT_MIN.
      MDefinition* smaller = nullptr;
      MDefinition* greater = nullptr;
      if (jsop == JSOp::Lt) {
        smaller = left;
        greater = right;
      } else if (jsop == JSOp::Gt) {
        smaller = right;
        greater = left;
      }
      if (smaller && greater) {
        if (!alloc().ensureBallast()) {
          return false;
        }
        MBeta* beta;
        beta = MBeta::New(
            alloc(), smaller,
            Range::NewInt32Range(alloc(), JSVAL_INT_MIN, JSVAL_INT_MAX - 1));
        block->insertBefore(*block->begin(), beta);
        replaceDominatedUsesWith(smaller, beta, block);
        JitSpew(JitSpew_Range, " Adding beta node for smaller %u",
                smaller->id());
        beta = MBeta::New(
            alloc(), greater,
            Range::NewInt32Range(alloc(), JSVAL_INT_MIN + 1, JSVAL_INT_MAX));
        block->insertBefore(*block->begin(), beta);
        replaceDominatedUsesWith(greater, beta, block);
        JitSpew(JitSpew_Range, " Adding beta node for greater %u",
                greater->id());
      }
      continue;
    } else {
      continue;
    }
    // At this point, one of the operands of the compare is a constant, and
    // val is the other operand.
    MOZ_ASSERT(val);
    Range comp;
    switch (jsop) {
      case JSOp::Le:
        comp.setDouble(conservativeLower, bound);
        break;
      case JSOp::Lt:
        // For integers, if x < c, the upper bound of x is c-1.
        if (val->type() == MIRType::Int32) {
          int32_t intbound;
          if (NumberEqualsInt32(bound, &intbound) &&
              SafeSub(intbound, 1, &intbound)) {
            bound = intbound;
          }
        }
        comp.setDouble(conservativeLower, bound);
        // Negative zero is not less than zero.
        if (bound == 0) {
          comp.refineToExcludeNegativeZero();
        }
        break;
      case JSOp::Ge:
        comp.setDouble(bound, conservativeUpper);
        break;
      case JSOp::Gt:
        // For integers, if x > c, the lower bound of x is c+1.
        if (val->type() == MIRType::Int32) {
          int32_t intbound;
          if (NumberEqualsInt32(bound, &intbound) &&
              SafeAdd(intbound, 1, &intbound)) {
            bound = intbound;
          }
        }
        comp.setDouble(bound, conservativeUpper);
        // Negative zero is not greater than zero.
        if (bound == 0) {
          comp.refineToExcludeNegativeZero();
        }
        break;
      case JSOp::StrictEq:
      case JSOp::Eq:
        comp.setDouble(bound, bound);
        break;
      case JSOp::StrictNe:
      case JSOp::Ne:
        // Negative zero is not not-equal to zero.
        if (bound == 0) {
          comp.refineToExcludeNegativeZero();
          break;
        }
        continue;  // well, we could have
                   // [-\inf, bound-1] U [bound+1, \inf] but we only use
                   // contiguous ranges.
      default:
        continue;
    }
    if (JitSpewEnabled(JitSpew_Range)) {
      JitSpewHeader(JitSpew_Range);
      Fprinter& out = JitSpewPrinter();
      out.printf(" Adding beta node for %u with range ", val->id());
      comp.dump(out);
      out.printf("\n");
    }
    if (!alloc().ensureBallast()) {
      return false;
    }
    MBeta* beta = MBeta::New(alloc(), val, new (alloc()) Range(comp));
    block->insertBefore(*block->begin(), beta);
    replaceDominatedUsesWith(val, beta, block);
  }
  return true;
}
bool RangeAnalysis::removeBetaNodes() {
JitSpew(JitSpew_Range, "Removing beta nodes");
for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
MBasicBlock* block = *i;
for (MDefinitionIterator iter(*i); iter;) {
MDefinition* def = *iter++;
if (def->isBeta()) {
auto* beta = def->toBeta();
MDefinition* op = beta->input();
JitSpew(JitSpew_Range, " Removing beta node %u for %u", beta->id(),
op->id());
beta->justReplaceAllUsesWith(op);
block->discard(beta);
} else {
// We only place Beta nodes at the beginning of basic
// blocks, so if we see something else, we can move on
// to the next block.
break;
}
}
}
return true;
}
// Print this symbolic bound: a "[loop] " prefix when the bound is tied to a
// loop, followed by the linear sum itself.
void SymbolicBound::dump(GenericPrinter& out) const {
  if (loop) {
    out.printf("[loop] ");
  }
  sum.dump(out);
}
// Convenience overload: dump to stderr with a trailing newline.
void SymbolicBound::dump() const {
  Fprinter printer(stderr);
  dump(printer);
  printer.printf("\n");
  printer.finish();
}
// Test whether the given range's exponent tells us anything that its lower
// and upper bound values don't.
static bool IsExponentInteresting(const Range* r) {
  // Without full int32 bounds, the exponent is the only magnitude
  // information we have.
  if (!r->hasInt32Bounds()) {
    return true;
  }
  // Integer-valued ranges have exact integer bounds, so the exponent adds
  // nothing.
  if (!r->canHaveFractionalPart()) {
    return false;
  }
  // Fractional ranges round their bounds conservatively, so when the bounds
  // straddle a power-of-two boundary the exponent may be tighter.
  uint32_t largestMagnitude = std::max(Abs(r->lower()), Abs(r->upper()));
  return FloorLog2(largestMagnitude) > r->exponent();
}
// Print a human-readable description of this range: an F/I prefix (may the
// value be fractional?), the [lower, upper] bounds (with any symbolic
// bounds), the set of special values that may also occur, and the exponent
// bound when it carries extra information.
void Range::dump(GenericPrinter& out) const {
  assertInvariants();

  // Prefix: fractional-capable vs. integer-only.
  out.printf(canHaveFractionalPart_ ? "F" : "I");

  out.printf("[");
  if (hasInt32LowerBound_) {
    out.printf("%d", lower_);
  } else {
    out.printf("?");
  }
  if (symbolicLower_) {
    out.printf(" {");
    symbolicLower_->dump(out);
    out.printf("}");
  }
  out.printf(", ");
  if (hasInt32UpperBound_) {
    out.printf("%d", upper_);
  } else {
    out.printf("?");
  }
  if (symbolicUpper_) {
    out.printf(" {");
    symbolicUpper_->dump(out);
    out.printf("}");
  }
  out.printf("]");

  // Special values not captured by the int32 bounds alone.
  bool includesNaN = max_exponent_ == IncludesInfinityAndNaN;
  bool includesNegativeInfinity =
      max_exponent_ >= IncludesInfinity && !hasInt32LowerBound_;
  bool includesPositiveInfinity =
      max_exponent_ >= IncludesInfinity && !hasInt32UpperBound_;
  bool includesNegativeZero = canBeNegativeZero_;
  if (includesNaN || includesNegativeInfinity || includesPositiveInfinity ||
      includesNegativeZero) {
    out.printf(" (");
    bool first = true;
    // Emit one union member, inserting a single-space separator between
    // consecutive members.
    auto emitMember = [&](const char* text) {
      if (!first) {
        out.printf(" ");
      }
      first = false;
      out.printf("%s", text);
    };
    if (includesNaN) {
      emitMember("U NaN");
    }
    if (includesNegativeInfinity) {
      emitMember("U -Infinity");
    }
    if (includesPositiveInfinity) {
      emitMember("U Infinity");
    }
    if (includesNegativeZero) {
      emitMember("U -0");
    }
    out.printf(")");
  }

  if (max_exponent_ < IncludesInfinity && IsExponentInteresting(this)) {
    out.printf(" (< pow(2, %d+1))", max_exponent_);
  }
}
// Convenience overload: dump to stderr with a trailing newline.
void Range::dump() const {
  Fprinter printer(stderr);
  dump(printer);
  printer.printf("\n");
  printer.finish();
}
// Compute the intersection of |lhs| and |rhs|. Returns a newly-allocated
// range, or nullptr when either the result is unrestricted or it cannot be
// represented precisely. Sets *emptyRange to true when the two ranges are
// provably disjoint, which means the code being analyzed is unreachable.
Range* Range::intersect(TempAllocator& alloc, const Range* lhs,
                        const Range* rhs, bool* emptyRange) {
  *emptyRange = false;
  // A null range means "no information"; intersecting with it just yields a
  // copy of the other operand.
  if (!lhs && !rhs) {
    return nullptr;
  }
  if (!lhs) {
    return new (alloc) Range(*rhs);
  }
  if (!rhs) {
    return new (alloc) Range(*lhs);
  }
  int32_t newLower = std::max(lhs->lower_, rhs->lower_);
  int32_t newUpper = std::min(lhs->upper_, rhs->upper_);
  // If upper < lower, then we have conflicting constraints. Consider:
  //
  // if (x < 0) {
  //   if (x > 0) {
  //     [Some code.]
  //   }
  // }
  //
  // In this case, the block is unreachable.
  if (newUpper < newLower) {
    // If both ranges can be NaN, the result can still be NaN.
    if (!lhs->canBeNaN() || !rhs->canBeNaN()) {
      *emptyRange = true;
    }
    return nullptr;
  }
  // An int32 bound on either side constrains the intersection; fractional
  // parts and negative zero survive only if both sides allow them.
  bool newHasInt32LowerBound =
      lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_;
  bool newHasInt32UpperBound =
      lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_;
  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ && rhs->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(lhs->canBeNegativeZero_ && rhs->canBeNegativeZero_);
  uint16_t newExponent = std::min(lhs->max_exponent_, rhs->max_exponent_);
  // NaN is a special value which is neither greater than infinity or less than
  // negative infinity. When we intersect two ranges like [?, 0] and [0, ?], we
  // can end up thinking we have both a lower and upper bound, even though NaN
  // is still possible. In this case, just be conservative, since any case where
  // we can have NaN is not especially interesting.
  if (newHasInt32LowerBound && newHasInt32UpperBound &&
      newExponent == IncludesInfinityAndNaN) {
    return nullptr;
  }
  // If one of the ranges has a fractional part and the other doesn't, it's
  // possible that we will have computed a newExponent that's more precise
  // than our newLower and newUpper. This is unusual, so we handle it here
  // instead of in optimize().
  //
  // For example, consider the range F[0,1.5]. Range analysis represents the
  // lower and upper bound as integers, so we'd actually have
  // F[0,2] (< pow(2, 0+1)). In this case, the exponent gives us a slightly
  // more precise upper bound than the integer upper bound.
  //
  // When intersecting such a range with an integer range, the fractional part
  // of the range is dropped. The max exponent of 0 remains valid, so the
  // upper bound needs to be adjusted to 1.
  //
  // When intersecting F[0,2] (< pow(2, 0+1)) with a range like F[2,4],
  // the naive intersection is I[2,2], but since the max exponent tells us
  // that the value is always less than 2, the intersection is actually empty.
  if (lhs->canHaveFractionalPart() != rhs->canHaveFractionalPart() ||
      (lhs->canHaveFractionalPart() && newHasInt32LowerBound &&
       newHasInt32UpperBound && newLower == newUpper)) {
    refineInt32BoundsByExponent(newExponent, &newLower, &newHasInt32LowerBound,
                                &newUpper, &newHasInt32UpperBound);
    // If we're intersecting two ranges that don't overlap, this could also
    // push the bounds past each other, since the actual intersection is
    // the empty set.
    if (newLower > newUpper) {
      *emptyRange = true;
      return nullptr;
    }
  }
  return new (alloc)
      Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
            newCanHaveFractionalPart, newMayIncludeNegativeZero, newExponent);
}
void Range::unionWith(const Range* other) {
int32_t newLower = std::min(lower_, other->lower_);
int32_t newUpper = std::max(upper_, other->upper_);
bool newHasInt32LowerBound =
hasInt32LowerBound_ && other->hasInt32LowerBound_;
bool newHasInt32UpperBound =
hasInt32UpperBound_ && other->hasInt32UpperBound_;
FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
canHaveFractionalPart_ || other->canHaveFractionalPart_);
NegativeZeroFlag newMayIncludeNegativeZero =
NegativeZeroFlag(canBeNegativeZero_ || other->canBeNegativeZero_);
uint16_t newExponent = std::max(max_exponent_, other->max_exponent_);
rawInitialize(newLower, newHasInt32LowerBound, newUpper,
newHasInt32UpperBound, newCanHaveFractionalPart,
newMayIncludeNegativeZero, newExponent);
}
// Construct a Range for |def|: use the instruction's computed range when one
// is available, otherwise fall back to a conservative range implied by its
// MIR type.
Range::Range(const MDefinition* def)
    : symbolicLower_(nullptr), symbolicUpper_(nullptr) {
  if (const Range* other = def->range()) {
    // The instruction has range information; use it.
    *this = *other;

    // Simulate the effect of converting the value to its type.
    // Note: we cannot clamp here, since ranges aren't allowed to shrink
    // and truncation can increase range again. So doing wrapAround to
    // mimic a possible truncation.
    switch (def->type()) {
      case MIRType::Int32:
        // MToNumberInt32 cannot truncate. So we can safely clamp.
        if (def->isToNumberInt32()) {
          clampToInt32();
        } else {
          wrapAroundToInt32();
        }
        break;
      case MIRType::Boolean:
        wrapAroundToBoolean();
        break;
      case MIRType::None:
        MOZ_CRASH("Asking for the range of an instruction with no value");
      default:
        break;
    }
  } else {
    // Otherwise just use type information. We can trust the type here
    // because we don't care what value the instruction actually produces,
    // but what value we might get after we get past the bailouts.
    switch (def->type()) {
      case MIRType::Int32:
        setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
        break;
      case MIRType::Boolean:
        setInt32(0, 1);
        break;
      case MIRType::None:
        MOZ_CRASH("Asking for the range of an instruction with no value");
      default:
        setUnknown();
        break;
    }
  }

  // As a special case, MUrsh is permitted to claim a result type of
  // MIRType::Int32 while actually returning values in [0,UINT32_MAX] without
  // bailouts. If range analysis hasn't ruled out values in
  // (INT32_MAX,UINT32_MAX], set the range to be conservatively correct for
  // use as either a uint32 or an int32.
  if (!hasInt32UpperBound() && def->isUrsh() &&
      def->toUrsh()->bailoutsDisabled() && def->type() != MIRType::Int64) {
    lower_ = INT32_MIN;
  }

  assertInvariants();
}
// Return the Range exponent classification implied by the double |d|:
// the sentinel values for NaN/infinity, otherwise the (non-negative)
// binary exponent of the value.
static uint16_t ExponentImpliedByDouble(double d) {
  // NaN and infinity map to the dedicated sentinel exponents.
  if (std::isnan(d)) {
    return Range::IncludesInfinityAndNaN;
  }
  if (std::isinf(d)) {
    return Range::IncludesInfinity;
  }

  // Clamp the exponent at zero, since the Range class doesn't track
  // fractional ranges.
  int_fast16_t exponent = ExponentComponent(d);
  return exponent < 0 ? uint16_t(0) : uint16_t(exponent);
}
// Initialize this range to cover the closed double interval [l, h],
// inferring the int32 bounds, exponent, fractional-part and negative-zero
// flags from the endpoints.
void Range::setDouble(double l, double h) {
  MOZ_ASSERT(!(l > h));

  // Infer lower_, upper_, hasInt32LowerBound_, and hasInt32UpperBound_.
  // floor/ceil keep the integer bounds conservative when the endpoints are
  // fractional; endpoints outside int32 saturate and mark the bound missing
  // (or pinned, when beyond the far end).
  if (l >= INT32_MIN && l <= INT32_MAX) {
    lower_ = int32_t(::floor(l));
    hasInt32LowerBound_ = true;
  } else if (l >= INT32_MAX) {
    lower_ = INT32_MAX;
    hasInt32LowerBound_ = true;
  } else {
    lower_ = INT32_MIN;
    hasInt32LowerBound_ = false;
  }
  if (h >= INT32_MIN && h <= INT32_MAX) {
    upper_ = int32_t(::ceil(h));
    hasInt32UpperBound_ = true;
  } else if (h <= INT32_MIN) {
    upper_ = INT32_MIN;
    hasInt32UpperBound_ = true;
  } else {
    upper_ = INT32_MAX;
    hasInt32UpperBound_ = false;
  }

  // Infer max_exponent_ from whichever endpoint has the larger magnitude.
  uint16_t lExp = ExponentImpliedByDouble(l);
  uint16_t hExp = ExponentImpliedByDouble(h);
  max_exponent_ = std::max(lExp, hExp);

  canHaveFractionalPart_ = ExcludesFractionalParts;
  canBeNegativeZero_ = ExcludesNegativeZero;

  // Infer the canHaveFractionalPart_ setting. We can have a
  // fractional part if the range crosses through the neighborhood of zero. We
  // won't have a fractional value if the value is always beyond the point at
  // which double precision can't represent fractional values.
  uint16_t minExp = std::min(lExp, hExp);
  bool includesNegative = std::isnan(l) || l < 0;
  bool includesPositive = std::isnan(h) || h > 0;
  bool crossesZero = includesNegative && includesPositive;
  if (crossesZero || minExp < MaxTruncatableExponent) {
    canHaveFractionalPart_ = IncludesFractionalParts;
  }

  // Infer the canBeNegativeZero_ setting. We can have a negative zero if
  // either bound is zero.
  if (!(l > 0) && !(h < 0)) {
    canBeNegativeZero_ = IncludesNegativeZero;
  }

  optimize();
}
// Initialize this range to the single value |d|.
void Range::setDoubleSingleton(double d) {
  setDouble(d, d);

  // setDouble compares for range purposes and treats -0 as equal to +0,
  // conservatively keeping the negative-zero flag set. For a singleton we
  // know the exact value, so the flag can be cleared unless the value
  // really is negative zero.
  canBeNegativeZero_ =
      IsNegativeZero(d) ? canBeNegativeZero_ : ExcludesNegativeZero;

  assertInvariants();
}
// True unless both operands have full int32 bounds on both sides.
static inline bool MissingAnyInt32Bounds(const Range* lhs, const Range* rhs) {
  return !(lhs->hasInt32Bounds() && rhs->hasInt32Bounds());
}
// Compute a range covering lhs + rhs.
Range* Range::add(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // Sum the bounds in 64-bit arithmetic so that int32 overflow is
  // representable; a missing bound on either operand means the sum has no
  // bound on that side.
  int64_t l = (int64_t)lhs->lower_ + (int64_t)rhs->lower_;
  if (!lhs->hasInt32LowerBound() || !rhs->hasInt32LowerBound()) {
    l = NoInt32LowerBound;
  }

  int64_t h = (int64_t)lhs->upper_ + (int64_t)rhs->upper_;
  if (!lhs->hasInt32UpperBound() || !rhs->hasInt32UpperBound()) {
    h = NoInt32UpperBound;
  }

  // The exponent is at most one greater than the greater of the operands'
  // exponents, except for NaN and infinity cases.
  uint16_t e = std::max(lhs->max_exponent_, rhs->max_exponent_);
  if (e <= Range::MaxFiniteExponent) {
    ++e;
  }

  // Infinity + -Infinity is NaN.
  if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN()) {
    e = Range::IncludesInfinityAndNaN;
  }

  // The sum can be fractional if either addend can be; it is -0 only when
  // both addends are -0, since (-0) + (+0) is +0.
  return new (alloc) Range(
      l, h,
      FractionalPartFlag(lhs->canHaveFractionalPart() ||
                         rhs->canHaveFractionalPart()),
      NegativeZeroFlag(lhs->canBeNegativeZero() && rhs->canBeNegativeZero()),
      e);
}
// Compute a range covering lhs - rhs.
Range* Range::sub(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // The smallest difference pairs lhs's lower bound with rhs's upper bound,
  // and vice versa; use 64-bit arithmetic so int32 overflow is representable.
  int64_t l = (int64_t)lhs->lower_ - (int64_t)rhs->upper_;
  if (!lhs->hasInt32LowerBound() || !rhs->hasInt32UpperBound()) {
    l = NoInt32LowerBound;
  }

  int64_t h = (int64_t)lhs->upper_ - (int64_t)rhs->lower_;
  if (!lhs->hasInt32UpperBound() || !rhs->hasInt32LowerBound()) {
    h = NoInt32UpperBound;
  }

  // The exponent is at most one greater than the greater of the operands'
  // exponents, except for NaN and infinity cases.
  uint16_t e = std::max(lhs->max_exponent_, rhs->max_exponent_);
  if (e <= Range::MaxFiniteExponent) {
    ++e;
  }

  // Infinity - Infinity is NaN.
  if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN()) {
    e = Range::IncludesInfinityAndNaN;
  }

  // The difference is -0 when lhs is -0 and rhs is +0; testing rhs with
  // canBeZero() (which includes -0) is a conservative over-approximation.
  return new (alloc)
      Range(l, h,
            FractionalPartFlag(lhs->canHaveFractionalPart() ||
                               rhs->canHaveFractionalPart()),
            NegativeZeroFlag(lhs->canBeNegativeZero() && rhs->canBeZero()), e);
}
// Compute a range covering lhs & rhs over int32 operands.
Range* Range::and_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());

  // If both numbers can be negative, result can be negative in the whole range
  if (lhs->lower() < 0 && rhs->lower() < 0) {
    return Range::NewInt32Range(alloc, INT32_MIN,
                                std::max(lhs->upper(), rhs->upper()));
  }

  // Only one of both numbers can be negative.
  // - result can't be negative
  // - Upper bound is minimum of both upper range,
  int32_t lower = 0;
  int32_t upper = std::min(lhs->upper(), rhs->upper());

  // EXCEPT when upper bound of non negative number is max value,
  // because negative value can return the whole max value.
  // -1 & 5 = 5
  // When one side may be negative, only the non-negative side's upper bound
  // constrains the result, so use that bound alone.
  if (lhs->lower() < 0) {
    upper = rhs->upper();
  }
  if (rhs->lower() < 0) {
    upper = lhs->upper();
  }

  return Range::NewInt32Range(alloc, lower, upper);
}
// Compute a range covering lhs | rhs over int32 operands.
Range* Range::or_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  // When one operand is always 0 or always -1, it's a special case where we
  // can compute a fully precise result. Handling these up front also
  // protects the code below from calling CountLeadingZeroes32 with a zero
  // operand or from shifting an int32_t by 32.
  if (lhs->lower() == lhs->upper()) {
    if (lhs->lower() == 0) {
      // x | 0 == x.
      return new (alloc) Range(*rhs);
    }
    if (lhs->lower() == -1) {
      // x | -1 == -1, i.e. exactly the lhs singleton.
      return new (alloc) Range(*lhs);
    }
  }
  if (rhs->lower() == rhs->upper()) {
    if (rhs->lower() == 0) {
      return new (alloc) Range(*lhs);
    }
    if (rhs->lower() == -1) {
      return new (alloc) Range(*rhs);
    }
  }

  // The code below uses CountLeadingZeroes32, which has undefined behavior
  // if its operand is 0. We rely on the code above to protect it.
  MOZ_ASSERT_IF(lhs->lower() >= 0, lhs->upper() != 0);
  MOZ_ASSERT_IF(rhs->lower() >= 0, rhs->upper() != 0);
  MOZ_ASSERT_IF(lhs->upper() < 0, lhs->lower() != -1);
  MOZ_ASSERT_IF(rhs->upper() < 0, rhs->lower() != -1);

  int32_t lower = INT32_MIN;
  int32_t upper = INT32_MAX;

  if (lhs->lower() >= 0 && rhs->lower() >= 0) {
    // Both operands are non-negative, so the result won't be less than either.
    lower = std::max(lhs->lower(), rhs->lower());
    // The result will have leading zeros where both operands have leading
    // zeros. CountLeadingZeroes32 of a non-negative int32 will at least be 1
    // to account for the bit of sign.
    upper = int32_t(UINT32_MAX >> std::min(CountLeadingZeroes32(lhs->upper()),
                                           CountLeadingZeroes32(rhs->upper())));
  } else {
    // The result will have leading ones where either operand has leading ones.
    // A negative operand forces the result negative (upper = -1), and the
    // count of guaranteed leading one bits bounds the result from below.
    if (lhs->upper() < 0) {
      unsigned leadingOnes = CountLeadingZeroes32(~lhs->lower());
      lower = std::max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
      upper = -1;
    }
    if (rhs->upper() < 0) {
      unsigned leadingOnes = CountLeadingZeroes32(~rhs->lower());
      lower = std::max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
      upper = -1;
    }
  }

  return Range::NewInt32Range(alloc, lower, upper);
}
// Compute a range covering lhs ^ rhs over int32 operands.
Range* Range::xor_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  int32_t lhsLower = lhs->lower();
  int32_t lhsUpper = lhs->upper();
  int32_t rhsLower = rhs->lower();
  int32_t rhsUpper = rhs->upper();
  bool invertAfter = false;

  // If either operand is negative, bitwise-negate it, and arrange to negate
  // the result; ~((~x)^y) == x^y. If both are negative the negations on the
  // result cancel each other out; effectively this is (~x)^(~y) == x^y.
  // These transformations reduce the number of cases we have to handle below.
  // Note that bitwise negation reverses the ordering, so the bounds swap.
  if (lhsUpper < 0) {
    lhsLower = ~lhsLower;
    lhsUpper = ~lhsUpper;
    std::swap(lhsLower, lhsUpper);
    invertAfter = !invertAfter;
  }
  if (rhsUpper < 0) {
    rhsLower = ~rhsLower;
    rhsUpper = ~rhsUpper;
    std::swap(rhsLower, rhsUpper);
    invertAfter = !invertAfter;
  }

  // Handle cases where lhs or rhs is always zero specially, because they're
  // easy cases where we can be perfectly precise, and because it protects the
  // CountLeadingZeroes32 calls below from seeing 0 operands, which would be
  // undefined behavior.
  int32_t lower = INT32_MIN;
  int32_t upper = INT32_MAX;
  if (lhsLower == 0 && lhsUpper == 0) {
    // 0 ^ y == y.
    upper = rhsUpper;
    lower = rhsLower;
  } else if (rhsLower == 0 && rhsUpper == 0) {
    // x ^ 0 == x.
    upper = lhsUpper;
    lower = lhsLower;
  } else if (lhsLower >= 0 && rhsLower >= 0) {
    // Both operands are non-negative. The result will be non-negative.
    lower = 0;
    // To compute the upper value, take each operand's upper value and
    // set all bits that don't correspond to leading zero bits in the
    // other to one. For each one, this gives an upper bound for the
    // result, so we can take the minimum between the two.
    unsigned lhsLeadingZeros = CountLeadingZeroes32(lhsUpper);
    unsigned rhsLeadingZeros = CountLeadingZeroes32(rhsUpper);
    upper = std::min(rhsUpper | int32_t(UINT32_MAX >> lhsLeadingZeros),
                     lhsUpper | int32_t(UINT32_MAX >> rhsLeadingZeros));
  }

  // If we bitwise-negated one (but not both) of the operands above, apply the
  // bitwise-negate to the result, completing ~((~x)^y) == x^y.
  if (invertAfter) {
    lower = ~lower;
    upper = ~upper;
    std::swap(lower, upper);
  }

  return Range::NewInt32Range(alloc, lower, upper);
}
// Compute a range covering ~op over an int32 operand.
Range* Range::not_(TempAllocator& alloc, const Range* op) {
  MOZ_ASSERT(op->isInt32());
  // ~x == -x - 1 reverses the ordering, so the two bounds swap roles.
  int32_t notLower = ~op->upper();
  int32_t notUpper = ~op->lower();
  return Range::NewInt32Range(alloc, notLower, notUpper);
}
// Compute a range covering lhs * rhs.
Range* Range::mul(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);

  // A product is -0 when one factor may be negative (sign bit set) while
  // the other may be a finite non-negative value, in either order.
  NegativeZeroFlag newMayIncludeNegativeZero = NegativeZeroFlag(
      (lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
      (rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative()));

  uint16_t exponent;
  if (!lhs->canBeInfiniteOrNaN() && !rhs->canBeInfiniteOrNaN()) {
    // Two finite values. The bit width of the product is at most the sum of
    // the operands' bit widths; overflowing that saturates to infinity.
    exponent = lhs->numBits() + rhs->numBits() - 1;
    if (exponent > Range::MaxFiniteExponent) {
      exponent = Range::IncludesInfinity;
    }
  } else if (!lhs->canBeNaN() && !rhs->canBeNaN() &&
             !(lhs->canBeZero() && rhs->canBeInfiniteOrNaN()) &&
             !(rhs->canBeZero() && lhs->canBeInfiniteOrNaN())) {
    // Two values that multiplied together won't produce a NaN.
    // (0 * Infinity is the NaN-producing combination excluded above.)
    exponent = Range::IncludesInfinity;
  } else {
    // Could be anything.
    exponent = Range::IncludesInfinityAndNaN;
  }

  if (MissingAnyInt32Bounds(lhs, rhs)) {
    return new (alloc)
        Range(NoInt32LowerBound, NoInt32UpperBound, newCanHaveFractionalPart,
              newMayIncludeNegativeZero, exponent);
  }
  // With full int32 bounds, the extreme products are among the four corner
  // combinations; compute them in 64 bits to avoid overflow.
  int64_t a = (int64_t)lhs->lower() * (int64_t)rhs->lower();
  int64_t b = (int64_t)lhs->lower() * (int64_t)rhs->upper();
  int64_t c = (int64_t)lhs->upper() * (int64_t)rhs->lower();
  int64_t d = (int64_t)lhs->upper() * (int64_t)rhs->upper();
  return new (alloc)
      Range(std::min(std::min(a, b), std::min(c, d)),
            std::max(std::max(a, b), std::max(c, d)), newCanHaveFractionalPart,
            newMayIncludeNegativeZero, exponent);
}
// Compute a range covering lhs << c with a constant shift count.
Range* Range::lsh(TempAllocator& alloc, const Range* lhs, int32_t c) {
  MOZ_ASSERT(lhs->isInt32());
  // Only the low five bits of the count take part in the shift.
  int32_t shift = c & 0x1f;

  // If the shift doesn't lose bits or shift bits into the sign bit, we
  // can simply compute the correct range by shifting.
  // The test shifts each bound left and then back; the extra << 1 / >> 1
  // also verifies that the sign bit is unaffected. If the round trip
  // reproduces the original bound, no information was lost.
  if ((int32_t)((uint32_t)lhs->lower() << shift << 1 >> shift >> 1) ==
          lhs->lower() &&
      (int32_t)((uint32_t)lhs->upper() << shift << 1 >> shift >> 1) ==
          lhs->upper()) {
    return Range::NewInt32Range(alloc, uint32_t(lhs->lower()) << shift,
                                uint32_t(lhs->upper()) << shift);
  }

  // Otherwise the shift may wrap; any int32 result is possible.
  return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}
// Compute a range covering lhs >> c with a constant shift count.
Range* Range::rsh(TempAllocator& alloc, const Range* lhs, int32_t c) {
  MOZ_ASSERT(lhs->isInt32());
  // Only the low five bits of the count take part in the shift.
  int32_t shift = c & 0x1f;
  // Arithmetic right shift is monotonic, so shifting both bounds is exact.
  int32_t shiftedLower = lhs->lower() >> shift;
  int32_t shiftedUpper = lhs->upper() >> shift;
  return Range::NewInt32Range(alloc, shiftedLower, shiftedUpper);
}
// Compute a range covering lhs >>> c with a constant shift count.
Range* Range::ursh(TempAllocator& alloc, const Range* lhs, int32_t c) {
  // ursh's left operand is uint32, not int32, but for range analysis we
  // currently approximate it as int32. We assume here that the range has
  // already been adjusted accordingly by our callers.
  MOZ_ASSERT(lhs->isInt32());
  int32_t shift = c & 0x1f;

  // When the sign of the value is fixed (always non-negative or always
  // negative), the uint32 reinterpretation of the range is monotonic, so
  // shifting both bounds computes the exact result.
  bool signIsKnown = lhs->isFiniteNonNegative() || lhs->isFiniteNegative();
  if (!signIsKnown) {
    // Otherwise return the most general range after the shift.
    return Range::NewUInt32Range(alloc, 0, UINT32_MAX >> shift);
  }

  return Range::NewUInt32Range(alloc, uint32_t(lhs->lower()) >> shift,
                               uint32_t(lhs->upper()) >> shift);
}
// Compute a range covering lhs << rhs with a non-constant shift count.
Range* Range::lsh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  // A variable shift can move any bit into the sign position, so no result
  // can be ruled out; report the full int32 range.
  return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}
// Compute a range covering lhs >> rhs with a range of shift counts.
Range* Range::rsh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());

  // Canonicalize the shift range to 0 to 31.
  int32_t shiftLower = rhs->lower();
  int32_t shiftUpper = rhs->upper();
  if ((int64_t(shiftUpper) - int64_t(shiftLower)) >= 31) {
    // The count range spans at least 32 values, so after masking with 0x1f
    // every shift amount is possible.
    shiftLower = 0;
    shiftUpper = 31;
  } else {
    shiftLower &= 0x1f;
    shiftUpper &= 0x1f;
    if (shiftLower > shiftUpper) {
      // Masking wrapped the interval around; be conservative and allow
      // every shift amount.
      shiftLower = 0;
      shiftUpper = 31;
    }
  }
  MOZ_ASSERT(shiftLower >= 0 && shiftUpper <= 31);

  // The lhs bounds are signed, thus the minimum is either the lower bound
  // shift by the smallest shift if negative or the lower bound shifted by the
  // biggest shift otherwise. And the opposite for the maximum.
  int32_t lhsLower = lhs->lower();
  int32_t min = lhsLower < 0 ? lhsLower >> shiftLower : lhsLower >> shiftUpper;
  int32_t lhsUpper = lhs->upper();
  int32_t max = lhsUpper >= 0 ? lhsUpper >> shiftLower : lhsUpper >> shiftUpper;

  return Range::NewInt32Range(alloc, min, max);
}
// Compute a range covering lhs >>> rhs with a non-constant shift count.
Range* Range::ursh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // ursh's left operand is uint32, not int32, but for range analysis we
  // currently approximate it as int32. We assume here that the range has
  // already been adjusted accordingly by our callers.
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  // The result is always unsigned. When lhs is known finite and
  // non-negative, shifting right can only shrink it, so its existing upper
  // bound still applies; otherwise anything up to UINT32_MAX is possible.
  uint32_t bound =
      lhs->isFiniteNonNegative() ? uint32_t(lhs->upper()) : UINT32_MAX;
  return Range::NewUInt32Range(alloc, 0, bound);
}
// Compute a range covering Math.abs(op).
Range* Range::abs(TempAllocator& alloc, const Range* op) {
  int32_t l = op->lower_;
  int32_t u = op->upper_;
  FractionalPartFlag canHaveFractionalPart = op->canHaveFractionalPart_;

  // Abs never produces a negative zero.
  NegativeZeroFlag canBeNegativeZero = ExcludesNegativeZero;

  // New lower bound: max(0, l, -u); new upper bound: max(0, u, -l).
  // -INT32_MIN overflows int32, so negating INT32_MIN saturates to
  // INT32_MAX. The result always has a lower bound (it's >= 0), but only
  // has an int32 upper bound if op did and abs(l) stays within int32.
  return new (alloc) Range(
      std::max(std::max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u), true,
      std::max(std::max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
      op->hasInt32Bounds() && l != INT32_MIN, canHaveFractionalPart,
      canBeNegativeZero, op->max_exponent_);
}
// Compute a range covering Math.min(lhs, rhs); returns nullptr when the
// result may be NaN (no useful range).
Range* Range::min(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // If either operand is NaN, the result is NaN.
  if (lhs->canBeNaN() || rhs->canBeNaN()) {
    return nullptr;
  }

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(lhs->canBeNegativeZero_ || rhs->canBeNegativeZero_);

  // The minimum has an int32 lower bound only when both operands do, but
  // has an int32 upper bound when either does (the result can't exceed the
  // smaller of the two uppers).
  return new (alloc) Range(std::min(lhs->lower_, rhs->lower_),
                           lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
                           std::min(lhs->upper_, rhs->upper_),
                           lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
                           newCanHaveFractionalPart, newMayIncludeNegativeZero,
                           std::max(lhs->max_exponent_, rhs->max_exponent_));
}
// Compute a range covering Math.max(lhs, rhs); returns nullptr when the
// result may be NaN (no useful range).
Range* Range::max(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // If either operand is NaN, the result is NaN.
  if (lhs->canBeNaN() || rhs->canBeNaN()) {
    return nullptr;
  }

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(lhs->canBeNegativeZero_ || rhs->canBeNegativeZero_);

  // Mirror image of min: the maximum has an int32 lower bound when either
  // operand does, but an int32 upper bound only when both operands do.
  return new (alloc) Range(std::max(lhs->lower_, rhs->lower_),
                           lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
                           std::max(lhs->upper_, rhs->upper_),
                           lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
                           newCanHaveFractionalPart, newMayIncludeNegativeZero,
                           std::max(lhs->max_exponent_, rhs->max_exponent_));
}
// Compute a range covering Math.floor(op).
Range* Range::floor(TempAllocator& alloc, const Range* op) {
  Range* copy = new (alloc) Range(*op);
  // Decrement the lower bound of the copied range if op has a fractional
  // part and the lower bound is an int32 bound, since flooring a fractional
  // value can drop below the (ceil'd) integer lower bound. We avoid the
  // decrement when op has a fractional part but lower_ >= JSVAL_INT_MAX.
  if (op->canHaveFractionalPart() && op->hasInt32LowerBound()) {
    copy->setLowerInit(int64_t(copy->lower_) - 1);
  }

  // Also refine max_exponent_ because floor may have decremented int value
  // If we've got int32 defined bounds, just deduce it using defined bounds.
  // But, if we don't have those, value's max_exponent_ may have changed.
  // Because we're looking to maintain an over estimation, if we can,
  // we increment it.
  if (copy->hasInt32Bounds())
    copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
  else if (copy->max_exponent_ < MaxFiniteExponent)
    copy->max_exponent_++;

  // The result of floor is always an integer.
  copy->canHaveFractionalPart_ = ExcludesFractionalParts;
  copy->assertInvariants();
  return copy;
}
// Compute a range covering Math.ceil(op).
Range* Range::ceil(TempAllocator& alloc, const Range* op) {
  Range* copy = new (alloc) Range(*op);

  // We need to refine max_exponent_ because ceil may have incremented the int
  // value. If we have got int32 bounds defined, just deduce it using the
  // defined bounds. Else we can just increment its value, as we are looking to
  // maintain an over estimation.
  if (copy->hasInt32Bounds()) {
    copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
  } else if (copy->max_exponent_ < MaxFiniteExponent) {
    copy->max_exponent_++;
  }

  // If the range is definitely above 0 or below -1, we don't need to include
  // -0; otherwise we do, since ceil maps (-1, 0] inputs to -0 or 0.
  copy->canBeNegativeZero_ = ((copy->lower_ > 0) || (copy->upper_ <= -1))
                                 ? copy->canBeNegativeZero_
                                 : IncludesNegativeZero;

  // The result of ceil is always an integer.
  copy->canHaveFractionalPart_ = ExcludesFractionalParts;
  copy->assertInvariants();
  return copy;
}
// Compute a range covering Math.sign(op); returns nullptr when the input
// (and thus the result) may be NaN.
Range* Range::sign(TempAllocator& alloc, const Range* op) {
  if (op->canBeNaN()) {
    return nullptr;
  }

  // Clamp each bound into [-1, 1]: sign only produces -1, -0, 0, or 1.
  int32_t signLower = std::max(std::min(op->lower_, 1), -1);
  int32_t signUpper = std::max(std::min(op->upper_, 1), -1);

  // The result is integral, with exponent 0; it can be -0 exactly when the
  // input can be -0.
  return new (alloc) Range(signLower, signUpper, Range::ExcludesFractionalParts,
                           NegativeZeroFlag(op->canBeNegativeZero()), 0);
}
// Compute the range after an operation that replaces NaN with zero.
Range* Range::NaNToZero(TempAllocator& alloc, const Range* op) {
  Range* copy = new (alloc) Range(*op);
  if (copy->canBeNaN()) {
    // NaN is gone after the conversion; infinities may remain.
    copy->max_exponent_ = Range::IncludesInfinity;
    if (!copy->canBeZero()) {
      // The NaN case becomes zero, so widen the range to include it.
      Range zero;
      zero.setDoubleSingleton(0);
      copy->unionWith(&zero);
    }
  }
  // The result range also excludes negative zero.
  copy->refineToExcludeNegativeZero();
  return copy;
}
// Return whether lhs * rhs can produce a negative zero.
bool Range::negativeZeroMul(const Range* lhs, const Range* rhs) {
  // The result can only be negative zero if both sides are finite and they
  // have differing signs: one factor may carry the sign bit while the other
  // may be a finite non-negative value, in either order.
  bool lhsNegativeRhsPositive =
      lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative();
  bool rhsNegativeLhsPositive =
      rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative();
  return lhsNegativeRhsPositive || rhsNegativeLhsPositive;
}
// Replace this range's contents with |other|'s; returns true when anything
// actually changed.
bool Range::update(const Range* other) {
  // First determine whether every field already matches.
  bool identical = lower_ == other->lower_ &&
                   hasInt32LowerBound_ == other->hasInt32LowerBound_ &&
                   upper_ == other->upper_ &&
                   hasInt32UpperBound_ == other->hasInt32UpperBound_ &&
                   canHaveFractionalPart_ == other->canHaveFractionalPart_ &&
                   canBeNegativeZero_ == other->canBeNegativeZero_ &&
                   max_exponent_ == other->max_exponent_;
  if (identical) {
    return false;
  }

  // Something differs: copy every field over and re-check the invariants.
  lower_ = other->lower_;
  hasInt32LowerBound_ = other->hasInt32LowerBound_;
  upper_ = other->upper_;
  hasInt32UpperBound_ = other->hasInt32UpperBound_;
  canHaveFractionalPart_ = other->canHaveFractionalPart_;
  canBeNegativeZero_ = other->canBeNegativeZero_;
  max_exponent_ = other->max_exponent_;
  assertInvariants();
  return true;
}
///////////////////////////////////////////////////////////////////////////////
// Range Computation for MIR Nodes
///////////////////////////////////////////////////////////////////////////////
// Compute the phi's range as the union of its reachable operands' ranges.
void MPhi::computeRange(TempAllocator& alloc) {
  // Ranges are only tracked for int32 and double phis.
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }

  Range* range = nullptr;
  for (size_t i = 0, e = numOperands(); i < e; i++) {
    // Inputs flowing in from unreachable blocks can't contribute values.
    if (getOperand(i)->block()->unreachable()) {
      JitSpew(JitSpew_Range, "Ignoring unreachable input %u",
              getOperand(i)->id());
      continue;
    }

    // Peek at the pre-bailout range so we can take a short-cut; if any of
    // the operands has an unknown range, this phi has an unknown range.
    if (!getOperand(i)->range()) {
      return;
    }

    Range input(getOperand(i));

    if (range) {
      range->unionWith(&input);
    } else {
      range = new (alloc) Range(input);
    }
  }

  setRange(range);
}
// Refine the operand's range with the comparison this beta node encodes;
// an empty intersection proves the block unreachable.
void MBeta::computeRange(TempAllocator& alloc) {
  bool emptyRange = false;

  Range opRange(getOperand(0));
  Range* range = Range::intersect(alloc, &opRange, comparison_, &emptyRange);
  if (!emptyRange) {
    setRange(range);
    return;
  }

  JitSpew(JitSpew_Range, "Marking block for inst %u unreachable", id());
  block()->setUnreachableUnchecked();
}
// Give a constant a singleton range matching its value.
void MConstant::computeRange(TempAllocator& alloc) {
  // Numeric constants get an exact singleton double range.
  if (isTypeRepresentableAsDouble()) {
    setRange(Range::NewDoubleSingletonRange(alloc, numberToDouble()));
    return;
  }
  // Booleans are modelled as the int32 singleton 0 or 1.
  if (type() == MIRType::Boolean) {
    bool b = toBoolean();
    setRange(Range::NewInt32Range(alloc, b, b));
  }
}
void MCharCodeAt::computeRange(TempAllocator& alloc) {
  // ECMA 262 says the result is a UTF-16 code unit: non-negative and at
  // most 0xffff (65535).
  static constexpr int32_t kMaxCodeUnit = 0xffff;
  setRange(Range::NewInt32Range(alloc, 0, kMaxCodeUnit));
}
void MClampToUint8::computeRange(TempAllocator& alloc) {
  // Clamping to uint8 always yields a value in [0, 255].
  setRange(Range::NewUInt32Range(alloc, 0, UINT8_MAX));
}
// Compute the range of a bitwise AND from its operands' ranges.
void MBitAnd::computeRange(TempAllocator& alloc) {
  // Only int32 bitwise results are modelled.
  if (type() != MIRType::Int32) {
    return;
  }
  // Both operands behave as if truncated to int32 before the AND.
  Range lhsRange(getOperand(0));
  Range rhsRange(getOperand(1));
  lhsRange.wrapAroundToInt32();
  rhsRange.wrapAroundToInt32();

  setRange(Range::and_(alloc, &lhsRange, &rhsRange));
}
// Compute the range of a bitwise OR from its operands' ranges.
void MBitOr::computeRange(TempAllocator& alloc) {
  // Only int32 bitwise results are modelled.
  if (type() != MIRType::Int32) {
    return;
  }
  // Both operands behave as if truncated to int32 before the OR.
  Range lhsRange(getOperand(0));
  Range rhsRange(getOperand(1));
  lhsRange.wrapAroundToInt32();
  rhsRange.wrapAroundToInt32();

  setRange(Range::or_(alloc, &lhsRange, &rhsRange));
}
// Compute the range of a bitwise XOR from its operands' ranges.
void MBitXor::computeRange(TempAllocator& alloc) {
  // Only int32 bitwise results are modelled.
  if (type() != MIRType::Int32) {
    return;
  }
  // Both operands behave as if truncated to int32 before the XOR.
  Range lhsRange(getOperand(0));
  Range rhsRange(getOperand(1));
  lhsRange.wrapAroundToInt32();
  rhsRange.wrapAroundToInt32();

  setRange(Range::xor_(alloc, &lhsRange, &rhsRange));
}
// Compute the range of a bitwise NOT from its operand's range.
void MBitNot::computeRange(TempAllocator& alloc) {
  // Int64 bitwise-not is not modelled by range analysis.
  if (type() == MIRType::Int64) {
    return;
  }
  MOZ_ASSERT(type() == MIRType::Int32);

  // The operand behaves as if truncated to int32 before negation.
  Range operandRange(getOperand(0));
  operandRange.wrapAroundToInt32();

  setRange(Range::not_(alloc, &operandRange));
}
// Compute the range of a left shift from its operands' ranges.
void MLsh::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }
  Range lhsRange(getOperand(0));
  Range rhsRange(getOperand(1));
  lhsRange.wrapAroundToInt32();

  // A constant shift count allows a far more precise range than a range of
  // possible counts does.
  MConstant* shiftConst = getOperand(1)->maybeConstantValue();
  if (shiftConst && shiftConst->type() == MIRType::Int32) {
    setRange(Range::lsh(alloc, &lhsRange, shiftConst->toInt32()));
    return;
  }

  rhsRange.wrapAroundToShiftCount();
  setRange(Range::lsh(alloc, &lhsRange, &rhsRange));
}
// Compute the range of an arithmetic right shift from its operands' ranges.
void MRsh::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }
  Range lhsRange(getOperand(0));
  Range rhsRange(getOperand(1));
  lhsRange.wrapAroundToInt32();

  // A constant shift count allows a far more precise range than a range of
  // possible counts does.
  MConstant* shiftConst = getOperand(1)->maybeConstantValue();
  if (shiftConst && shiftConst->type() == MIRType::Int32) {
    setRange(Range::rsh(alloc, &lhsRange, shiftConst->toInt32()));
    return;
  }

  rhsRange.wrapAroundToShiftCount();
  setRange(Range::rsh(alloc, &lhsRange, &rhsRange));
}
// Compute the range of an unsigned right shift from its operands' ranges.
void MUrsh::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }
  Range lhsRange(getOperand(0));
  Range rhsRange(getOperand(1));

  // ursh can be thought of as converting its left operand to uint32, or it
  // can be thought of as converting its left operand to int32, and then
  // reinterpreting the int32 bits as a uint32 value. Both approaches yield
  // the same result. Since we lack support for full uint32 ranges, we use
  // the second interpretation, though it does cause us to be conservative.
  lhsRange.wrapAroundToInt32();
  rhsRange.wrapAroundToShiftCount();

  // A constant shift count allows a more precise range.
  MConstant* shiftConst = getOperand(1)->maybeConstantValue();
  if (shiftConst && shiftConst->type() == MIRType::Int32) {
    setRange(Range::ursh(alloc, &lhsRange, shiftConst->toInt32()));
  } else {
    setRange(Range::ursh(alloc, &lhsRange, &rhsRange));
  }

  MOZ_ASSERT(range()->lower() >= 0);
}
// Compute the range of Math.abs from its operand's range.
void MAbs::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }

  Range inputRange(getOperand(0));
  Range* absRange = Range::abs(alloc, &inputRange);
  // A truncating abs wraps around to int32 (e.g. abs of INT32_MIN).
  if (implicitTruncate_) {
    absRange->wrapAroundToInt32();
  }
  setRange(absRange);
}
// Compute the range of Math.floor from its operand's range.
void MFloor::computeRange(TempAllocator& alloc) {
  Range inputRange(getOperand(0));
  setRange(Range::floor(alloc, &inputRange));
}
// Compute the range of Math.ceil from its operand's range.
void MCeil::computeRange(TempAllocator& alloc) {
  Range inputRange(getOperand(0));
  setRange(Range::ceil(alloc, &inputRange));
}
// Count-leading-zeros of a 32-bit value always yields a count in [0, 32].
void MClz::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }
  setRange(Range::NewUInt32Range(alloc, 0, 32));
}
// Count-trailing-zeros of a 32-bit value always yields a count in [0, 32].
void MCtz::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }
  setRange(Range::NewUInt32Range(alloc, 0, 32));
}
// Population count of a 32-bit value always yields a count in [0, 32].
void MPopcnt::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }
  setRange(Range::NewUInt32Range(alloc, 0, 32));
}
// Compute the range of Math.min/Math.max from the operands' ranges.
void MMinMax::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }

  Range lhsRange(getOperand(0));
  Range rhsRange(getOperand(1));
  // Dispatch to the matching Range combinator for this node's direction.
  if (isMax()) {
    setRange(Range::max(alloc, &lhsRange, &rhsRange));
  } else {
    setRange(Range::min(alloc, &lhsRange, &rhsRange));
  }
}
// Compute the range of an addition from its operands' ranges.
void MAdd::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range lhsRange(getOperand(0));
  Range rhsRange(getOperand(1));
  Range* sum = Range::add(alloc, &lhsRange, &rhsRange);
  // A truncated addition wraps around to int32 on overflow.
  if (isTruncated()) {
    sum->wrapAroundToInt32();
  }
  setRange(sum);
}
// Compute the range of a subtraction from its operands' ranges.
void MSub::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range lhsRange(getOperand(0));
  Range rhsRange(getOperand(1));
  Range* difference = Range::sub(alloc, &lhsRange, &rhsRange);
  // A truncated subtraction wraps around to int32 on overflow.
  if (isTruncated()) {
    difference->wrapAroundToInt32();
  }
  setRange(difference);
}
// Compute the range of a multiplication from its operands' ranges, and
// refine this node's own negative-zero flag along the way.
void MMul::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range left(getOperand(0));
  Range right(getOperand(1));
  // If this node currently thinks it can produce -0, re-check using the
  // operand ranges, which may rule that out.
  if (canBeNegativeZero()) {
    canBeNegativeZero_ = Range::negativeZeroMul(&left, &right);
  }
  Range* next = Range::mul(alloc, &left, &right);
  if (!next->canBeNegativeZero()) {
    canBeNegativeZero_ = false;
  }
  // Truncated multiplications could overflow in both directions
  if (isTruncated()) {
    next->wrapAroundToInt32();
  }
  setRange(next);
}
// Compute the range of a modulus operation, handling the unsigned case,
// NaN-producing cases, and the sign of the result.
void MMod::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range lhs(getOperand(0));
  Range rhs(getOperand(1));

  // If either operand is a NaN, the result is NaN. This also conservatively
  // handles Infinity cases.
  if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds()) {
    return;
  }

  // If RHS can be zero, the result can be NaN.
  if (rhs.lower() <= 0 && rhs.upper() >= 0) {
    return;
  }

  // If both operands are non-negative integers, we can optimize this to an
  // unsigned mod.
  if (type() == MIRType::Int32 && rhs.lower() > 0) {
    bool hasDoubles = lhs.lower() < 0 || lhs.canHaveFractionalPart() ||
                      rhs.canHaveFractionalPart();
    // It is not possible to check that lhs.lower() >= 0, since the range
    // of a ursh with rhs a 0 constant is wrapped around the int32 range in
    // Range::Range(). However, IsUint32Type() will only return true for
    // nodes that lie in the range [0, UINT32_MAX].
    bool hasUint32s =
        IsUint32Type(getOperand(0)) &&
        getOperand(1)->type() == MIRType::Int32 &&
        (IsUint32Type(getOperand(1)) || getOperand(1)->isConstant());
    if (!hasDoubles || hasUint32s) {
      unsigned_ = true;
    }
  }

  // For unsigned mod, we have to convert both operands to unsigned.
  // Note that we handled the case of a zero rhs above.
  if (unsigned_) {
    // The result of an unsigned mod will never be unsigned-greater than
    // either operand.
    uint32_t lhsBound = std::max<uint32_t>(lhs.lower(), lhs.upper());
    uint32_t rhsBound = std::max<uint32_t>(rhs.lower(), rhs.upper());

    // If either range crosses through -1 as a signed value, it could be
    // the maximum unsigned value when interpreted as unsigned. If the range
    // doesn't include -1, then the simple max value we computed above is
    // correct.
    if (lhs.lower() <= -1 && lhs.upper() >= -1) {
      lhsBound = UINT32_MAX;
    }
    if (rhs.lower() <= -1 && rhs.upper() >= -1) {
      rhsBound = UINT32_MAX;
    }

    // The result will never be equal to the rhs, and we shouldn't have
    // any rounding to worry about.
    MOZ_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
    --rhsBound;

    // This gives us two upper bounds, so we can take the best one.
    setRange(Range::NewUInt32Range(alloc, 0, std::min(lhsBound, rhsBound)));
    return;
  }

  // Math.abs(lhs % rhs) == Math.abs(lhs) % Math.abs(rhs).
  // First, the absolute value of the result will always be less than the
  // absolute value of rhs. (And if rhs is zero, the result is NaN).
  int64_t a = Abs<int64_t>(rhs.lower());
  int64_t b = Abs<int64_t>(rhs.upper());
  if (a == 0 && b == 0) {
    return;
  }
  int64_t rhsAbsBound = std::max(a, b);

  // If the value is known to be integer, less-than abs(rhs) is equivalent
  // to less-than-or-equal abs(rhs)-1. This is important for being able to
  // say that the result of x%256 is an 8-bit unsigned number.
  if (!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart()) {
    --rhsAbsBound;
  }

  // Next, the absolute value of the result will never be greater than the
  // absolute value of lhs.
  int64_t lhsAbsBound =
      std::max(Abs<int64_t>(lhs.lower()), Abs<int64_t>(lhs.upper()));

  // This gives us two upper bounds, so we can take the best one.
  int64_t absBound = std::min(lhsAbsBound, rhsAbsBound);

  // Now consider the sign of the result.
  // If lhs is non-negative, the result will be non-negative.
  // If lhs is non-positive, the result will be non-positive.
  int64_t lower = lhs.lower() >= 0 ? 0 : -absBound;
  int64_t upper = lhs.upper() <= 0 ? 0 : absBound;

  Range::FractionalPartFlag newCanHaveFractionalPart =
      Range::FractionalPartFlag(lhs.canHaveFractionalPart() ||
                                rhs.canHaveFractionalPart());

  // If the lhs can have the sign bit set and we can return a zero, it'll be a
  // negative zero.
  Range::NegativeZeroFlag newMayIncludeNegativeZero =
      Range::NegativeZeroFlag(lhs.canHaveSignBitSet());

  setRange(new (alloc) Range(lower, upper, newCanHaveFractionalPart,
                             newMayIncludeNegativeZero,
                             std::min(lhs.exponent(), rhs.exponent())));
}
void MDiv::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range dividend(getOperand(0));
  Range divisor(getOperand(1));
  // A NaN operand yields a NaN result; lacking int32 bounds also
  // conservatively covers possible Infinity inputs, so bail out.
  if (!dividend.hasInt32Bounds() || !divisor.hasInt32Bounds()) {
    return;
  }
  // Keep it simple for now: dividing a non-negative value by a positive one
  // can't move the result further from zero than the dividend.
  if (dividend.lower() >= 0 && divisor.lower() >= 1) {
    setRange(new (alloc) Range(0, dividend.upper(),
                               Range::IncludesFractionalParts,
                               Range::IncludesNegativeZero,
                               dividend.exponent()));
    return;
  }
  if (unsigned_ && divisor.lower() >= 1) {
    // The unsigned flag is never set when the inputs may have fractional
    // parts...
    MOZ_ASSERT(!dividend.canHaveFractionalPart() &&
               !divisor.canHaveFractionalPart());
    // ...or when they may be negative zero.
    MOZ_ASSERT(!dividend.canBeNegativeZero() && !divisor.canBeNegativeZero());
    // Unsigned division by a non-zero divisor produces some uint32 value.
    setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
  }
}
void MSqrt::computeRange(TempAllocator& alloc) {
  Range op(getOperand(0));
  // Bail out when the input may be NaN or Infinity (no int32 bounds), or
  // when it may be a negative non-zero value (sqrt of which is NaN).
  if (!op.hasInt32Bounds() || op.lower() < 0) {
    return;
  }
  // Keep it simple: the sqrt of a non-negative value is never further from
  // zero than the value itself. The sqrt of an integer may still have a
  // fractional part.
  setRange(new (alloc) Range(0, op.upper(), Range::IncludesFractionalParts,
                             op.canBeNegativeZero(), op.exponent()));
}
void MToDouble::computeRange(TempAllocator& alloc) {
  // The conversion preserves the numeric value; copy the operand's range.
  Range* result = new (alloc) Range(getOperand(0));
  setRange(result);
}
// Intentionally computes no range for float32 conversions.
void MToFloat32::computeRange(TempAllocator& alloc) {}
void MTruncateToInt32::computeRange(TempAllocator& alloc) {
  // Copy the operand's range, then wrap it into the int32 domain to mirror
  // the truncation this instruction performs.
  Range* result = new (alloc) Range(getOperand(0));
  result->wrapAroundToInt32();
  setRange(result);
}
void MToNumberInt32::computeRange(TempAllocator& alloc) {
  // Deliberately not clamped to int32: this is the range *before* any
  // bailout happens.
  Range* result = new (alloc) Range(getOperand(0));
  setRange(result);
}
void MBooleanToInt32::computeRange(TempAllocator& alloc) {
  // A boolean converts to exactly 0 or 1.
  Range* result = Range::NewUInt32Range(alloc, 0, 1);
  setRange(result);
}
void MLimitedTruncate::computeRange(TempAllocator& alloc) {
  // Simply propagate the input's range.
  setRange(new (alloc) Range(input()));
}
// Returns the value range representable by elements of the given typed-array
// element type, or nullptr when no int32 range applies.
static Range* GetArrayBufferViewRange(TempAllocator& alloc, Scalar::Type type) {
  switch (type) {
    // Signed integer element types.
    case Scalar::Int8:
      return Range::NewInt32Range(alloc, INT8_MIN, INT8_MAX);
    case Scalar::Int16:
      return Range::NewInt32Range(alloc, INT16_MIN, INT16_MAX);
    case Scalar::Int32:
      return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);

    // Unsigned integer element types (Uint8Clamped shares Uint8's range).
    case Scalar::Uint8Clamped:
    case Scalar::Uint8:
      return Range::NewUInt32Range(alloc, 0, UINT8_MAX);
    case Scalar::Uint16:
      return Range::NewUInt32Range(alloc, 0, UINT16_MAX);
    case Scalar::Uint32:
      return Range::NewUInt32Range(alloc, 0, UINT32_MAX);

    // No int32 range is computed for these element types.
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    case Scalar::Int64:
    case Scalar::Simd128:
    case Scalar::Float32:
    case Scalar::Float64:
    case Scalar::MaxTypedArrayViewType:
      break;
  }
  return nullptr;
}
void MLoadUnboxedScalar::computeRange(TempAllocator& alloc) {
  // The result type is Int32. A Uint32 load may produce a value outside this
  // range, but a bailout handles those cases.
  Range* result = GetArrayBufferViewRange(alloc, storageType());
  setRange(result);
}
void MLoadDataViewElement::computeRange(TempAllocator& alloc) {
  // The result type is Int32. A Uint32 load may produce a value outside this
  // range, but a bailout handles those cases.
  Range* result = GetArrayBufferViewRange(alloc, storageType());
  setRange(result);
}
void MArrayLength::computeRange(TempAllocator& alloc) {
  MOZ_ASSERT(type() == MIRType::Int32);
  // An array length can be as large as UINT32_MAX, but a bailout fires when
  // the length exceeds INT32_MAX, so the observed range is [0, INT32_MAX].
  Range* result = Range::NewUInt32Range(alloc, 0, INT32_MAX);
  setRange(result);
}
void MInitializedLength::computeRange(TempAllocator& alloc) {
  // The initialized length is bounded by the dense-element limit.
  Range* result =
      Range::NewUInt32Range(alloc, 0, NativeObject::MAX_DENSE_ELEMENTS_COUNT);
  setRange(result);
}
void MArrayBufferViewLength::computeRange(TempAllocator& alloc) {
  // Only compute a range when buffer lengths are statically known to fit in
  // an int32.
  constexpr bool lengthFitsInt32 = ArrayBufferObject::MaxByteLength <= INT32_MAX;
  if constexpr (lengthFitsInt32) {
    setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
  }
}
void MArrayBufferViewByteOffset::computeRange(TempAllocator& alloc) {
  // Only compute a range when buffer byte offsets are statically known to
  // fit in an int32.
  constexpr bool offsetFitsInt32 = ArrayBufferObject::MaxByteLength <= INT32_MAX;
  if constexpr (offsetFitsInt32) {
    setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
  }
}
void MTypedArrayElementSize::computeRange(TempAllocator& alloc) {
  // Typed array element sizes never exceed sizeof(double) == 8 bytes; the
  // static_asserts generated below verify this for every element type.
  constexpr auto MaxTypedArraySize = sizeof(double);
#define ASSERT_MAX_SIZE(_, T, N)                \
  static_assert(sizeof(T) <= MaxTypedArraySize, \
                "unexpected typed array type exceeding 64-bits storage");
  JS_FOR_EACH_TYPED_ARRAY(ASSERT_MAX_SIZE)
#undef ASSERT_MAX_SIZE
  // The element size is therefore in [0, sizeof(double)].
  setRange(Range::NewUInt32Range(alloc, 0, MaxTypedArraySize));
}
void MStringLength::computeRange(TempAllocator& alloc) {
  static_assert(JSString::MAX_LENGTH <= UINT32_MAX,
                "NewUInt32Range requires a uint32 value");
  // A string's length lies in [0, JSString::MAX_LENGTH].
  setRange(Range::NewUInt32Range(alloc, 0, JSString::MAX_LENGTH));
}
void MArgumentsLength::computeRange(TempAllocator& alloc) {
  // This is a conservative upper bound on what |TooManyActualArguments|
  // checks. If exceeded, Ion will not be entered in the first place.
  static_assert(ARGS_LENGTH_MAX <= UINT32_MAX,
                "NewUInt32Range requires a uint32 value");
  setRange(Range::NewUInt32Range(alloc, 0, ARGS_LENGTH_MAX));
}
void MBoundsCheck::computeRange(TempAllocator& alloc) {
  // Propagate the incoming index range unchanged. The length() would also be
  // interesting, but it's enforced as a bailout check, and the range here is
  // the pre-bailout one.
  Range* result = new (alloc) Range(index());
  setRange(result);
}
void MSpectreMaskIndex::computeRange(TempAllocator& alloc) {
  // For now, simply propagate the incoming index range.
  Range* result = new (alloc) Range(index());
  setRange(result);
}
void MInt32ToIntPtr::computeRange(TempAllocator& alloc) {
  // The widening conversion preserves the value; reuse the input's range.
  Range* result = new (alloc) Range(input());
  setRange(result);
}
void MNonNegativeIntPtrToInt32::computeRange(TempAllocator& alloc) {
  // A bailout fires when the IntPtr value exceeds INT32_MAX, so the result
  // always lies in [0, INT32_MAX].
  Range* result = Range::NewUInt32Range(alloc, 0, INT32_MAX);
  setRange(result);
}
void MArrayPush::computeRange(TempAllocator& alloc) {
  MOZ_ASSERT(type() == MIRType::Int32);
  // MArrayPush returns the new array length; a new length that doesn't fit
  // in an Int32 triggers a bailout.
  Range* result = Range::NewUInt32Range(alloc, 0, INT32_MAX);
  setRange(result);
}
void MMathFunction::computeRange(TempAllocator& alloc) {
  Range opRange(getOperand(0));
  const UnaryMathFunction fun = function();
  // sin and cos (either implementation) map finite, non-NaN inputs into
  // [-1, 1]; other functions get no range here.
  const bool isSinOrCos = fun == UnaryMathFunction::SinNative ||
                          fun == UnaryMathFunction::SinFdlibm ||
                          fun == UnaryMathFunction::CosNative ||
                          fun == UnaryMathFunction::CosFdlibm;
  if (isSinOrCos && !opRange.canBeInfiniteOrNaN()) {
    setRange(Range::NewDoubleRange(alloc, -1.0, 1.0));
  }
}
void MSign::computeRange(TempAllocator& alloc) {
  // Derive the result range from the operand's range via Range::sign.
  Range operandRange(getOperand(0));
  setRange(Range::sign(alloc, &operandRange));
}
void MRandom::computeRange(TempAllocator& alloc) {
  // The result is a double between 0.0 and 1.0, and never negative zero.
  Range* result = Range::NewDoubleRange(alloc, 0.0, 1.0);
  result->refineToExcludeNegativeZero();
  setRange(result);
}
void MNaNToZero::computeRange(TempAllocator& alloc) {
  // Derive the output range from the input's via Range::NaNToZero.
  Range inputRange(input());
  setRange(Range::NaNToZero(alloc, &inputRange));
}
///////////////////////////////////////////////////////////////////////////////
// Range Analysis
///////////////////////////////////////////////////////////////////////////////
// Flip a branch direction: TRUE_BRANCH <-> FALSE_BRANCH.
static BranchDirection NegateBranchDirection(BranchDirection dir) {
  if (dir == FALSE_BRANCH) {
    return TRUE_BRANCH;
  }
  return FALSE_BRANCH;
}
// Analyze one natural loop: bound the number of backedge executions when
// possible, derive symbolic ranges for the header's phis, and hoist
// loop-invariant bounds checks into the preheader. Returns false only on
// allocation failure.
bool RangeAnalysis::analyzeLoop(MBasicBlock* header) {
  MOZ_ASSERT(header->hasUniqueBackedge());
  // Try to compute an upper bound on the number of times the loop backedge
  // will be taken. Look for tests that dominate the backedge and which have
  // an edge leaving the loop body.
  MBasicBlock* backedge = header->backedge();
  // Ignore trivial infinite loops.
  if (backedge == header) {
    return true;
  }
  bool canOsr;
  size_t numBlocks = MarkLoopBlocks(graph_, header, &canOsr);
  // Ignore broken loops.
  if (numBlocks == 0) {
    return true;
  }
  LoopIterationBound* iterationBound = nullptr;
  // Walk up the dominator tree from the backedge toward the header, looking
  // for a dominating branch whose other successor leaves the loop.
  MBasicBlock* block = backedge;
  do {
    BranchDirection direction;
    MTest* branch = block->immediateDominatorBranch(&direction);
    if (block == block->immediateDominator()) {
      break;
    }
    block = block->immediateDominator();
    if (branch) {
      // 'direction' leads into the loop; the negated direction is the edge
      // that may exit it.
      direction = NegateBranchDirection(direction);
      MBasicBlock* otherBlock = branch->branchSuccessor(direction);
      if (!otherBlock->isMarked()) {
        if (!alloc().ensureBallast()) {
          return false;
        }
        iterationBound = analyzeLoopIterationCount(header, branch, direction);
        if (iterationBound) {
          break;
        }
      }
    }
  } while (block != header);
  if (!iterationBound) {
    UnmarkLoopBlocks(graph_, header);
    return true;
  }
  if (!loopIterationBounds.append(iterationBound)) {
    return false;
  }
#ifdef DEBUG
  if (JitSpewEnabled(JitSpew_Range)) {
    Sprinter sp(GetJitContext()->cx);
    if (!sp.init()) {
      return false;
    }
    iterationBound->boundSum.dump(sp);
    JitSpew(JitSpew_Range, "computed symbolic bound on backedges: %s",
            sp.string());
  }
#endif
  // Try to compute symbolic bounds for the phi nodes at the head of this
  // loop, expressed in terms of the iteration bound just computed.
  for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd();
       iter++) {
    analyzeLoopPhi(iterationBound, *iter);
  }
  // Skip hoisting for wasm, and when a prior bounds-check bailout was
  // recorded for the outer script (see hadBoundsCheckBailout).
  if (!mir->compilingWasm() && !mir->outerInfo().hadBoundsCheckBailout()) {
    // Try to hoist any bounds checks from the loop using symbolic bounds.
    Vector<MBoundsCheck*, 0, JitAllocPolicy> hoistedChecks(alloc());
    for (ReversePostorderIterator iter(graph_.rpoBegin(header));
         iter != graph_.rpoEnd(); iter++) {
      MBasicBlock* block = *iter;
      if (!block->isMarked()) {
        continue;
      }
      for (MDefinitionIterator iter(block); iter; iter++) {
        MDefinition* def = *iter;
        if (def->isBoundsCheck() && def->isMovable()) {
          if (!alloc().ensureBallast()) {
            return false;
          }
          if (tryHoistBoundsCheck(header, def->toBoundsCheck())) {
            if (!hoistedChecks.append(def->toBoundsCheck())) {
              return false;
            }
          }
        }
      }
    }
    // Note: replace all uses of the original bounds check with the
    // actual index. This is usually done during bounds check elimination,
    // but in this case it's safe to do it here since the load/store is
    // definitely not loop-invariant, so we will never move it before
    // one of the bounds checks we just added.
    for (size_t i = 0; i < hoistedChecks.length(); i++) {
      MBoundsCheck* ins = hoistedChecks[i];
      ins->replaceAllUsesWith(ins->index());
      ins->block()->discard(ins);
    }
  }
  UnmarkLoopBlocks(graph_, header);
  return true;
}
// Unbox beta nodes in order to hoist instruction properly, and not be limited
// by the beta nodes which are added after each branch.
// Look through any chain of beta nodes to reach the underlying definition.
// Beta nodes are added after branches and would otherwise limit hoisting.
static inline MDefinition* DefinitionOrBetaInputDefinition(MDefinition* ins) {
  for (; ins->isBeta(); ins = ins->toBeta()->input()) {
  }
  return ins;
}
// Try to derive, from a loop-exit test, a symbolic upper bound on the number
// of times the loop's backedge executes. Returns null when no bound can be
// established (which is not an error).
LoopIterationBound* RangeAnalysis::analyzeLoopIterationCount(
    MBasicBlock* header, MTest* test, BranchDirection direction) {
  SimpleLinearSum lhs(nullptr, 0);
  MDefinition* rhs;
  bool lessEqual;
  if (!ExtractLinearInequality(test, direction, &lhs, &rhs, &lessEqual)) {
    return nullptr;
  }
  // Ensure the rhs is a loop invariant term.
  if (rhs && rhs->block()->isMarked()) {
    // The loop-variant term is on the rhs: swap the two sides and negate the
    // sense of the comparison so the induction candidate ends up on the lhs.
    if (lhs.term && lhs.term->block()->isMarked()) {
      return nullptr;
    }
    MDefinition* temp = lhs.term;
    lhs.term = rhs;
    rhs = temp;
    if (!SafeSub(0, lhs.constant, &lhs.constant)) {
      return nullptr;
    }
    lessEqual = !lessEqual;
  }
  MOZ_ASSERT_IF(rhs, !rhs->block()->isMarked());
  // Ensure the lhs is a phi node from the start of the loop body.
  if (!lhs.term || !lhs.term->isPhi() || lhs.term->block() != header) {
    return nullptr;
  }
  // Check that the value of the lhs changes by a constant amount with each
  // loop iteration. This requires that the lhs be written in every loop
  // iteration with a value that is a constant difference from its value at
  // the start of the iteration.
  if (lhs.term->toPhi()->numOperands() != 2) {
    return nullptr;
  }
  // The first operand of the phi should be the lhs' value at the start of
  // the first executed iteration, and not a value written which could
  // replace the second operand below during the middle of execution.
  MDefinition* lhsInitial = lhs.term->toPhi()->getLoopPredecessorOperand();
  if (lhsInitial->block()->isMarked()) {
    return nullptr;
  }
  // The second operand of the phi should be a value written by an add/sub
  // in every loop iteration, i.e. in a block which dominates the backedge.
  MDefinition* lhsWrite = DefinitionOrBetaInputDefinition(
      lhs.term->toPhi()->getLoopBackedgeOperand());
  if (!lhsWrite->isAdd() && !lhsWrite->isSub()) {
    return nullptr;
  }
  if (!lhsWrite->block()->isMarked()) {
    return nullptr;
  }
  // Walk the dominator chain from the backedge; the add/sub must be found
  // before reaching the header, i.e. it must dominate the backedge.
  MBasicBlock* bb = header->backedge();
  for (; bb != lhsWrite->block() && bb != header;
       bb = bb->immediateDominator()) {
  }
  if (bb != lhsWrite->block()) {
    return nullptr;
  }
  SimpleLinearSum lhsModified = ExtractLinearSum(lhsWrite);
  // Check that the value of the lhs at the backedge is of the form
  // 'old(lhs) + N'. We can be sure that old(lhs) is the value at the start
  // of the iteration, and not that written to lhs in a previous iteration,
  // as such a previous value could not appear directly in the addition:
  // it could not be stored in lhs as the lhs add/sub executes in every
  // iteration, and if it were stored in another variable its use here would
  // be as an operand to a phi node for that variable.
  if (lhsModified.term != lhs.term) {
    return nullptr;
  }
  LinearSum iterationBound(alloc());
  LinearSum currentIteration(alloc());
  // Only the two unit-step shapes are handled: increment-by-one exiting on
  // 'lhs < rhs', and decrement-by-one exiting on 'lhs <= rhs'.
  if (lhsModified.constant == 1 && !lessEqual) {
    // The value of lhs is 'initial(lhs) + iterCount' and this will end
    // execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound
    // on the number of backedges executed is:
    //
    // initial(lhs) + iterCount + lhsN == rhs
    // iterCount == rhsN - initial(lhs) - lhsN
    if (rhs) {
      if (!iterationBound.add(rhs, 1)) {
        return nullptr;
      }
    }
    if (!iterationBound.add(lhsInitial, -1)) {
      return nullptr;
    }
    int32_t lhsConstant;
    if (!SafeSub(0, lhs.constant, &lhsConstant)) {
      return nullptr;
    }
    if (!iterationBound.add(lhsConstant)) {
      return nullptr;
    }
    if (!currentIteration.add(lhs.term, 1)) {
      return nullptr;
    }
    if (!currentIteration.add(lhsInitial, -1)) {
      return nullptr;
    }
  } else if (lhsModified.constant == -1 && lessEqual) {
    // The value of lhs is 'initial(lhs) - iterCount'. Similar to the above
    // case, an upper bound on the number of backedges executed is:
    //
    // initial(lhs) - iterCount + lhsN == rhs
    // iterCount == initial(lhs) - rhs + lhsN
    if (!iterationBound.add(lhsInitial, 1)) {
      return nullptr;
    }
    if (rhs) {
      if (!iterationBound.add(rhs, -1)) {
        return nullptr;
      }
    }
    if (!iterationBound.add(lhs.constant)) {
      return nullptr;
    }
    if (!currentIteration.add(lhsInitial, 1)) {
      return nullptr;
    }
    if (!currentIteration.add(lhs.term, -1)) {
      return nullptr;
    }
  } else {
    return nullptr;
  }
  return new (alloc())
      LoopIterationBound(header, test, iterationBound, currentIteration);
}
// Attach symbolic lower/upper bounds to a loop-header phi, derived from the
// loop's iteration bound. No-op when the phi doesn't match the expected
// constant-step shape.
void RangeAnalysis::analyzeLoopPhi(LoopIterationBound* loopBound, MPhi* phi) {
  // Given a bound on the number of backedges taken, compute an upper and
  // lower bound for a phi node that may change by a constant amount each
  // iteration. Unlike for the case when computing the iteration bound
  // itself, the phi does not need to change the same amount every iteration,
  // but is required to change at most N and be either nondecreasing or
  // nonincreasing.
  MOZ_ASSERT(phi->numOperands() == 2);
  MDefinition* initial = phi->getLoopPredecessorOperand();
  if (initial->block()->isMarked()) {
    return;
  }
  SimpleLinearSum modified =
      ExtractLinearSum(phi->getLoopBackedgeOperand(), MathSpace::Infinite);
  // The backedge value must be 'phi + constant' with a non-zero constant.
  if (modified.term != phi || modified.constant == 0) {
    return;
  }
  if (!phi->range()) {
    phi->setRange(new (alloc()) Range(phi));
  }
  LinearSum initialSum(alloc());
  if (!initialSum.add(initial, 1)) {
    return;
  }
  // The phi may change by N each iteration, and is either nondecreasing or
  // nonincreasing. initial(phi) is either a lower or upper bound for the
  // phi, and initial(phi) + loopBound * N is either an upper or lower bound,
  // at all points within the loop, provided that loopBound >= 0.
  //
  // We are more interested, however, in the bound for phi at points
  // dominated by the loop bound's test; if the test dominates e.g. a bounds
  // check we want to hoist from the loop, using the value of the phi at the
  // head of the loop for this will usually be too imprecise to hoist the
  // check. These points will execute only if the backedge executes at least
  // one more time (as the test passed and the test dominates the backedge),
  // so we know both that loopBound >= 1 and that the phi's value has changed
  // at most loopBound - 1 times. Thus, another upper or lower bound for the
  // phi is initial(phi) + (loopBound - 1) * N, without requiring us to
  // ensure that loopBound >= 0.
  LinearSum limitSum(loopBound->boundSum);
  if (!limitSum.multiply(modified.constant) || !limitSum.add(initialSum)) {
    return;
  }
  // Subtract one step: this realizes the '(loopBound - 1) * N' refinement.
  int32_t negativeConstant;
  if (!SafeSub(0, modified.constant, &negativeConstant) ||
      !limitSum.add(negativeConstant)) {
    return;
  }
  Range* initRange = initial->range();
  if (modified.constant > 0) {
    // Nondecreasing: initial is the lower bound, the limit the upper bound.
    if (initRange && initRange->hasInt32LowerBound()) {
      phi->range()->refineLower(initRange->lower());
    }
    phi->range()->setSymbolicLower(
        SymbolicBound::New(alloc(), nullptr, initialSum));
    phi->range()->setSymbolicUpper(
        SymbolicBound::New(alloc(), loopBound, limitSum));
  } else {
    // Nonincreasing: initial is the upper bound, the limit the lower bound.
    if (initRange && initRange->hasInt32UpperBound()) {
      phi->range()->refineUpper(initRange->upper());
    }
    phi->range()->setSymbolicUpper(
        SymbolicBound::New(alloc(), nullptr, initialSum));
    phi->range()->setSymbolicLower(
        SymbolicBound::New(alloc(), loopBound, limitSum));
  }
  JitSpew(JitSpew_Range, "added symbolic range on %u", phi->id());
  SpewRange(phi);
}
// Whether bound is valid at the specified bounds check instruction in a loop,
// and may be used to hoist ins.
// Whether bound is valid at the specified bounds check instruction in a loop,
// and may be used to hoist ins.
static inline bool SymbolicBoundIsValid(MBasicBlock* header, MBoundsCheck* ins,
                                        const SymbolicBound* bound) {
  // Bounds that don't depend on a loop iteration count are always usable.
  if (!bound->loop) {
    return true;
  }
  if (ins->block() == header) {
    return false;
  }
  // Walk up the dominator tree: the bound is only usable when the loop
  // test's block dominates the bounds check.
  MBasicBlock* testBlock = bound->loop->test->block();
  MBasicBlock* bb = ins->block()->immediateDominator();
  while (bb != header && bb != testBlock) {
    bb = bb->immediateDominator();
  }
  return bb == testBlock;
}
// Try to replace an in-loop bounds check with equivalent loop-invariant
// checks inserted in the loop preheader, using the symbolic bounds computed
// for the index. Returns true when the checks were hoisted (the caller then
// removes the original check); false leaves the graph unchanged.
bool RangeAnalysis::tryHoistBoundsCheck(MBasicBlock* header,
                                        MBoundsCheck* ins) {
  // The bounds check's length must be loop invariant or a constant.
  MDefinition* length = DefinitionOrBetaInputDefinition(ins->length());
  if (length->block()->isMarked() && !length->isConstant()) {
    return false;
  }
  // The bounds check's index should not be loop invariant (else we would
  // already have hoisted it during LICM).
  SimpleLinearSum index = ExtractLinearSum(ins->index());
  if (!index.term || !index.term->block()->isMarked()) {
    return false;
  }
  // Check for a symbolic lower and upper bound on the index. If either
  // condition depends on an iteration bound for the loop, only hoist if
  // the bounds check is dominated by the iteration bound's test.
  if (!index.term->range()) {
    return false;
  }
  const SymbolicBound* lower = index.term->range()->symbolicLower();
  if (!lower || !SymbolicBoundIsValid(header, ins, lower)) {
    return false;
  }
  const SymbolicBound* upper = index.term->range()->symbolicUpper();
  if (!upper || !SymbolicBoundIsValid(header, ins, upper)) {
    return false;
  }
  MBasicBlock* preLoop = header->loopPredecessor();
  MOZ_ASSERT(!preLoop->isMarked());
  // Materialize the symbolic bounds as instructions in the preheader.
  MDefinition* lowerTerm = ConvertLinearSum(alloc(), preLoop, lower->sum,
                                            BailoutKind::HoistBoundsCheck);
  if (!lowerTerm) {
    return false;
  }
  MDefinition* upperTerm = ConvertLinearSum(alloc(), preLoop, upper->sum,
                                            BailoutKind::HoistBoundsCheck);
  if (!upperTerm) {
    return false;
  }
  // We are checking that index + indexConstant >= 0, and know that
  // index >= lowerTerm + lowerConstant. Thus, check that:
  //
  // lowerTerm + lowerConstant + indexConstant >= 0
  // lowerTerm >= -lowerConstant - indexConstant
  int32_t lowerConstant = 0;
  if (!SafeSub(lowerConstant, index.constant, &lowerConstant)) {
    return false;
  }
  if (!SafeSub(lowerConstant, lower->sum.constant(), &lowerConstant)) {
    return false;
  }
  // We are checking that index < boundsLength, and know that
  // index <= upperTerm + upperConstant. Thus, check that:
  //
  // upperTerm + upperConstant < boundsLength
  int32_t upperConstant = index.constant;
  if (!SafeAdd(upper->sum.constant(), upperConstant, &upperConstant)) {
    return false;
  }
  // Hoist the loop invariant lower bounds checks.
  MBoundsCheckLower* lowerCheck = MBoundsCheckLower::New(alloc(), lowerTerm);
  lowerCheck->setMinimum(lowerConstant);
  lowerCheck->computeRange(alloc());
  lowerCheck->collectRangeInfoPreTrunc();
  lowerCheck->setBailoutKind(BailoutKind::HoistBoundsCheck);
  preLoop->insertBefore(preLoop->lastIns(), lowerCheck);
  // A common pattern for iterating over typed arrays is this:
  //
  //   for (var i = 0; i < ta.length; i++) {
  //     use ta[i];
  //   }
  //
  // Here |upperTerm| (= ta.length) is a NonNegativeIntPtrToInt32 instruction.
  // Unwrap this if |length| is also an IntPtr so that we don't add an
  // unnecessary bounds check and Int32ToIntPtr below.
  if (upperTerm->isNonNegativeIntPtrToInt32() &&
      length->type() == MIRType::IntPtr) {
    upperTerm = upperTerm->toNonNegativeIntPtrToInt32()->input();
  }
  // Hoist the loop invariant upper bounds checks.
  if (upperTerm != length || upperConstant >= 0) {
    // Hoist the bound check's length if it isn't already loop invariant.
    if (length->block()->isMarked()) {
      MOZ_ASSERT(length->isConstant());
      MInstruction* lengthIns = length->toInstruction();
      lengthIns->block()->moveBefore(preLoop->lastIns(), lengthIns);
    }
    // If the length is IntPtr, convert the upperTerm to that as well for the
    // bounds check.
    if (length->type() == MIRType::IntPtr &&
        upperTerm->type() == MIRType::Int32) {
      upperTerm = MInt32ToIntPtr::New(alloc(), upperTerm);
      upperTerm->computeRange(alloc());
      upperTerm->collectRangeInfoPreTrunc();
      preLoop->insertBefore(preLoop->lastIns(), upperTerm->toInstruction());
    }
    MBoundsCheck* upperCheck = MBoundsCheck::New(alloc(), upperTerm, length);
    upperCheck->setMinimum(upperConstant);
    upperCheck->setMaximum(upperConstant);
    upperCheck->computeRange(alloc());
    upperCheck->collectRangeInfoPreTrunc();
    upperCheck->setBailoutKind(BailoutKind::HoistBoundsCheck);
    preLoop->insertBefore(preLoop->lastIns(), upperCheck);
  }
  return true;
}
// Main range-propagation pass: walk the graph in reverse postorder, compute
// a range for every definition, run per-loop analysis on loop headers, and
// do a first pre-truncation collection of range info. Returns false only on
// allocation failure.
bool RangeAnalysis::analyze() {
  JitSpew(JitSpew_Range, "Doing range propagation");
  for (ReversePostorderIterator iter(graph_.rpoBegin());
       iter != graph_.rpoEnd(); iter++) {
    MBasicBlock* block = *iter;
    // No blocks are supposed to be unreachable, except when we have an OSR
    // block, in which case the Value Numbering phase add fixup blocks which
    // are unreachable.
    MOZ_ASSERT(!block->unreachable() || graph_.osrBlock());
    // If the block's immediate dominator is unreachable, the block is
    // unreachable. Iterating in RPO, we'll always see the immediate
    // dominator before the block.
    if (block->immediateDominator()->unreachable()) {
      block->setUnreachableUnchecked();
      continue;
    }
    for (MDefinitionIterator iter(block); iter; iter++) {
      MDefinition* def = *iter;
      if (!alloc().ensureBallast()) {
        return false;
      }
      def->computeRange(alloc());
      JitSpew(JitSpew_Range, "computing range on %u", def->id());
      SpewRange(def);
    }
    // Beta node range analysis may have marked this block unreachable. If
    // so, it's no longer interesting to continue processing it.
    if (block->unreachable()) {
      continue;
    }
    if (block->isLoopHeader()) {
      if (!analyzeLoop(block)) {
        return false;
      }
    }
    // First pass at collecting range info - while the beta nodes are still
    // around and before truncation.
    for (MInstructionIterator iter(block->begin()); iter != block->end();
         iter++) {
      iter->collectRangeInfoPreTrunc();
    }
  }
  return true;
}
// Debug aid (enabled via JitOptions.checkRangeAnalysis): insert MAssertRange
// guards after definitions with interesting computed ranges so the ranges
// can be verified at runtime. Returns false only on allocation failure.
bool RangeAnalysis::addRangeAssertions() {
  if (!JitOptions.checkRangeAnalysis) {
    return true;
  }
  // Check the computed range for this instruction, if the option is set. Note
  // that this code is quite invasive; it adds numerous additional
  // instructions for each MInstruction with a computed range, and it uses
  // registers, so it also affects register allocation.
  for (ReversePostorderIterator iter(graph_.rpoBegin());
       iter != graph_.rpoEnd(); iter++) {
    MBasicBlock* block = *iter;
    // Do not add assertions in unreachable blocks.
    if (block->unreachable()) {
      continue;
    }
    for (MDefinitionIterator iter(block); iter; iter++) {
      MDefinition* ins = *iter;
      // Perform range checking for all numeric and numeric-like types.
      if (!IsNumberType(ins->type()) && ins->type() != MIRType::Boolean &&
          ins->type() != MIRType::Value && ins->type() != MIRType::IntPtr) {
        continue;
      }
      // MIsNoIter is fused with the MTest that follows it and emitted as
      // LIsNoIterAndBranch. Similarly, MIteratorHasIndices is fused to
      // become LIteratorHasIndicesAndBranch. Skip them to avoid complicating
      // lowering.
      if (ins->isIsNoIter() || ins->isIteratorHasIndices()) {
        MOZ_ASSERT(ins->hasOneUse());
        continue;
      }
      Range r(ins);
      MOZ_ASSERT_IF(ins->type() == MIRType::Int64, r.isUnknown());
      // Don't insert assertions if there's nothing interesting to assert.
      if (r.isUnknown() ||
          (ins->type() == MIRType::Int32 && r.isUnknownInt32())) {
        continue;
      }
      // Don't add a use to an instruction that is recovered on bailout.
      if (ins->isRecoveredOnBailout()) {
        continue;
      }
      if (!alloc().ensureBallast()) {
        return false;
      }
      MAssertRange* guard =
          MAssertRange::New(alloc(), ins, new (alloc()) Range(r));
      // Beta nodes and interrupt checks are required to be located at the
      // beginnings of basic blocks, so we must insert range assertions
      // after any such instructions.
      MInstruction* insertAt = nullptr;
      if (block->graph().osrBlock() == block) {
        insertAt = ins->toInstruction();
      } else {
        insertAt = block->safeInsertTop(ins);
      }
      if (insertAt == *iter) {
        block->insertAfter(insertAt, guard);
      } else {
        block->insertBefore(insertAt, guard);
      }
    }
  }
  return true;
}
///////////////////////////////////////////////////////////////////////////////
// Range based Truncation
///////////////////////////////////////////////////////////////////////////////
void Range::clampToInt32() {
  if (isInt32()) {
    return;
  }
  // Substitute the int32 extremes for any missing bound.
  const int32_t newLower = hasInt32LowerBound() ? lower() : JSVAL_INT_MIN;
  const int32_t newUpper = hasInt32UpperBound() ? upper() : JSVAL_INT_MAX;
  setInt32(newLower, newUpper);
}
// Convert this range to describe the result of an int32 wrap-around
// (truncating) conversion of the value.
void Range::wrapAroundToInt32() {
  if (!hasInt32Bounds()) {
    // The wrapped value could be anything in the int32 domain.
    setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
  } else if (canHaveFractionalPart()) {
    // Clearing the fractional field may provide an opportunity to refine
    // lower_ or upper_.
    canHaveFractionalPart_ = ExcludesFractionalParts;
    canBeNegativeZero_ = ExcludesNegativeZero;
    refineInt32BoundsByExponent(max_exponent_, &lower_, &hasInt32LowerBound_,
                                &upper_, &hasInt32UpperBound_);
    assertInvariants();
  } else {
    // If nothing else, we can clear the negative zero flag.
    canBeNegativeZero_ = ExcludesNegativeZero;
  }
  MOZ_ASSERT(isInt32());
}
// Convert this range to describe a shift count: wrap to int32, then collapse
// anything outside [0, 31] to the full shift-count range.
void Range::wrapAroundToShiftCount() {
  wrapAroundToInt32();
  if (lower() < 0 || upper() >= 32) {
    setInt32(0, 31);
  }
}
// Convert this range to describe a boolean value: wrap to int32, then
// collapse anything that isn't already [0, 1].
void Range::wrapAroundToBoolean() {
  wrapAroundToInt32();
  if (!isBoolean()) {
    setInt32(0, 1);
  }
  MOZ_ASSERT(isBoolean());
}
bool MDefinition::canTruncate() const {
  // Default: no procedure is defined for truncating this instruction.
  // Subclasses that support truncation override this to return true.
  return false;
}
void MDefinition::truncate(TruncateKind kind) {
  // Must never be reached: callers are expected to check canTruncate()
  // first, and subclasses returning true from it override this method.
  MOZ_CRASH("No procedure defined for truncating this instruction.");
}
// Only floating-point constants can be truncated (to an int32 constant).
bool MConstant::canTruncate() const { return IsFloatingPointType(type()); }
void MConstant::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());
  // Truncate the double to int, since all uses truncates it.
  int32_t res = ToInt32(numberToDouble());
  // Clear the whole payload first so no stale bits of the old double remain.
  payload_.asBits = 0;
  payload_.i32 = res;
  setResultType(MIRType::Int32);
  if (range()) {
    // The constant now has exactly one known value.
    range()->setInt32(res, res);
  }
}
bool MPhi::canTruncate() const {
  // Only int32- and double-typed phis participate in truncation.
  return type() == MIRType::Int32 || type() == MIRType::Double;
}
void MPhi::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());
  // Record the analysis result and retype the phi to Int32.
  truncateKind_ = kind;
  setResultType(MIRType::Int32);
  Range* r = range();
  if (r && kind >= TruncateKind::IndirectTruncate) {
    r->wrapAroundToInt32();
  }
}
bool MAdd::canTruncate() const {
  // Only int32 and double additions can be truncated.
  return type() == MIRType::Int32 || type() == MIRType::Double;
}
void MAdd::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());
  // Keep the analysis result around; fallible checks depend on it.
  setTruncateKind(kind);
  setSpecialization(MIRType::Int32);
  Range* r = range();
  if (r && truncateKind() >= TruncateKind::IndirectTruncate) {
    r->wrapAroundToInt32();
  }
}
bool MSub::canTruncate() const {
  // Only int32 and double subtractions can be truncated.
  return type() == MIRType::Int32 || type() == MIRType::Double;
}
void MSub::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());
  // Keep the analysis result around; fallible checks depend on it.
  setTruncateKind(kind);
  setSpecialization(MIRType::Int32);
  Range* r = range();
  if (r && truncateKind() >= TruncateKind::IndirectTruncate) {
    r->wrapAroundToInt32();
  }
}
bool MMul::canTruncate() const {
  // Only int32 and double multiplications can be truncated.
  return type() == MIRType::Int32 || type() == MIRType::Double;
}
void MMul::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());
  // Keep the analysis result around; fallible checks depend on it.
  setTruncateKind(kind);
  setSpecialization(MIRType::Int32);
  if (truncateKind() >= TruncateKind::IndirectTruncate) {
    // A truncated result can't observe negative zero.
    setCanBeNegativeZero(false);
    if (Range* r = range()) {
      r->wrapAroundToInt32();
    }
  }
}
bool MDiv::canTruncate() const {
  // Only int32 and double divisions can be truncated.
  return type() == MIRType::Int32 || type() == MIRType::Double;
}
void MDiv::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());
  // Keep the analysis result around; fallible checks depend on it.
  setTruncateKind(kind);
  setSpecialization(MIRType::Int32);
  // A truncated division with unsigned lhs and rhs lowers to more
  // efficient code.
  if (unsignedOperands()) {
    replaceWithUnsignedOperands();
    unsigned_ = true;
  }
}
bool MMod::canTruncate() const {
  // Only int32 and double modulus operations can be truncated.
  return type() == MIRType::Int32 || type() == MIRType::Double;
}
void MMod::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());
  // Keep the analysis result around; fallible checks depend on it.
  setTruncateKind(kind);
  setSpecialization(MIRType::Int32);
  // As for division, a truncated unsigned modulus lowers more efficiently.
  if (unsignedOperands()) {
    replaceWithUnsignedOperands();
    unsigned_ = true;
  }
}
bool MToDouble::canTruncate() const {
  // An MToDouble always produces a double, which can always be truncated.
  MOZ_ASSERT(type() == MIRType::Double);
  return true;
}
void MToDouble::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());
  setTruncateKind(kind);
  // The Int32 result type flags this MToDouble for replacement by an
  // MTruncateToInt32 when the graph is rewritten.
  setResultType(MIRType::Int32);
  if (truncateKind() >= TruncateKind::IndirectTruncate) {
    if (Range* r = range()) {
      r->wrapAroundToInt32();
    }
  }
}
// A limited-truncate node exists precisely to be truncated, so always allow it.
bool MLimitedTruncate::canTruncate() const { return true; }
void MLimitedTruncate::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());
  setTruncateKind(kind);
  setResultType(MIRType::Int32);
  if (kind < TruncateKind::IndirectTruncate) {
    return;
  }
  // Indirect (or stronger) truncation lets the range wrap modulo 2^32.
  if (range()) {
    range()->wrapAroundToInt32();
  }
}
bool MCompare::canTruncate() const {
  // Only a double comparison can be demoted to an int32 comparison, and only
  // when range analysis shows both operands are naturally in the int32 range,
  // so the conversion cannot change the comparison's outcome.
  return isDoubleComparison() && Range(lhs()).isInt32() &&
         Range(rhs()).isInt32();
}
// Demote this double comparison to an int32 comparison (legal per
// MCompare::canTruncate) and request conversion of its operands.
void MCompare::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());
  compareType_ = Compare_Int32;
  // Truncating the operands won't change their value because we don't force a
  // truncation, but it will change their type, which we need because we
  // now expect integer inputs.
  truncateOperands_ = true;
}
TruncateKind MDefinition::operandTruncateKind(size_t index) const {
  // Generic routine: We don't know anything, so conservatively request no
  // truncation for any operand.
  return TruncateKind::NoTruncate;
}
TruncateKind MPhi::operandTruncateKind(size_t index) const {
  // The truncation applied to a phi is effectively applied to the phi's
  // operands: every incoming value gets the same truncation request.
  return truncateKind_;
}
TruncateKind MTruncateToInt32::operandTruncateKind(size_t index) const {
  // This operator is an explicit truncate to int32, so its operand may be
  // truncated unconditionally.
  return TruncateKind::Truncate;
}
TruncateKind MBinaryBitwiseInstruction::operandTruncateKind(
    size_t index) const {
  // The bitwise operators truncate to int32 (ToInt32 semantics), so both
  // operands may always be truncated.
  return TruncateKind::Truncate;
}
TruncateKind MLimitedTruncate::operandTruncateKind(size_t index) const {
  // Forward our own truncation request, clamped to the limit this node was
  // constructed with.
  return std::min(truncateKind(), truncateLimit_);
}
TruncateKind MAdd::operandTruncateKind(size_t index) const {
  // This operator is doing some arithmetic. If its result is truncated,
  // it's an indirect truncate for its operands: they may wrap, but only
  // because the consumer discards the high bits.
  return std::min(truncateKind(), TruncateKind::IndirectTruncate);
}
TruncateKind MSub::operandTruncateKind(size_t index) const {
  // See the comment in MAdd::operandTruncateKind: arithmetic propagates at
  // most an indirect truncate to its operands.
  return std::min(truncateKind(), TruncateKind::IndirectTruncate);
}
TruncateKind MMul::operandTruncateKind(size_t index) const {
  // See the comment in MAdd::operandTruncateKind: arithmetic propagates at
  // most an indirect truncate to its operands.
  return std::min(truncateKind(), TruncateKind::IndirectTruncate);
}
TruncateKind MToDouble::operandTruncateKind(size_t index) const {
  // MToDouble propagates its truncate kind to its operand unchanged, as the
  // conversion itself is value-preserving.
  return truncateKind();
}
TruncateKind MStoreUnboxedScalar::operandTruncateKind(size_t index) const {
  // Storing into an integer typed array implicitly truncates the value
  // operand (operand 2); the other operands are untouched.
  if (index == 2 && isIntegerWrite()) {
    return TruncateKind::Truncate;
  }
  return TruncateKind::NoTruncate;
}
TruncateKind MStoreDataViewElement::operandTruncateKind(size_t index) const {
  // An integer DataView store implicitly truncates the stored value
  // (operand 2); the other operands are untouched.
  if (index == 2 && isIntegerWrite()) {
    return TruncateKind::Truncate;
  }
  return TruncateKind::NoTruncate;
}
TruncateKind MStoreTypedArrayElementHole::operandTruncateKind(
    size_t index) const {
  // An integer store implicitly truncates the stored value, which is
  // operand 3 for this instruction; the other operands are untouched.
  if (index == 3 && isIntegerWrite()) {
    return TruncateKind::Truncate;
  }
  return TruncateKind::NoTruncate;
}
TruncateKind MDiv::operandTruncateKind(size_t index) const {
  // Division is sensitive to its inputs' exact values, so its operands may
  // only be truncated behind a bailout guard, never indirectly.
  return std::min(truncateKind(), TruncateKind::TruncateAfterBailouts);
}
TruncateKind MMod::operandTruncateKind(size_t index) const {
  // Like division, modulus operands may only be truncated behind a bailout
  // guard, never indirectly.
  return std::min(truncateKind(), TruncateKind::TruncateAfterBailouts);
}
TruncateKind MCompare::operandTruncateKind(size_t index) const {
  // If we're doing an int32 comparison on operands which were previously
  // floating-point, convert them (guarded by a bailout)!
  MOZ_ASSERT_IF(truncateOperands_, isInt32Comparison());
  if (truncateOperands_) {
    return TruncateKind::TruncateAfterBailouts;
  }
  return TruncateKind::NoTruncate;
}
// Try to turn an MTest on a boxed phi into an int32 test: if every value
// flowing into the phi is a boxed int32 or boolean, rewrite the phi's
// operands to int32 so the test can operate on int32 directly.
// Returns false only on OOM.
static bool TruncateTest(TempAllocator& alloc, MTest* test) {
  // If all possible inputs to the test are either int32 or boolean,
  // convert those inputs to int32 so that an int32 test can be performed.
  if (test->input()->type() != MIRType::Value) {
    return true;
  }
  // Only rewrite a phi that feeds this test alone; any other (or implicit)
  // use would observe the changed result type.
  if (!test->input()->isPhi() || !test->input()->hasOneDefUse() ||
      test->input()->isImplicitlyUsed()) {
    return true;
  }
  MPhi* phi = test->input()->toPhi();
  // First pass: give up (successfully) unless every operand is a box of an
  // int32 or boolean value.
  for (size_t i = 0; i < phi->numOperands(); i++) {
    MDefinition* def = phi->getOperand(i);
    if (!def->isBox()) {
      return true;
    }
    MDefinition* inner = def->getOperand(0);
    if (inner->type() != MIRType::Boolean && inner->type() != MIRType::Int32) {
      return true;
    }
  }
  // Second pass: replace each boxed operand with its unboxed value,
  // inserting a ToNumberInt32 conversion for the boolean inputs.
  for (size_t i = 0; i < phi->numOperands(); i++) {
    MDefinition* inner = phi->getOperand(i)->getOperand(0);
    if (inner->type() != MIRType::Int32) {
      if (!alloc.ensureBallast()) {
        return false;
      }
      // Insert the conversion at the end of the block defining the value.
      MBasicBlock* block = inner->block();
      inner = MToNumberInt32::New(alloc, inner);
      block->insertBefore(block->lastIns(), inner->toInstruction());
    }
    MOZ_ASSERT(inner->type() == MIRType::Int32);
    phi->replaceOperand(i, inner);
  }
  phi->setResultType(MIRType::Int32);
  return true;
}
// Truncating an instruction's result is an optimization which relies on
// knowing all uses of that instruction. If one of the uses got removed
// (e.g. by UCE), Range Analysis is not allowed to apply a destructive
// modification which could change an observable result.
//
// This corner case is easiest to understand with UCE examples, but it
// might also happen with type-inference assumptions. Note: type inference
// implicitly removes branches where other types might be flowing in.
//
// To keep the untruncated value available on bailout, clone |candidate|,
// mark the clone as recovered-on-bailout, and redirect all resume-point /
// recover-instruction uses to the clone. Returns false only on OOM.
static bool CloneForDeadBranches(TempAllocator& alloc,
                                 MInstruction* candidate) {
  // Compare returns a boolean so it doesn't have to be recovered on bailout
  // because the output would remain correct.
  if (candidate->isCompare()) {
    return true;
  }
  MOZ_ASSERT(candidate->canClone());
  if (!alloc.ensureBallast()) {
    return false;
  }
  // Collect the candidate's operands for the clone.
  MDefinitionVector operands(alloc);
  size_t end = candidate->numOperands();
  if (!operands.reserve(end)) {
    return false;
  }
  for (size_t i = 0; i < end; ++i) {
    operands.infallibleAppend(candidate->getOperand(i));
  }
  MInstruction* clone = candidate->clone(alloc, operands);
  if (!clone) {
    return false;
  }
  // The clone must not inherit the (about to be truncated) range.
  clone->setRange(nullptr);
  // Set ImplicitlyUsed flag on the cloned instruction in order to chain recover
  // instruction for the bailout path.
  clone->setImplicitlyUsedUnchecked();
  candidate->block()->insertBefore(candidate, clone);
  // Constants are cheap to rematerialize; anything else must be recoverable
  // so the bailout path can reconstruct the value.
  if (!candidate->maybeConstantValue()) {
    MOZ_ASSERT(clone->canRecoverOnBailout());
    clone->setRecoveredOnBailout();
  }
  // Replace the candidate by its recovered on bailout clone within recovered
  // instructions and resume points operands.
  for (MUseIterator i(candidate->usesBegin()); i != candidate->usesEnd();) {
    MUse* use = *i++;
    MNode* ins = use->consumer();
    if (ins->isDefinition() && !ins->toDefinition()->isRecoveredOnBailout()) {
      continue;
    }
    use->replaceProducer(clone);
  }
  return true;
}
// Examine all the users of |candidate| and determine the most aggressive
// truncate kind that satisfies all of them. May set |*shouldClone| when the
// candidate must be cloned (see CloneForDeadBranches) to stay recoverable.
static TruncateKind ComputeRequestedTruncateKind(MDefinition* candidate,
                                                 bool* shouldClone) {
  bool isCapturedResult =
      false;  // Check if used by a recovered instruction or a resume point.
  bool isObservableResult =
      false;  // Check if it can be read from another frame.
  bool isRecoverableResult = true;  // Check if it can safely be reconstructed.
  bool isImplicitlyUsed = candidate->isImplicitlyUsed();
  bool hasTryBlock = candidate->block()->graph().hasTryBlock();
  // Start from the strongest kind and weaken it as the uses demand.
  TruncateKind kind = TruncateKind::Truncate;
  for (MUseIterator use(candidate->usesBegin()); use != candidate->usesEnd();
       use++) {
    if (use->consumer()->isResumePoint()) {
      // Truncation is a destructive optimization, as such, we need to pay
      // attention to removed branches and prevent optimization
      // destructive optimizations if we have no alternative. (see
      // ImplicitlyUsed flag)
      isCapturedResult = true;
      isObservableResult =
          isObservableResult ||
          use->consumer()->toResumePoint()->isObservableOperand(*use);
      isRecoverableResult =
          isRecoverableResult &&
          use->consumer()->toResumePoint()->isRecoverableOperand(*use);
      continue;
    }
    MDefinition* consumer = use->consumer()->toDefinition();
    if (consumer->isRecoveredOnBailout()) {
      isCapturedResult = true;
      isImplicitlyUsed = isImplicitlyUsed || consumer->isImplicitlyUsed();
      continue;
    }
    // Take the weakest request over all live (non-recovered) consumers.
    TruncateKind consumerKind =
        consumer->operandTruncateKind(consumer->indexOf(*use));
    kind = std::min(kind, consumerKind);
    if (kind == TruncateKind::NoTruncate) {
      break;
    }
  }
  // We cannot do full truncation on guarded instructions.
  if (candidate->isGuard() || candidate->isGuardRangeBailouts()) {
    kind = std::min(kind, TruncateKind::TruncateAfterBailouts);
  }
  // If the value naturally produces an int32 value (before bailout checks)
  // that needs no conversion, we don't have to worry about resume points
  // seeing truncated values.
  bool needsConversion = !candidate->range() || !candidate->range()->isInt32();
  // If the instruction is explicitly truncated (not indirectly) by all its
  // uses and if it is not implicitly used, then we can safely encode its
  // truncated result as part of the resume point operands. This is safe,
  // because even if we resume with a truncated double, the next baseline
  // instruction operating on this instruction is going to be a no-op.
  //
  // Note, that if the result can be observed from another frame, then this
  // optimization is not safe. Similarly, if this function contains a try
  // block, the result could be observed from a catch block, which we do
  // not compile.
  bool safeToConvert = kind == TruncateKind::Truncate && !isImplicitlyUsed &&
                       !isObservableResult && !hasTryBlock;
  // If the candidate instruction appears as operand of a resume point or a
  // recover instruction, and we have to truncate its result, then we might
  // have to either recover the result during the bailout, or avoid the
  // truncation.
  if (isCapturedResult && needsConversion && !safeToConvert) {
    // If the result can be recovered from all the resume points (not needed
    // for iterating over the inlined frames), and this instruction can be
    // recovered on bailout, then we can clone it and use the cloned
    // instruction to encode the recover instruction. Otherwise, we should
    // keep the original result and bailout if the value is not in the int32
    // range.
    if (!JitOptions.disableRecoverIns && isRecoverableResult &&
        candidate->canRecoverOnBailout()) {
      *shouldClone = true;
    } else {
      kind = std::min(kind, TruncateKind::TruncateAfterBailouts);
    }
  }
  return kind;
}
// Decide whether |candidate| is eligible for truncation at all (no rounding
// errors possible), then defer to ComputeRequestedTruncateKind to see how
// aggressively its uses allow it to be truncated.
static TruncateKind ComputeTruncateKind(MDefinition* candidate,
                                        bool* shouldClone) {
  // Compare operations might coerce its inputs to int32 if the ranges are
  // correct. So we do not need to check if all uses are coerced.
  if (candidate->isCompare()) {
    return TruncateKind::TruncateAfterBailouts;
  }
  // Set truncated flag if range analysis ensure that it has no
  // rounding errors and no fractional part. Note that we can't use
  // the MDefinition Range constructor, because we need to know if
  // the value will have rounding errors before any bailout checks.
  const Range* r = candidate->range();
  bool canHaveRoundingErrors = !r || r->canHaveRoundingErrors();
  // Special case integer division and modulo: a/b can be infinite, and a%b
  // can be NaN but cannot actually have rounding errors induced by truncation.
  if ((candidate->isDiv() || candidate->isMod()) &&
      candidate->type() == MIRType::Int32) {
    canHaveRoundingErrors = false;
  }
  if (canHaveRoundingErrors) {
    return TruncateKind::NoTruncate;
  }
  // Ensure all observable uses are truncated.
  return ComputeRequestedTruncateKind(candidate, shouldClone);
}
// Remove explicit conversion nodes (MTruncateToInt32 / MToNumberInt32) that
// consume |truncated|: now that |truncated| directly produces an in-range
// int32, those conversions are no-ops and their uses can read |truncated|
// directly.
static void RemoveTruncatesOnOutput(MDefinition* truncated) {
  // Compare returns a boolean so it doesn't have any output truncates.
  if (truncated->isCompare()) {
    return;
  }
  MOZ_ASSERT(truncated->type() == MIRType::Int32);
  MOZ_ASSERT(Range(truncated).isInt32());
  for (MUseDefIterator use(truncated); use; use++) {
    MDefinition* def = use.def();
    // Skip a use only when it is *neither* kind of conversion node. The
    // previous `||` condition was true for every node (no node is both),
    // which silently turned this pass into a no-op.
    if (!def->isTruncateToInt32() && !def->isToNumberInt32()) {
      continue;
    }
    def->replaceAllUsesWith(truncated);
  }
}
// Fix up the operands of a just-truncated definition: insert the int32
// conversions its operandTruncateKind() requests, and fold away a now
// redundant MToDouble wrapper.
void RangeAnalysis::adjustTruncatedInputs(MDefinition* truncated) {
  MBasicBlock* block = truncated->block();
  for (size_t i = 0, e = truncated->numOperands(); i < e; i++) {
    TruncateKind kind = truncated->operandTruncateKind(i);
    if (kind == TruncateKind::NoTruncate) {
      continue;
    }
    MDefinition* input = truncated->getOperand(i);
    // Already an int32: nothing to convert.
    if (input->type() == MIRType::Int32) {
      continue;
    }
    // An int32 that was widened to double can feed the truncated consumer
    // directly; otherwise insert an explicit conversion.
    if (input->isToDouble() && input->getOperand(0)->type() == MIRType::Int32) {
      truncated->replaceOperand(i, input->getOperand(0));
    } else {
      MInstruction* op;
      if (kind == TruncateKind::TruncateAfterBailouts) {
        // Guarded conversion: bails out if the value is not an int32.
        MOZ_ASSERT(!mir->outerInfo().hadEagerTruncationBailout());
        op = MToNumberInt32::New(alloc(), truncated->getOperand(i));
        op->setBailoutKind(BailoutKind::EagerTruncation);
      } else {
        // Unconditional ToInt32-style truncation.
        op = MTruncateToInt32::New(alloc(), truncated->getOperand(i));
      }
      // Phi operands flow in along predecessor edges, so the conversion must
      // be placed at the end of the corresponding predecessor block.
      if (truncated->isPhi()) {
        MBasicBlock* pred = block->getPredecessor(i);
        pred->insertBefore(pred->lastIns(), op);
      } else {
        block->insertBefore(truncated->toInstruction(), op);
      }
      truncated->replaceOperand(i, op);
    }
  }
  // A truncated MToDouble is a no-op (see MToDouble::truncate): forward its
  // input and drop the node.
  if (truncated->isToDouble()) {
    truncated->replaceAllUsesWith(truncated->toToDouble()->getOperand(0));
    block->discard(truncated->toToDouble());
  }
}
// Policy check: is it worthwhile/safe to apply |kind| of truncation to |def|,
// given the outer script's bailout history?
bool RangeAnalysis::canTruncate(MDefinition* def, TruncateKind kind) const {
  if (kind == TruncateKind::NoTruncate) {
    return false;
  }
  // Range Analysis is sometimes eager to do optimizations, even if we
  // are not able to truncate an instruction. In such case, we
  // speculatively compile the instruction to an int32 instruction
  // while adding a guard. This is what is implied by
  // TruncateAfterBailout.
  //
  // If a previous compilation was invalidated because a speculative
  // truncation bailed out, we no longer attempt to make this kind of
  // eager optimization.
  if (mir->outerInfo().hadEagerTruncationBailout()) {
    if (kind == TruncateKind::TruncateAfterBailouts) {
      return false;
    }
    // MDiv and MMod always require TruncateAfterBailout for their operands.
    // See MDiv::operandTruncateKind and MMod::operandTruncateKind.
    if (def->isDiv() || def->isMod()) {
      return false;
    }
  }
  return true;
}
// Iterate backward over all instructions and attempt to truncate each one
// that satisfies the following predicates: it has been analyzed by range
// analysis, its range has no rounding errors, and all its uses truncate
// the result.
//
// If the truncation of the operation is successful, then the instruction is
// queued for a later graph-fixup pass which restores type correctness by
// converting the operands that need to be truncated.
//
// We iterate backward because it is likely that a truncated operation
// truncates some of its operands.
bool RangeAnalysis::truncate() {
  JitSpew(JitSpew_Range, "Do range-base truncation (backward loop)");
  // Automatic truncation is disabled for wasm because the truncation logic
  // is based on IonMonkey which assumes that we can bailout if the truncation
  // logic fails. As wasm code has no bailout mechanism, it is safer to avoid
  // any automatic truncations.
  MOZ_ASSERT(!mir->compilingWasm());
  Vector<MDefinition*, 16, SystemAllocPolicy> worklist;
  for (PostorderIterator block(graph_.poBegin()); block != graph_.poEnd();
       block++) {
    for (MInstructionReverseIterator iter(block->rbegin());
         iter != block->rend(); iter++) {
      // Recovered instructions are only rematerialized on bailout; they are
      // never truncated.
      if (iter->isRecoveredOnBailout()) {
        continue;
      }
      // Instructions without a value result can't be truncated, but a test
      // on a boxed phi may still be convertible to an int32 test.
      if (iter->type() == MIRType::None) {
        if (iter->isTest()) {
          if (!TruncateTest(alloc(), iter->toTest())) {
            return false;
          }
        }
        continue;
      }
      // Remember all bitop instructions for folding after range analysis.
      switch (iter->op()) {
        case MDefinition::Opcode::BitAnd:
        case MDefinition::Opcode::BitOr:
        case MDefinition::Opcode::BitXor:
        case MDefinition::Opcode::Lsh:
        case MDefinition::Opcode::Rsh:
        case MDefinition::Opcode::Ursh:
          if (!bitops.append(static_cast<MBinaryBitwiseInstruction*>(*iter))) {
            return false;
          }
          break;
        default:;
      }
      bool shouldClone = false;
      TruncateKind kind = ComputeTruncateKind(*iter, &shouldClone);
      // Truncate this instruction if possible.
      if (!canTruncate(*iter, kind) || !iter->canTruncate()) {
        continue;
      }
      SpewTruncate(*iter, kind, shouldClone);
      // If needed, clone the current instruction for keeping it for the
      // bailout path. This give us the ability to truncate instructions
      // even after the removal of branches.
      if (shouldClone && !CloneForDeadBranches(alloc(), *iter)) {
        return false;
      }
      // TruncateAfterBailouts keeps the bailout code as-is and
      // continues with truncated operations, with the expectation
      // that we are unlikely to bail out. If we do bail out, then we
      // will set a flag in FinishBailoutToBaseline to prevent eager
      // truncation when we recompile, to avoid bailout loops.
      if (kind == TruncateKind::TruncateAfterBailouts) {
        iter->setBailoutKind(BailoutKind::EagerTruncation);
      }
      iter->truncate(kind);
      // Delay updates of inputs/outputs to avoid creating node which
      // would be removed by the truncation of the next operations.
      iter->setInWorklist();
      if (!worklist.append(*iter)) {
        return false;
      }
    }
    // Phis are handled like instructions, except that cloning for dead
    // branches is not supported for them.
    for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
         iter != end; ++iter) {
      bool shouldClone = false;
      TruncateKind kind = ComputeTruncateKind(*iter, &shouldClone);
      // Truncate this phi if possible.
      if (shouldClone || !canTruncate(*iter, kind) || !iter->canTruncate()) {
        continue;
      }
      SpewTruncate(*iter, kind, shouldClone);
      iter->truncate(kind);
      // Delay updates of inputs/outputs to avoid creating node which
      // would be removed by the truncation of the next operations.
      iter->setInWorklist();
      if (!worklist.append(*iter)) {
        return false;
      }
    }
  }
  // Update inputs/outputs of truncated instructions.
  JitSpew(JitSpew_Range, "Do graph type fixup (dequeue)");
  while (!worklist.empty()) {
    if (!alloc().ensureBallast()) {
      return false;
    }
    MDefinition* def = worklist.popCopy();
    def->setNotInWorklist();
    RemoveTruncatesOnOutput(def);
    adjustTruncatedInputs(def);
  }
  return true;
}
// Fold away bitops (collected during RangeAnalysis::truncate) that are no-ops
// given the ranges of their operands, e.g. (x | 0) on an int32 input.
bool RangeAnalysis::removeUnnecessaryBitops() {
  JitSpew(JitSpew_Range, "Begin (removeUnnecessaryBitops)");
  // Note: This operation change the semantic of the program in a way which
  // uniquely works with Int32, Recover Instructions added by the Sink phase
  // expects the MIR Graph to still have a valid flow as-if they were double
  // operations instead of Int32 operations. Thus, this phase should be
  // executed after the Sink phase, and before DCE.
  // Fold any unnecessary bitops in the graph, such as (x | 0) on an integer
  // input. This is done after range analysis rather than during GVN as the
  // presence of the bitop can change which instructions are truncated.
  for (size_t i = 0; i < bitops.length(); i++) {
    MBinaryBitwiseInstruction* ins = bitops[i];
    if (ins->isRecoveredOnBailout()) {
      continue;
    }
    MDefinition* folded = ins->foldUnnecessaryBitop();
    if (folded != ins) {
      // Keep the bitop alive for recover instructions (see the note above),
      // but reroute all live uses to the folded value.
      ins->replaceAllLiveUsesWith(folded);
      ins->setRecoveredOnBailout();
    }
  }
  bitops.clear();
  return true;
}
///////////////////////////////////////////////////////////////////////////////
// Collect Range information of operands
///////////////////////////////////////////////////////////////////////////////
void MInArray::collectRangeInfoPreTrunc() {
  // A provably finite, non-negative index can never take the negative-index
  // path, so drop that check and the guard flag with it.
  Range indexRange(index());
  if (!indexRange.isFiniteNonNegative()) {
    return;
  }
  needsNegativeIntCheck_ = false;
  setNotGuard();
}
void MLoadElementHole::collectRangeInfoPreTrunc() {
  // A provably finite, non-negative index can never take the negative-index
  // path, so drop that check and the guard flag with it.
  Range indexRange(index());
  if (!indexRange.isFiniteNonNegative()) {
    return;
  }
  needsNegativeIntCheck_ = false;
  setNotGuard();
}
void MInt32ToIntPtr::collectRangeInfoPreTrunc() {
Range inputRange(input());
if (inputRange.isFiniteNonNegative()) {
canBeNegative_ = false;
}
}
void MClz::collectRangeInfoPreTrunc() {
Range inputRange(input());
if (!inputRange.canBeZero()) {
operandIsNeverZero_ = true;
}
}
void MCtz::collectRangeInfoPreTrunc() {
Range inputRange(input());
if (!inputRange.canBeZero()) {
operandIsNeverZero_ = true;
}
}
// Use the operand ranges (before truncation) to disable the div checks that
// range analysis proves unnecessary.
void MDiv::collectRangeInfoPreTrunc() {
  Range lhsRange(lhs());
  Range rhsRange(rhs());
  // Test if Dividend is non-negative.
  if (lhsRange.isFiniteNonNegative()) {
    canBeNegativeDividend_ = false;
  }
  // Try removing divide by zero check.
  if (!rhsRange.canBeZero()) {
    canBeDivideByZero_ = false;
  }
  // If lhsRange does not contain INT32_MIN in its range,
  // negative overflow check can be skipped.
  if (!lhsRange.contains(INT32_MIN)) {
    canBeNegativeOverflow_ = false;
  }
  // If rhsRange does not contain -1 likewise.
  // (INT32_MIN / -1 is the only overflowing int32 division.)
  if (!rhsRange.contains(-1)) {
    canBeNegativeOverflow_ = false;
  }
  // If lhsRange does not contain a zero,
  // negative zero check can be skipped.
  if (!lhsRange.canBeZero()) {
    canBeNegativeZero_ = false;
  }
  // If rhsRange >= 0 negative zero check can be skipped.
  if (rhsRange.isFiniteNonNegative()) {
    canBeNegativeZero_ = false;
  }
  // A division that can still fail must keep its bailouts alive as guards.
  if (fallible()) {
    setGuardRangeBailoutsUnchecked();
  }
}
// Use the operand ranges (before truncation) to rule out a -0 result where
// range analysis proves it impossible.
void MMul::collectRangeInfoPreTrunc() {
  Range lhsRange(lhs());
  Range rhsRange(rhs());
  // If lhsRange contains only positive then we can skip negative zero check.
  if (lhsRange.isFiniteNonNegative() && !lhsRange.canBeZero()) {
    setCanBeNegativeZero(false);
  }
  // Likewise rhsRange.
  if (rhsRange.isFiniteNonNegative() && !rhsRange.canBeZero()) {
    setCanBeNegativeZero(false);
  }
  // If rhsRange and lhsRange contain Non-negative integers only,
  // We skip negative zero check.
  if (rhsRange.isFiniteNonNegative() && lhsRange.isFiniteNonNegative()) {
    setCanBeNegativeZero(false);
  }
  // If rhsRange and lhsRange < 0. Then we skip negative zero check.
  if (rhsRange.isFiniteNegative() && lhsRange.isFiniteNegative()) {
    setCanBeNegativeZero(false);
  }
}
// Use the operand ranges (before truncation) to disable the mod checks that
// range analysis proves unnecessary.
void MMod::collectRangeInfoPreTrunc() {
  Range lhsRange(lhs());
  Range rhsRange(rhs());
  // A non-negative dividend rules out the negative-dividend slow path.
  if (lhsRange.isFiniteNonNegative()) {
    canBeNegativeDividend_ = false;
  }
  // A divisor that can never be zero needs no divide-by-zero check.
  if (!rhsRange.canBeZero()) {
    canBeDivideByZero_ = false;
  }
  // An int32 modulus that can still fail must keep its bailouts as guards.
  if (type() == MIRType::Int32 && fallible()) {
    setGuardRangeBailoutsUnchecked();
  }
}
void MToNumberInt32::collectRangeInfoPreTrunc() {
Range inputRange(input());
if (!inputRange.canBeNegativeZero()) {
needsNegativeZeroCheck_ = false;
}
}
// Mark the bounds check infallible when the index range (shifted by the
// check's minimum/maximum offsets) provably stays inside [0, length).
void MBoundsCheck::collectRangeInfoPreTrunc() {
  Range indexRange(index());
  Range lengthRange(length());
  // Without finite int32 bounds on the index we cannot conclude anything.
  if (!indexRange.hasInt32LowerBound() || !indexRange.hasInt32UpperBound()) {
    return;
  }
  if (!lengthRange.hasInt32LowerBound() || lengthRange.canBeNaN()) {
    return;
  }
  // Use int64 arithmetic so the offset additions below cannot overflow.
  int64_t indexLower = indexRange.lower();
  int64_t indexUpper = indexRange.upper();
  int64_t lengthLower = lengthRange.lower();
  int64_t min = minimum();
  int64_t max = maximum();
  if (indexLower + min >= 0 && indexUpper + max < lengthLower) {
    fallible_ = false;
  }
}
// Mark the lower-bound check infallible when the index's lower bound is
// provably at least the required minimum.
void MBoundsCheckLower::collectRangeInfoPreTrunc() {
  Range indexRange(index());
  if (indexRange.hasInt32LowerBound() && indexRange.lower() >= minimum_) {
    fallible_ = false;
  }
}
void MCompare::collectRangeInfoPreTrunc() {
  // If neither side can be NaN, codegen can use NaN-free comparison paths.
  bool lhsCanBeNaN = Range(lhs()).canBeNaN();
  bool rhsCanBeNaN = Range(rhs()).canBeNaN();
  if (!lhsCanBeNaN && !rhsCanBeNaN) {
    operandsAreNeverNaN_ = true;
  }
}
// Record when the input provably cannot be NaN, enabling a simpler negation.
void MNot::collectRangeInfoPreTrunc() {
  if (!Range(input()).canBeNaN()) {
    operandIsNeverNaN_ = true;
  }
}
// Record which special-case inputs (-Infinity, -0, NaN) the operand of this
// pow(x, 0.5) provably cannot produce, so codegen can skip their handling.
void MPowHalf::collectRangeInfoPreTrunc() {
  Range inputRange(input());
  // A range that excludes infinities/NaN, or one with an int32 lower bound,
  // cannot contain -Infinity.
  if (!inputRange.canBeInfiniteOrNaN() || inputRange.hasInt32LowerBound()) {
    operandIsNeverNegativeInfinity_ = true;
  }
  if (!inputRange.canBeNegativeZero()) {
    operandIsNeverNegativeZero_ = true;
  }
  if (!inputRange.canBeNaN()) {
    operandIsNeverNaN_ = true;
  }
}
// Disable the int32-range bailout check for x >>> y when the result's most
// significant bit is provably zero.
void MUrsh::collectRangeInfoPreTrunc() {
  // Only the int32 form carries these bailout checks.
  if (type() == MIRType::Int64) {
    return;
  }
  Range lhsRange(lhs()), rhsRange(rhs());
  // As in MUrsh::computeRange(), convert the inputs.
  lhsRange.wrapAroundToInt32();
  rhsRange.wrapAroundToShiftCount();
  // If the most significant bit of our result is always going to be zero,
  // we can optimize by disabling bailout checks for enforcing an int32 range.
  // That holds when the lhs is non-negative, or when we shift by at least 1.
  if (lhsRange.lower() >= 0 || rhsRange.lower() >= 1) {
    bailoutsDisabled_ = true;
  }
}
// Return true when masking a value of the given |range| with |mask| is a
// no-op, i.e. the mask keeps every bit the range can actually set.
static bool DoesMaskMatchRange(int32_t mask, Range& range) {
  // Check if range is positive, because the bitand operator in `(-3) & 0xff`
  // can't be eliminated.
  if (range.lower() >= 0) {
    MOZ_ASSERT(range.isInt32());
    // Check that the mask value has all bits set given the range upper bound.
    // Note that the upper bound does not have to be exactly the mask value. For
    // example, consider `x & 0xfff` where `x` is a uint8. That expression can
    // still be optimized to `x`.
    int bits = 1 + FloorLog2(range.upper());
    uint32_t maskNeeded = (bits == 32) ? 0xffffffff : (uint32_t(1) << bits) - 1;
    if ((mask & maskNeeded) == maskNeeded) {
      return true;
    }
  }
  return false;
}
// Detect when one side is a constant mask that covers the other side's whole
// range, making the bitop removable (see foldUnnecessaryBitop).
void MBinaryBitwiseInstruction::collectRangeInfoPreTrunc() {
  Range lhsRange(lhs());
  Range rhsRange(rhs());
  // Constant lhs masking rhs: `mask & x`.
  if (lhs()->isConstant() && lhs()->type() == MIRType::Int32 &&
      DoesMaskMatchRange(lhs()->toConstant()->toInt32(), rhsRange)) {
    maskMatchesRightRange = true;
  }
  // Constant rhs masking lhs: `x & mask`.
  if (rhs()->isConstant() && rhs()->type() == MIRType::Int32 &&
      DoesMaskMatchRange(rhs()->toConstant()->toInt32(), lhsRange)) {
    maskMatchesLeftRange = true;
  }
}
// Record which special inputs (NaN, -0) this node provably never sees, so
// codegen can skip normalizing them.
void MNaNToZero::collectRangeInfoPreTrunc() {
  Range inputRange(input());
  if (!inputRange.canBeNaN()) {
    operandIsNeverNaN_ = true;
  }
  if (!inputRange.canBeNegativeZero()) {
    operandIsNeverNegativeZero_ = true;
  }
}
// For every block proven unreachable by range analysis (flagged by
// MBeta::computeRange), rewrite the branch condition feeding it into the
// constant that avoids the block, so UCE can remove it. Sets
// |*shouldRemoveDeadCode| when at least one branch was rewritten.
bool RangeAnalysis::prepareForUCE(bool* shouldRemoveDeadCode) {
  *shouldRemoveDeadCode = false;
  for (ReversePostorderIterator iter(graph_.rpoBegin());
       iter != graph_.rpoEnd(); iter++) {
    MBasicBlock* block = *iter;
    if (!block->unreachable()) {
      continue;
    }
    // Filter out unreachable fake entries.
    if (block->numPredecessors() == 0) {
      // Ignore fixup blocks added by the Value Numbering phase, in order
      // to keep the dominator tree as-is when we have OSR Block which are
      // no longer reachable from the main entry point of the graph.
      MOZ_ASSERT(graph_.osrBlock());
      continue;
    }
    MControlInstruction* cond = block->getPredecessor(0)->lastIns();
    if (!cond->isTest()) {
      continue;
    }
    // Replace the condition of the test control instruction by a constant
    // chosen based which of the successors has the unreachable flag which is
    // added by MBeta::computeRange on its own block.
    MTest* test = cond->toTest();
    MDefinition* condition = test->input();
    // If the false-branch is unreachable, then the test condition must be true.
    // If the true-branch is unreachable, then the test condition must be false.
    MOZ_ASSERT(block == test->ifTrue() || block == test->ifFalse());
    bool value = block == test->ifFalse();
    MConstant* constant =
        MConstant::New(alloc().fallible(), BooleanValue(value));
    if (!constant) {
      return false;
    }
    // Keep the bailouts feeding the original condition alive: they are what
    // justified marking the branch dead (see tryRemovingGuards).
    condition->setGuardRangeBailoutsUnchecked();
    test->block()->insertBefore(test, constant);
    test->replaceOperand(0, constant);
    JitSpew(JitSpew_Range,
            "Update condition of %u to reflect unreachable branches.",
            test->id());
    *shouldRemoveDeadCode = true;
  }
  return tryRemovingGuards();
}
// Walk the GuardRangeBailouts flags placed by prepareForUCE: drop the flag
// from instructions whose removal cannot change an observed range, and
// propagate it to their operands instead. Returns false only on OOM.
bool RangeAnalysis::tryRemovingGuards() {
  // Seed the worklist with every instruction currently flagged as a range
  // guard.
  MDefinitionVector guards(alloc());
  for (ReversePostorderIterator block = graph_.rpoBegin();
       block != graph_.rpoEnd(); block++) {
    for (MDefinitionIterator iter(*block); iter; iter++) {
      if (!iter->isGuardRangeBailouts()) {
        continue;
      }
      iter->setInWorklist();
      if (!guards.append(*iter)) {
        return false;
      }
    }
  }
  // Flag all fallible instructions which were indirectly used in the
  // computation of the condition, such that we do not ignore
  // bailout-paths which are used to shrink the input range of the
  // operands of the condition.
  // Note: |guards| grows while we iterate, acting as the worklist.
  for (size_t i = 0; i < guards.length(); i++) {
    MDefinition* guard = guards[i];
    // If this ins is a guard even without guardRangeBailouts,
    // there is no reason in trying to hoist the guardRangeBailouts check.
    guard->setNotGuardRangeBailouts();
    if (!DeadIfUnused(guard)) {
      guard->setGuardRangeBailouts();
      continue;
    }
    guard->setGuardRangeBailouts();
    if (!guard->isPhi()) {
      if (!guard->range()) {
        continue;
      }
      // Filter the range of the instruction based on its MIRType.
      Range typeFilteredRange(guard);
      // If the output range is updated by adding the inner range,
      // then the MIRType act as an effectful filter. As we do not know if
      // this filtered Range might change or not the result of the
      // previous comparison, we have to keep this instruction as a guard
      // because it has to bailout in order to restrict the Range to its
      // MIRType.
      if (typeFilteredRange.update(guard->range())) {
        continue;
      }
    }
    // This instruction's own bailouts are not needed; move the guard
    // obligation onto its operands.
    guard->setNotGuardRangeBailouts();
    // Propagate the guard to its operands.
    for (size_t op = 0, e = guard->numOperands(); op < e; op++) {
      MDefinition* operand = guard->getOperand(op);
      // Already marked.
      if (operand->isInWorklist()) {
        continue;
      }
      MOZ_ASSERT(!operand->isGuardRangeBailouts());
      operand->setInWorklist();
      operand->setGuardRangeBailouts();
      if (!guards.append(operand)) {
        return false;
      }
    }
  }
  // Clear the worklist flags now that the traversal is complete.
  for (size_t i = 0; i < guards.length(); i++) {
    MDefinition* guard = guards[i];
    guard->setNotInWorklist();
  }
  return true;
}
|