path: root/src/runtime/trace.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !goexperiment.exectracer2

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc., and writes them to a buffer
// in a compact form. A precise nanosecond-precision timestamp and a stack
// trace are captured for most events.
// See https://golang.org/s/go15trace for more info.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goos"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Event types in the trace; args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvSTWStart          = 9  // STW start [timestamp, kind]
	traceEvSTWDone           = 10 // STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // gcController.heapLive change [timestamp, heap_alloc]
	traceEvHeapGoal          = 34 // gcController.heapGoal() (formerly next_gc) change [timestamp, heap goal in bytes]
	traceEvTimerGoroutine    = 35 // not currently used; previously denoted timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // not currently used; denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvUserTaskCreate    = 45 // trace.NewTask [timestamp, internal task id, internal parent task id, name string, stack]
	traceEvUserTaskEnd       = 46 // end of a task [timestamp, internal task id, stack]
	traceEvUserRegion        = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), name string, stack]
	traceEvUserLog           = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
	traceEvCPUSample         = 49 // CPU profiling sample [timestamp, real timestamp, real P id (-1 when absent), goroutine id, stack]
	traceEvCount             = 50
	// A byte is used for the event type, but only 6 bits of it are available;
	// the remaining 2 bits specify the number of arguments.
	// That means the maximum event type value is 63.
)
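
// For illustration, the packing works out as follows: traceFlush emits a
// batch header byte of traceEvBatch|1<<traceArgCountShift = 0x41, readTrace0
// emits traceEvFrequency|0<<traceArgCountShift = 0x02, and the stack table
// dump emits traceEvStack|3<<traceArgCountShift = 0xc3 (an argument count of
// 3 means a length byte follows; see traceEventLocked).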

// traceBlockReason is an enumeration of reasons a goroutine might block.
// This is the interface the rest of the runtime uses to tell the
// tracer why a goroutine blocked. The tracer then propagates this information
// into the trace however it sees fit.
//
// Note that traceBlockReasons should not be compared, since reasons that are
// distinct by name may *not* be distinct by value.
type traceBlockReason uint8

// For maximal efficiency, just map the trace block reason directly to a trace
// event.
const (
	traceBlockGeneric         traceBlockReason = traceEvGoBlock
	traceBlockForever                          = traceEvGoStop
	traceBlockNet                              = traceEvGoBlockNet
	traceBlockSelect                           = traceEvGoBlockSelect
	traceBlockCondWait                         = traceEvGoBlockCond
	traceBlockSync                             = traceEvGoBlockSync
	traceBlockChanSend                         = traceEvGoBlockSend
	traceBlockChanRecv                         = traceEvGoBlockRecv
	traceBlockGCMarkAssist                     = traceEvGoBlockGC
	traceBlockGCSweep                          = traceEvGoBlock
	traceBlockSystemGoroutine                  = traceEvGoBlock
	traceBlockPreempted                        = traceEvGoBlock
	traceBlockDebugCall                        = traceEvGoBlock
	traceBlockUntilGCEnds                      = traceEvGoBlock
	traceBlockSleep                            = traceEvGoSleep
)

const (
	// Timestamps in trace are cputicks/traceTimeDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	traceTimeDiv = 16 + 48*(goarch.Is386|goarch.IsAmd64)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
)
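
// For example, on 386 and amd64 goarch.Is386|goarch.IsAmd64 is 1, so
// traceTimeDiv = 16 + 48 = 64 and a trace timestamp is cputicks()/64;
// on every other architecture (ppc64, arm64, ...) traceTimeDiv stays at 16.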

// trace is global tracing context.
var trace struct {
	// trace.lock must only be acquired on the system stack where
	// stack splits cannot happen while it is held.
	lock          mutex       // protects the following members
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	startTicks    int64       // cputicks when tracing was started
	endTicks      int64       // cputicks when tracing was stopped
	startNanotime int64       // nanotime when tracing was started
	endNanotime   int64       // nanotime when tracing was stopped
	startTime     traceTime   // traceClockNow when tracing started
	endTime       traceTime   // traceClockNow when tracing stopped
	seqGC         uint64      // GC start/done sequencer
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	stackTab      traceStackTable // maps stack traces to unique ids
	// cpuLogRead accepts CPU profile samples from the signal handler where
	// they're generated. It uses a two-word header to hold the IDs of the P and
	// G (respectively) that were active at the time of the sample. Because
	// profBuf uses a record with all zeros in its header to indicate overflow,
	// we ensure that the P field is always non-zero: The ID of a real P will
	// start at bit 1, and bit 0 will be set. Samples that arrive while no P is
	// running (such as near syscalls) will set the first header field to 0b10.
	// This careful handling of the first header field allows us to store ID of
	// the active G directly in the second field, even though that will be 0
	// when sampling g0.
	cpuLogRead *profBuf
	// cpuLogBuf is a trace buffer to hold events corresponding to CPU profile
	// samples, which arrive out of band and not directly connected to a
	// specific P.
	cpuLogBuf traceBufPtr

	reader atomic.Pointer[g] // goroutine that called ReadTrace, or nil

	signalLock  atomic.Uint32 // protects use of the following member, only usable in signal handlers
	cpuLogWrite *profBuf      // copy of cpuLogRead for use in signal handlers, set without signalLock

	// Dictionary for traceEvString.
	//
	// TODO: central lock to access the map is not ideal.
	//   option: pre-assign ids to all user annotation region names and tags
	//   option: per-P cache
	//   option: sync.Map like data structure
	stringsLock mutex
	strings     map[string]uint64
	stringSeq   uint64

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}

// gTraceState is per-G state for the tracer.
type gTraceState struct {
	sysExitTime        traceTime // timestamp when syscall has returned
	tracedSyscallEnter bool      // syscall or cgo was entered while trace was enabled or StartTrace has emitted EvGoInSyscall about this goroutine
	seq                uint64    // trace event sequencer
	lastP              puintptr  // last P that emitted an event for this goroutine
}

// Unused; for compatibility with the new tracer.
func (s *gTraceState) reset() {}

// mTraceState is per-M state for the tracer.
type mTraceState struct {
	startingTrace  bool // this M is in TraceStart, potentially before traceEnabled is true
	tracedSTWStart bool // this M traced a STW start, so it should trace an end
}

// pTraceState is per-P state for the tracer.
type pTraceState struct {
	buf traceBufPtr

	// inSweep indicates the sweep events should be traced.
	// This is used to defer the sweep start event until a span
	// has actually been swept.
	inSweep bool

	// swept and reclaimed track the number of bytes swept and reclaimed
	// by sweeping in the current sweep loop (while inSweep was true).
	swept, reclaimed uintptr
}

// traceLockInit initializes global trace locks.
func traceLockInit() {
	lockInit(&trace.bufLock, lockRankTraceBuf)
	lockInit(&trace.stringsLock, lockRankTraceStrings)
	lockInit(&trace.lock, lockRankTrace)
	lockInit(&trace.stackTab.lock, lockRankTraceStackTab)
}

// traceBufHeader is the header of a per-P tracing buffer.
type traceBufHeader struct {
	link     traceBufPtr             // in trace.empty/full
	lastTime traceTime               // when we wrote the last event
	pos      int                     // next write offset in arr
	stk      [traceStackSize]uintptr // scratch buffer for traceback
}

// traceBuf is per-P tracing buffer.
type traceBuf struct {
	_ sys.NotInHeap
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer, indexed by traceBufHeader.pos
}

// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
//
// TODO: Since traceBuf now embeds runtime/internal/sys.NotInHeap, this isn't necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}

// traceEnabled returns true if the trace is currently enabled.
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func traceEnabled() bool {
	return trace.enabled
}

// traceShuttingDown returns true if the trace is currently shutting down.
//
//go:nosplit
func traceShuttingDown() bool {
	return trace.shutdown
}

// traceLocker represents an M writing trace events. While a traceLocker value
// is valid, the tracer observes all operations on the G/M/P or trace events being
// written as happening atomically.
//
// This doesn't do much for the current tracer, because the current tracer doesn't
// need atomicity around non-trace runtime operations. All the state it needs it
// collects carefully during a STW.
type traceLocker struct {
	enabled bool
}

// traceAcquire prepares this M for writing one or more trace events.
//
// This exists for compatibility with the upcoming new tracer; it doesn't do much
// in the current tracer.
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func traceAcquire() traceLocker {
	if !traceEnabled() {
		return traceLocker{false}
	}
	return traceLocker{true}
}

// ok returns true if the traceLocker is valid (i.e. tracing is enabled).
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func (tl traceLocker) ok() bool {
	return tl.enabled
}

// traceRelease indicates that this M is done writing trace events.
//
// This exists for compatibility with the upcoming new tracer; it doesn't do anything
// in the current tracer.
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func traceRelease(tl traceLocker) {
}

// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via [ReadTrace].
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the [runtime/trace] package or the [testing] package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	// Do not stop the world during GC so we ensure we always see
	// a consistent view of GC-related events (e.g. a start is always
	// paired with an end).
	stw := stopTheWorldGC(stwStartTrace)

	// Prevent sysmon from running any code that could generate events.
	lock(&sched.sysmonlock)

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC(stw)
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// To instruct traceEvent that it must not ignore events below, we set trace.startingTrace.
	// trace.enabled is set afterwards once we have emitted all preliminary events.
	mp := getg().m
	mp.trace.startingTrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	stkBuf := make([]uintptr, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)

	profBuf := newProfBuf(2, profBufWordCount, profBufTagCount) // after the timestamp, header is [pp.id, gp.goid]
	trace.cpuLogRead = profBuf

	// We must not acquire trace.signalLock outside of a signal handler: a
	// profiling signal may arrive at any time and try to acquire it, leading to
	// deadlock. Because we can't use that lock to protect updates to
	// trace.cpuLogWrite (only use of the structure it references), reads and
	// writes of the pointer must be atomic. (And although this field is never
	// the sole pointer to the profBuf value, it's best to allow a write barrier
	// here.)
	atomicstorep(unsafe.Pointer(&trace.cpuLogWrite), unsafe.Pointer(profBuf))

	// World is stopped, no need to lock.
	forEachGRace(func(gp *g) {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.trace.seq = 0
			gp.trace.lastP = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{logicalStackSentinel, startPCforTrace(gp.startpc) + sys.PCQuantum})
			traceEvent(traceEvGoCreate, -1, gp.goid, uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.trace.seq++
			traceEvent(traceEvGoWaiting, -1, gp.goid)
		}
		if status == _Gsyscall {
			gp.trace.seq++
			gp.trace.tracedSyscallEnter = true
			traceEvent(traceEvGoInSyscall, -1, gp.goid)
		} else if status == _Gdead && gp.m != nil && gp.m.isextra {
			// Trigger two trace events for the dead g in the extra m,
			// since the next event of the g will be traceEvGoSysExit in exitsyscall,
			// while calling from C thread to Go.
			gp.trace.seq = 0
			gp.trace.lastP = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{logicalStackSentinel, startPCforTrace(0) + sys.PCQuantum}) // no start pc
			traceEvent(traceEvGoCreate, -1, gp.goid, uint64(id), stackID)
			gp.trace.seq++
			gp.trace.tracedSyscallEnter = true
			traceEvent(traceEvGoInSyscall, -1, gp.goid)
		} else {
			// We need to explicitly clear the flag. A previous trace might have ended with a goroutine
			// not emitting a GoSysExit and clearing the flag, leaving it in a stale state. Clearing
			// it here makes it unambiguous to any goroutine exiting a syscall racing with us that
			// no EvGoInSyscall event was emitted for it. (It's not racy to set this flag here, because
			// it'll only get checked when the goroutine runs again, which will be after the world starts
			// again.)
			gp.trace.tracedSyscallEnter = false
		}
	})
	// Use a dummy traceLocker. The trace isn't enabled yet, but we can still write events.
	tl := traceLocker{}
	tl.ProcStart()
	tl.GoStart()
	// Note: startTicks needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysExitTime after startTicks but before traceEvGoInSyscall timestamp.
	// It will lead to a false conclusion that cputicks is broken.
	trace.startTime = traceClockNow()
	trace.startTicks = cputicks()
	trace.startNanotime = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// string to id mapping
	//  0 : reserved for an empty string
	//  remaining: other strings registered by traceString
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	mp.trace.startingTrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(mp, pid)

	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	// Record the current state of HeapGoal to avoid information loss in trace.
	//
	// Use the same dummy trace locker. The trace can't end until after we start
	// the world, and we can safely trace from here.
	tl.HeapGoal()

	startTheWorldGC(stw)
	return nil
}
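
// As the doc comment above notes, most clients drive tracing through the
// runtime/trace package rather than calling StartTrace/ReadTrace/StopTrace
// directly. A minimal sketch of that usage (output file name is arbitrary):
//
//	f, err := os.Create("trace.out")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer f.Close()
//	if err := trace.Start(f); err != nil { // runtime/trace calls StartTrace
//		log.Fatal(err)
//	}
//	defer trace.Stop() // calls StopTrace after draining ReadTrace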

// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stw := stopTheWorldGC(stwStopTrace)

	// See the comment in StartTrace.
	lock(&sched.sysmonlock)

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC(stw)
		return
	}

	// Trace GoSched for us, and use a dummy locker. The world is stopped
	// and we control whether the trace is enabled, so this is safe.
	tl := traceLocker{}
	tl.GoSched()

	atomicstorep(unsafe.Pointer(&trace.cpuLogWrite), nil)
	trace.cpuLogRead.close()
	traceReadCPU()

	// Loop over all allocated Ps because dead Ps may still have
	// trace buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.trace.buf
		if buf != 0 {
			traceFullQueue(buf)
			p.trace.buf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}
	if trace.cpuLogBuf != 0 {
		buf := trace.cpuLogBuf
		trace.cpuLogBuf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	// Wait for startNanotime != endNanotime. On Windows the default interval between
	// system clock ticks is typically between 1 and 15 milliseconds, which may not
	// have passed since the trace started. Without nanotime moving forward, trace
	// tooling has no way of identifying how much real time each cputicks time delta
	// represents.
	for {
		trace.endTime = traceClockNow()
		trace.endTicks = cputicks()
		trace.endNanotime = nanotime()

		if trace.endNanotime != trace.startNanotime || faketime != 0 {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC(stw)

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	systemstack(func() {
		// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
		lock(&trace.lock)
		for _, p := range allp[:cap(allp)] {
			if p.trace.buf != 0 {
				throw("trace: non-empty trace buffer in proc")
			}
		}
		if trace.buf != 0 {
			throw("trace: non-empty global trace buffer")
		}
		if trace.fullHead != 0 || trace.fullTail != 0 {
			throw("trace: non-empty full trace buffer")
		}
		if trace.reading != 0 || trace.reader.Load() != nil {
			throw("trace: reading after shutdown")
		}
		for trace.empty != 0 {
			buf := trace.empty
			trace.empty = buf.ptr().link
			sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
		}
		trace.strings = nil
		trace.shutdown = false
		trace.cpuLogRead = nil
		unlock(&trace.lock)
	})
}

// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
top:
	var buf []byte
	var park bool
	systemstack(func() {
		buf, park = readTrace0()
	})
	if park {
		gopark(func(gp *g, _ unsafe.Pointer) bool {
			if !trace.reader.CompareAndSwapNoWB(nil, gp) {
				// We're racing with another reader.
				// Wake up and handle this case.
				return false
			}

			if g2 := traceReader(); gp == g2 {
				// New data arrived between unlocking
				// and the CAS and we won the wake-up
				// race, so wake up directly.
				return false
			} else if g2 != nil {
				printlock()
				println("runtime: got trace reader", g2, g2.goid)
				throw("unexpected trace reader")
			}

			return true
		}, nil, waitReasonTraceReaderBlocked, traceBlockSystemGoroutine, 2)
		goto top
	}

	return buf
}
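
// A minimal sketch of the read protocol described above, with w standing in
// for any io.Writer (this is essentially what the runtime/trace package's
// reader goroutine does):
//
//	for {
//		data := runtime.ReadTrace()
//		if data == nil {
//			break // tracing stopped and all buffered data has been returned
//		}
//		w.Write(data) // consume the chunk before the next ReadTrace call
//	}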

// readTrace0 is ReadTrace's continuation on g0. This must run on the
// system stack because it acquires trace.lock.
//
//go:systemstack
func readTrace0() (buf []byte, park bool) {
	if raceenabled {
		// g0 doesn't have a race context. Borrow the user G's.
		if getg().racectx != 0 {
			throw("expected racectx == 0")
		}
		getg().racectx = getg().m.curg.racectx
		// (This defer should get open-coded, which is safe on
		// the system stack.)
		defer func() { getg().racectx = 0 }()
	}

	// Optimistically look for CPU profile samples. This may write new stack
	// records, and may write new tracing buffers. This must be done with the
	// trace lock not held. footerWritten and shutdown are safe to access
	// here. They are only mutated by this goroutine or during a STW.
	if !trace.footerWritten && !trace.shutdown {
		traceReadCPU()
	}

	// This function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)

	if trace.reader.Load() != nil {
		// More than one goroutine is reading the trace. This is bad.
		// But we would rather not crash the program because of tracing,
		// since tracing can be enabled at runtime on production servers.
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil, false
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		unlock(&trace.lock)
		return []byte("go 1.21 trace\x00\x00\x00"), false
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		// We don't simply use a note because the scheduler
		// executes this goroutine directly when it wakes up
		// (also a note would consume an M).
		unlock(&trace.lock)
		return nil, true
	}
newFull:
	assertLockHeld(&trace.lock)
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos], false
	}

	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		freq := (float64(trace.endTicks-trace.startTicks) / traceTimeDiv) / (float64(trace.endNanotime-trace.startNanotime) / 1e9)
		if freq <= 0 {
			throw("trace: ReadTrace got invalid frequency")
		}
		unlock(&trace.lock)

		// Write frequency event.
		bufp := traceFlush(0, 0)
		buf := bufp.ptr()
		buf.byte(traceEvFrequency | 0<<traceArgCountShift)
		buf.varint(uint64(freq))

		// Dump stack table.
		// This will emit a bunch of full buffers, we will pick them up
		// on the next iteration.
		bufp = trace.stackTab.dump(bufp)

		// Flush final buffer.
		lock(&trace.lock)
		traceFullQueue(bufp)
		goto newFull // trace.lock should be held at newFull
	}
	// Done.
	if trace.shutdown {
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which race
			// detector does not see. This is required to avoid false
			// race reports on writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so we can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil, false
	}
	// Also bad, but see the comment above.
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil, false
}

// traceReader returns the trace reader that should be woken up, if any.
// Callers should first check that trace.enabled or trace.shutdown is set.
//
// This must run on the system stack because it acquires trace.lock.
//
//go:systemstack
func traceReader() *g {
	// Optimistic check first
	if traceReaderAvailable() == nil {
		return nil
	}
	lock(&trace.lock)
	gp := traceReaderAvailable()
	if gp == nil || !trace.reader.CompareAndSwapNoWB(gp, nil) {
		unlock(&trace.lock)
		return nil
	}
	unlock(&trace.lock)
	return gp
}

// traceReaderAvailable returns the trace reader if it is not currently
// scheduled and should be. Callers should first check that trace.enabled
// or trace.shutdown is set.
func traceReaderAvailable() *g {
	if trace.fullHead != 0 || trace.shutdown {
		return trace.reader.Load()
	}
	return nil
}

// traceProcFree frees trace buffer associated with pp.
//
// This must run on the system stack because it acquires trace.lock.
//
//go:systemstack
func traceProcFree(pp *p) {
	buf := pp.trace.buf
	pp.trace.buf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceThreadDestroy is a no-op. It exists as a stub to support the new tracer.
//
// This must run on the system stack, just to match the new tracer.
func traceThreadDestroy(_ *m) {
	// No-op in old tracer.
}

// traceFullQueue queues buf into queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}

// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
// ev is event type.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.

	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceLockBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceLockBuffer.
	//
	// Note trace_userTaskCreate runs the same check.
	if !trace.enabled && !mp.trace.startingTrace {
		traceReleaseBuffer(mp, pid)
		return
	}

	if skip > 0 {
		if getg() == mp.curg {
			skip++ // +1 because stack is captured in traceEventLocked.
		}
	}
	traceEventLocked(0, mp, pid, bufp, ev, 0, skip, args...)
	traceReleaseBuffer(mp, pid)
}

// traceEventLocked writes a single event of type ev to the trace buffer bufp,
// flushing the buffer if necessary. pid is the id of the current P, or
// traceGlobProc if we're tracing without a real P.
//
// Preemption is disabled, and if running without a real P the global tracing
// buffer is locked.
//
// Event types that do not include a stack set skip to -1. Event types that
// include a stack may explicitly reference a stackID from the trace.stackTab
// (obtained by an earlier call to traceStackID). Without an explicit stackID,
// this function will automatically capture the stack of the goroutine currently
// running on mp, skipping skip top frames or, if skip is 0, writing out an
// empty stack record.
//
// It records the event's args to the traceBuf, and also makes an effort to
// reserve extraBytes bytes of additional space immediately following the event,
// in the same traceBuf.
func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, stackID uint32, skip int, args ...uint64) {
	buf := bufp.ptr()
	// TODO: test on non-zero extraBytes param.
	maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two additional params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		systemstack(func() {
			buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		})
		bufp.set(buf)
	}

	ts := traceClockNow()
	if ts <= buf.lastTime {
		ts = buf.lastTime + 1
	}
	tsDiff := uint64(ts - buf.lastTime)
	buf.lastTime = ts
	narg := byte(len(args))
	if stackID != 0 || skip >= 0 {
		narg++
	}
	// We have only 2 bits for number of arguments.
	// If number is >= 3, then the event type is followed by event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tsDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if stackID != 0 {
		buf.varint(uint64(stackID))
	} else if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
}
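
// For reference, the byte layout traceEventLocked produces for one event is:
//
//	[ev|narg<<traceArgCountShift] [length?] [tsDiff varint] [arg varints...] [stack id varint?]
//
// where the length byte is present only when narg == 3, and the stack id is
// present when stackID != 0 or skip >= 0 (a skip of 0 writes an empty stack).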

// traceCPUSample writes a CPU profile sample stack to the execution tracer's
// profiling buffer. It is called from a signal handler, so is limited in what
// it can do.
func traceCPUSample(gp *g, _ *m, pp *p, stk []uintptr) {
	if !traceEnabled() {
		// Tracing is usually turned off; don't spend time acquiring the signal
		// lock unless it's active.
		return
	}

	// Match the clock used in traceEventLocked
	now := traceClockNow()
	// The "header" here is the ID of the P that was running the profiled code,
	// followed by the ID of the goroutine. (For normal CPU profiling, it's
	// usually the number of samples with the given stack.) Near syscalls, pp
	// may be nil. Reporting goid of 0 is fine for either g0 or a nil gp.
	var hdr [2]uint64
	if pp != nil {
		// Overflow records in profBuf have all header values set to zero. Make
		// sure that real headers have at least one bit set.
		hdr[0] = uint64(pp.id)<<1 | 0b1
	} else {
		hdr[0] = 0b10
	}
	if gp != nil {
		hdr[1] = gp.goid
	}

	// Allow only one writer at a time
	for !trace.signalLock.CompareAndSwap(0, 1) {
		// TODO: Is it safe to osyield here? https://go.dev/issue/52672
		osyield()
	}

	if log := (*profBuf)(atomic.Loadp(unsafe.Pointer(&trace.cpuLogWrite))); log != nil {
		// Note: we don't pass a tag pointer here (how should profiling tags
		// interact with the execution tracer?), but if we did we'd need to be
		// careful about write barriers. See the long comment in profBuf.write.
		log.write(nil, int64(now), hdr[:], stk)
	}

	trace.signalLock.Store(0)
}
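
// For example, a sample taken while P 5 was running goroutine 42 gets
// hdr = [2]uint64{5<<1 | 1, 42}, while a sample with no P gets hdr[0] = 0b10.
// traceReadCPU reverses this below: ppid = data[2]>>1, and the low bit of
// data[2] says whether a real P was present.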

func traceReadCPU() {
	bufp := &trace.cpuLogBuf

	for {
		data, tags, _ := trace.cpuLogRead.read(profBufNonBlocking)
		if len(data) == 0 {
			break
		}
		for len(data) > 0 {
			if len(data) < 4 || data[0] > uint64(len(data)) {
				break // truncated profile
			}
			if data[0] < 4 || tags != nil && len(tags) < 1 {
				break // malformed profile
			}
			if len(tags) < 1 {
				break // mismatched profile records and tags
			}
			timestamp := data[1]
			ppid := data[2] >> 1
			if hasP := (data[2] & 0b1) != 0; !hasP {
				ppid = ^uint64(0)
			}
			goid := data[3]
			stk := data[4:data[0]]
			empty := len(stk) == 1 && data[2] == 0 && data[3] == 0
			data = data[data[0]:]
			// No support here for reporting goroutine tags at the moment; if
			// that information is to be part of the execution trace, we'd
			// probably want to see when the tags are applied and when they
			// change, instead of only seeing them when we get a CPU sample.
			tags = tags[1:]

			if empty {
				// Looks like an overflow record from the profBuf. Not much to
				// do here, we only want to report full records.
				//
				// TODO: should we start a goroutine to drain the profBuf,
				// rather than relying on a high-enough volume of tracing events
				// to keep ReadTrace busy? https://go.dev/issue/52674
				continue
			}

			buf := bufp.ptr()
			if buf == nil {
				systemstack(func() {
					*bufp = traceFlush(*bufp, 0)
				})
				buf = bufp.ptr()
			}
			nstk := 1
			buf.stk[0] = logicalStackSentinel
			for ; nstk < len(buf.stk) && nstk-1 < len(stk); nstk++ {
				buf.stk[nstk] = uintptr(stk[nstk-1])
			}
			stackID := trace.stackTab.put(buf.stk[:nstk])

			traceEventLocked(0, nil, 0, bufp, traceEvCPUSample, stackID, 1, timestamp, ppid, goid)
		}
	}
}

// logicalStackSentinel is a sentinel value at pcBuf[0] signifying that
// pcBuf[1:] holds a logical stack requiring no further processing. Any other
// value at pcBuf[0] represents a skip value to apply to the physical stack in
// pcBuf[1:] after inline expansion.
const logicalStackSentinel = ^uintptr(0)
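
// For illustration, the two pcBuf forms handled by fpunwindExpand look like
// this (PC values hypothetical):
//
//	logical stack:  [^uintptr(0), pc1, pc2, ...]  // used as-is, minus pcBuf[0]
//	physical stack: [skip, retPC1, retPC2, ...]   // inline-expanded, skip applied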

// traceStackID captures a stack trace into pcBuf, registers it in the trace
// stack table, and returns its unique ID. pcBuf should have a length equal to
// traceStackSize. skip controls the number of leaf frames to omit in order to
// hide tracer internals from stack traces, see CL 5523.
func traceStackID(mp *m, pcBuf []uintptr, skip int) uint64 {
	gp := getg()
	curgp := mp.curg
	nstk := 1
	if tracefpunwindoff() || mp.hasCgoOnStack() {
		// Slow path: Unwind using default unwinder. Used when frame pointer
		// unwinding is unavailable or disabled (tracefpunwindoff), or might
		// produce incomplete results or crashes (hasCgoOnStack). Note that no
		// cgo callback related crashes have been observed yet. The main
		// motivation is to take advantage of a potentially registered cgo
		// symbolizer.
		pcBuf[0] = logicalStackSentinel
		if curgp == gp {
			nstk += callers(skip+1, pcBuf[1:])
		} else if curgp != nil {
			nstk += gcallers(curgp, skip, pcBuf[1:])
		}
	} else {
		// Fast path: Unwind using frame pointers.
		pcBuf[0] = uintptr(skip)
		if curgp == gp {
			nstk += fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf[1:])
		} else if curgp != nil {
			// We're called on the g0 stack through mcall(fn) or systemstack(fn). To
			// behave like gcallers above, we start unwinding from sched.bp, which
			// points to the caller frame of the leaf frame on g's stack. The return
			// address of the leaf frame is stored in sched.pc, which we manually
			// capture here.
			pcBuf[1] = curgp.sched.pc
			nstk += 1 + fpTracebackPCs(unsafe.Pointer(curgp.sched.bp), pcBuf[2:])
		}
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && curgp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(pcBuf[:nstk])
	return uint64(id)
}

// tracefpunwindoff returns true if frame pointer unwinding for the tracer is
// disabled via GODEBUG or not supported by the architecture.
// TODO(#60254): support frame pointer unwinding on plan9/amd64.
func tracefpunwindoff() bool {
	return debug.tracefpunwindoff != 0 || (goarch.ArchFamily != goarch.AMD64 && goarch.ArchFamily != goarch.ARM64) || goos.IsPlan9 == 1
}

// fpTracebackPCs populates pcBuf with the return addresses for each frame and
// returns the number of PCs written to pcBuf. The returned PCs correspond to
// "physical frames" rather than "logical frames"; that is if A is inlined into
// B, this will return a PC for only B.
func fpTracebackPCs(fp unsafe.Pointer, pcBuf []uintptr) (i int) {
	for i = 0; i < len(pcBuf) && fp != nil; i++ {
		// return addr sits one word above the frame pointer
		pcBuf[i] = *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
		// follow the frame pointer to the next one
		fp = unsafe.Pointer(*(*uintptr)(fp))
	}
	return i
}
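
// On the architectures where the tracer uses frame pointer unwinding (amd64
// and arm64, per tracefpunwindoff), each frame therefore looks like:
//
//	fp+PtrSize -> return PC of this frame (the value recorded in pcBuf)
//	fp         -> saved frame pointer of the caller (the next link to follow)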

// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	// Any time we acquire a buffer, we may end up flushing it,
	// but flushes are rare. Record the lock edge even if it
	// doesn't happen this time.
	lockRankMayTraceFlush()

	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.trace.buf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(mp *m, pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(mp)
}

// lockRankMayTraceFlush records the lock ranking effects of a
// potential call to traceFlush.
func lockRankMayTraceFlush() {
	lockWithRankMayAcquire(&trace.lock, getLockRank(&trace.lock))
}

// traceFlush puts buf onto the queue of full buffers and returns an empty buffer.
//
// This must run on the system stack because it acquires trace.lock.
//
//go:systemstack
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	lock(&trace.lock)
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// initialize the buffer for a new batch
	ts := traceClockNow()
	if ts <= bufp.lastTime {
		ts = bufp.lastTime + 1
	}
	bufp.lastTime = ts
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(uint64(ts))

	unlock(&trace.lock)
	return buf
}

// traceString adds a string to trace.strings and returns its id.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}

	lock(&trace.stringsLock)
	if raceenabled {
		// raceacquire is necessary because the map access
		// below is race annotated.
		raceacquire(unsafe.Pointer(&trace.stringsLock))
	}

	if id, ok := trace.strings[s]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&trace.stringsLock))
		}
		unlock(&trace.stringsLock)

		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	if raceenabled {
		racerelease(unsafe.Pointer(&trace.stringsLock))
	}
	unlock(&trace.stringsLock)

	// The memory allocation above may trigger tracing and
	// cause *bufp to change. The following code works with *bufp,
	// so there must be no memory allocation or any other activity
	// that causes tracing after this point.

	buf := bufp.ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		systemstack(func() {
			buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
			bufp.set(buf)
		})
	}
	buf.byte(traceEvString)
	buf.varint(id)

	// double-check the string and the length can fit.
	// Otherwise, truncate the string.
	slen := len(s)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}

	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], s[:slen])

	bufp.set(buf)
	return id, bufp
}

// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}
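
// For example, varint(300) appends the bytes 0xac 0x02: the low 7 bits
// (0x2c) with the continuation bit 0x80 set, followed by the remaining
// 0x02. Any value below 0x80 is encoded as a single byte.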

// varintAt writes varint v at byte position pos in buf. This always
// consumes traceBytesPerNumber bytes. This is intended for when the
// caller needs to reserve space for a varint but can't populate it
// until later.
func (buf *traceBuf) varintAt(pos int, v uint64) {
	for i := 0; i < traceBytesPerNumber; i++ {
		if i < traceBytesPerNumber-1 {
			buf.arr[pos] = 0x80 | byte(v)
		} else {
			buf.arr[pos] = byte(v)
		}
		v >>= 7
		pos++
	}
}
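
// For example, varintAt(pos, 1) writes 0x81 followed by eight 0x80 bytes and
// a final 0x00: the continuation bits keep the encoding traceBytesPerNumber
// (10) bytes wide, but it still decodes to the same value as the compact form.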

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}

// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex // Must be acquired on the system stack
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	// Switch to the system stack so we can acquire tab.lock
	var id uint32
	systemstack(func() {
		lock(&tab.lock)
		if id = tab.find(pcs, hash); id != 0 {
			unlock(&tab.lock)
			return
		}
		// Create new record.
		tab.seq++
		stk := tab.newStack(len(pcs))
		stk.hash = hash
		stk.id = tab.seq
		id = stk.id
		stk.n = len(pcs)
		stkpc := stk.stack()
		copy(stkpc, pcs)
		part := int(hash % uintptr(len(tab.tab)))
		stk.link = tab.tab[part]
		atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
		unlock(&tab.lock)
	})
	return id
}

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*goarch.PtrSize))
}

// traceFrames returns the frames corresponding to pcs. It may
// allocate and may emit trace events.
func traceFrames(bufp traceBufPtr, pcs []uintptr) ([]traceFrame, traceBufPtr) {
	frames := make([]traceFrame, 0, len(pcs))
	ci := CallersFrames(pcs)
	for {
		var frame traceFrame
		f, more := ci.Next()
		frame, bufp = traceFrameForPC(bufp, 0, f)
		frames = append(frames, frame)
		if !more {
			return frames, bufp
		}
	}
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
//
// This must run on the system stack because it calls traceFlush.
//
//go:systemstack
func (tab *traceStackTable) dump(bufp traceBufPtr) traceBufPtr {
	for i := range tab.tab {
		stk := tab.tab[i].ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			var frames []traceFrame
			frames, bufp = traceFrames(bufp, fpunwindExpand(stk.stack()))

			// Estimate the size of this record. This
			// bound is pretty loose, but avoids counting
			// lots of varint sizes.
			maxSize := 1 + traceBytesPerNumber + (2+4*len(frames))*traceBytesPerNumber
			// Make sure we have enough buffer space.
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < maxSize {
				bufp = traceFlush(bufp, 0)
			}

			// Emit header, with space reserved for length.
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			lenPos := buf.pos
			buf.pos += traceBytesPerNumber
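			// traceBytesPerNumber bytes are reserved here so that the record
			// length can be patched into this slot by varintAt once the body
			// has been written.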

			// Emit body.
			recPos := buf.pos
			buf.varint(uint64(stk.id))
			buf.varint(uint64(len(frames)))
			for _, frame := range frames {
				buf.varint(uint64(frame.PC))
				buf.varint(frame.funcID)
				buf.varint(frame.fileID)
				buf.varint(frame.line)
			}

			// Fill in size header.
			buf.varintAt(lenPos, uint64(buf.pos-recPos))
		}
	}

	tab.mem.drop()
	*tab = traceStackTable{}
	lockInit(&tab.lock, lockRankTraceStackTab)

	return bufp
}

// fpunwindExpand checks if pcBuf contains logical frames (which include inlined
// frames) or physical frames (produced by frame pointer unwinding) using a
// sentinel value in pcBuf[0]. Logical frames are simply returned without the
// sentinel. Physical frames are turned into logical frames via inline unwinding
// and by applying the skip value that's stored in pcBuf[0].
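//
// A logical pcBuf has the form [logicalStackSentinel, pc1, pc2, ...], while a
// physical one has the form [skip, retPC1, retPC2, ...].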
func fpunwindExpand(pcBuf []uintptr) []uintptr {
	if len(pcBuf) > 0 && pcBuf[0] == logicalStackSentinel {
		// pcBuf contains logical rather than physical frames; skip has already been
		// applied, so just return it without the sentinel value in pcBuf[0].
		return pcBuf[1:]
	}

	var (
		lastFuncID = abi.FuncIDNormal
		newPCBuf   = make([]uintptr, 0, traceStackSize)
		skip       = pcBuf[0]
		// skipOrAdd skips or appends retPC to newPCBuf and returns true if more
		// pcs can be added.
		skipOrAdd = func(retPC uintptr) bool {
			if skip > 0 {
				skip--
			} else {
				newPCBuf = append(newPCBuf, retPC)
			}
			return len(newPCBuf) < cap(newPCBuf)
		}
	)

outer:
	for _, retPC := range pcBuf[1:] {
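		// retPC is a return address pointing just past the call instruction;
		// step back by one so the lookup resolves to the call site itself.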
		callPC := retPC - 1
		fi := findfunc(callPC)
		if !fi.valid() {
			// There is no funcInfo if callPC belongs to a C function. In this case
			// we still keep the pc, but don't attempt to expand inlined frames.
			if more := skipOrAdd(retPC); !more {
				break outer
			}
			continue
		}

		u, uf := newInlineUnwinder(fi, callPC)
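		// Walk the logical frames at callPC, including calls that were inlined
		// into this physical frame, eliding compiler-generated wrappers.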
		for ; uf.valid(); uf = u.next(uf) {
			sf := u.srcFunc(uf)
			if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(lastFuncID) {
				// ignore wrappers
			} else if more := skipOrAdd(uf.pc + 1); !more {
				break outer
			}
			lastFuncID = sf.funcID
		}
	}
	return newPCBuf
}

type traceFrame struct {
	PC     uintptr
	funcID uint64
	fileID uint64
	line   uint64
}

// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame
	frame.PC = f.PC

	fn := f.Function
	const maxLen = 1 << 10
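	// Truncate long names from the front so the most specific suffix of the
	// function name or file path is kept.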
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.Line)
	file := f.File
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
type traceAllocBlock struct {
	_    sys.NotInHeap
	next traceAllocBlockPtr
	data [64<<10 - goarch.PtrSize]byte
}

// TODO: Since traceAllocBlock now embeds runtime/internal/sys.NotInHeap, this isn't necessary.
type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }

// alloc allocates an n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = alignUp(n, goarch.PtrSize)
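	// Grab a fresh block if there is none yet or the current one cannot fit
	// n more bytes.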
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}

// The following functions write specific events to trace.

func (_ traceLocker) Gomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func (_ traceLocker) ProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func (_ traceLocker) ProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls.
	// To handle this, we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func (_ traceLocker) GCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func (_ traceLocker) GCDone() {
	traceEvent(traceEvGCDone, -1)
}

func (_ traceLocker) STWStart(reason stwReason) {
	// Don't trace if this STW is for trace start/stop, since traceEnabled
	// switches during a STW.
	if reason == stwStartTrace || reason == stwStopTrace {
		return
	}
	getg().m.trace.tracedSTWStart = true
	traceEvent(traceEvSTWStart, -1, uint64(reason))
}

func (_ traceLocker) STWDone() {
	mp := getg().m
	if !mp.trace.tracedSTWStart {
		return
	}
	mp.trace.tracedSTWStart = false
	traceEvent(traceEvSTWDone, -1)
}

// GCSweepStart prepares to trace a sweep loop. This does not
// emit any events until GCSweepSpan is called.
//
// GCSweepStart must be paired with GCSweepDone and there
// must be no preemption points between these two calls.
func (_ traceLocker) GCSweepStart() {
	// Delay the actual GCSweepStart event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	pp := getg().m.p.ptr()
	if pp.trace.inSweep {
		throw("double traceGCSweepStart")
	}
	pp.trace.inSweep, pp.trace.swept, pp.trace.reclaimed = true, 0, 0
}

// GCSweepSpan traces the sweep of a single page.
//
// This may be called outside a GCSweepStart/GCSweepDone
// pair; however, it will not emit any trace events in this case.
func (_ traceLocker) GCSweepSpan(bytesSwept uintptr) {
	pp := getg().m.p.ptr()
	if pp.trace.inSweep {
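		// Emit the delayed GCSweepStart event when the first span is swept.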
		if pp.trace.swept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		pp.trace.swept += bytesSwept
	}
}

func (_ traceLocker) GCSweepDone() {
	pp := getg().m.p.ptr()
	if !pp.trace.inSweep {
		throw("missing traceGCSweepStart")
	}
	if pp.trace.swept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(pp.trace.swept), uint64(pp.trace.reclaimed))
	}
	pp.trace.inSweep = false
}

func (_ traceLocker) GCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func (_ traceLocker) GCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}

func (_ traceLocker) GoCreate(newg *g, pc uintptr) {
	newg.trace.seq = 0
	newg.trace.lastP = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
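	// logicalStackSentinel marks the stack as already logical, so fpunwindExpand
	// returns it unchanged when the stack table is dumped.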
	id := trace.stackTab.put([]uintptr{logicalStackSentinel, startPCforTrace(pc) + sys.PCQuantum})
	traceEvent(traceEvGoCreate, 2, newg.goid, uint64(id))
}

func (_ traceLocker) GoStart() {
	gp := getg().m.curg
	pp := gp.m.p
	gp.trace.seq++
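	// Mark workers get a labeled start event. Otherwise, starting on the same P
	// the goroutine last ran on allows the smaller local event form, which omits
	// the sequence number.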
	if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
		traceEvent(traceEvGoStartLabel, -1, gp.goid, gp.trace.seq, trace.markWorkerLabels[pp.ptr().gcMarkWorkerMode])
	} else if gp.trace.lastP == pp {
		traceEvent(traceEvGoStartLocal, -1, gp.goid)
	} else {
		gp.trace.lastP = pp
		traceEvent(traceEvGoStart, -1, gp.goid, gp.trace.seq)
	}
}

func (_ traceLocker) GoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func (_ traceLocker) GoSched() {
	gp := getg()
	gp.trace.lastP = gp.m.p
	traceEvent(traceEvGoSched, 1)
}

func (_ traceLocker) GoPreempt() {
	gp := getg()
	gp.trace.lastP = gp.m.p
	traceEvent(traceEvGoPreempt, 1)
}

func (_ traceLocker) GoPark(reason traceBlockReason, skip int) {
	// Convert the block reason directly to a trace event type.
	// See traceBlockReason for more information.
	traceEvent(byte(reason), skip)
}

func (_ traceLocker) GoUnpark(gp *g, skip int) {
	pp := getg().m.p
	gp.trace.seq++
	if gp.trace.lastP == pp {
		traceEvent(traceEvGoUnblockLocal, skip, gp.goid)
	} else {
		gp.trace.lastP = pp
		traceEvent(traceEvGoUnblock, skip, gp.goid, gp.trace.seq)
	}
}

func (_ traceLocker) GoSysCall() {
	var skip int
	switch {
	case tracefpunwindoff():
		// Unwind by skipping 1 frame relative to gp.syscallsp, which is captured 3
		// frames above this frame. For frame pointer unwinding we produce the same
		// results by hard-coding the number of frames between our caller and the
		// actual syscall; see the cases below.
		// TODO(felixge): Implement gp.syscallbp to avoid this workaround?
		skip = 1
	case GOOS == "solaris" || GOOS == "illumos":
		// These platforms don't use a libc_read_trampoline.
		skip = 3
	default:
		// Skip the extra trampoline frame used on most systems.
		skip = 4
	}
	getg().m.curg.trace.tracedSyscallEnter = true
	traceEvent(traceEvGoSysCall, skip)
}

func (_ traceLocker) GoSysExit(lostP bool) {
	if !lostP {
		throw("lostP must always be true in the old tracer for GoSysExit")
	}
	gp := getg().m.curg
	if !gp.trace.tracedSyscallEnter {
		// There was no syscall entry traced for us at all, so there's definitely
		// no EvGoSysBlock or EvGoInSyscall before us, which EvGoSysExit requires.
		return
	}
	gp.trace.tracedSyscallEnter = false
	ts := gp.trace.sysExitTime
	if ts != 0 && ts < trace.startTime {
		// There is a race between the code that initializes sysExitTimes
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysExitTime must therefore be treated
		// as "best effort". If they are valid for this trace, then great,
		// use them for greater accuracy. But if they're not valid for this
		// trace, assume that the trace was started after the actual syscall
		// exit (but before we actually managed to start the goroutine,
		// aka right now), and assign a fresh time stamp to keep the log consistent.
		ts = 0
	}
	gp.trace.sysExitTime = 0
	gp.trace.seq++
	gp.trace.lastP = gp.m.p
	traceEvent(traceEvGoSysExit, -1, gp.goid, gp.trace.seq, uint64(ts))
}

// nosplit because it's called from exitsyscall without a P.
//
//go:nosplit
func (_ traceLocker) RecordSyscallExitedTime(gp *g, oldp *p) {
	// Wait until the traceGoSysBlock event is emitted.
	// This ensures consistency of the trace (the goroutine is started after it is blocked).
	for oldp != nil && oldp.syscalltick == gp.m.syscalltick {
		osyield()
	}
	// We can't trace syscall exit right now because we don't have a P.
	// Tracing code can invoke write barriers that cannot run without a P.
	// So instead we remember the syscall exit time and emit the event
	// in execute when we have a P.
	gp.trace.sysExitTime = traceClockNow()
}

func (_ traceLocker) GoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked.
	// To handle this, we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func (t traceLocker) ProcSteal(pp *p, forMe bool) {
	t.ProcStop(pp)
}

func (_ traceLocker) HeapAlloc(live uint64) {
	traceEvent(traceEvHeapAlloc, -1, live)
}

func (_ traceLocker) HeapGoal() {
	heapGoal := gcController.heapGoal()
	if heapGoal == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvHeapGoal, -1, 0)
	} else {
		traceEvent(traceEvHeapGoal, -1, heapGoal)
	}
}

// These functions are used to access runtime functionality from runtime/trace.
// See runtime/trace/annotation.go.

//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	if !trace.enabled {
		return
	}

	// Same as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.trace.startingTrace {
		traceReleaseBuffer(mp, pid)
		return
	}

	typeStringID, bufp := traceString(bufp, pid, taskType)
	traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 0, 3, id, parentID, typeStringID)
	traceReleaseBuffer(mp, pid)
}

//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	traceEvent(traceEvUserTaskEnd, 2, id)
}

//go:linkname trace_userRegion runtime/trace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.trace.startingTrace {
		traceReleaseBuffer(mp, pid)
		return
	}

	nameStringID, bufp := traceString(bufp, pid, name)
	traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 0, 3, id, mode, nameStringID)
	traceReleaseBuffer(mp, pid)
}

//go:linkname trace_userLog runtime/trace.userLog
func trace_userLog(id uint64, category, message string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.trace.startingTrace {
		traceReleaseBuffer(mp, pid)
		return
	}

	categoryID, bufp := traceString(bufp, pid, category)

	// The log message is recorded after all of the normal trace event
	// arguments, including the task, category, and stack IDs. We must ask
	// traceEventLocked to reserve extra space for the length of the message
	// and the message itself.
	extraSpace := traceBytesPerNumber + len(message)
	traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 0, 3, id, categoryID)
	buf := bufp.ptr()

	// Double-check that the message and its length can fit.
	// Otherwise, truncate the message.
	slen := len(message)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}
	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], message[:slen])

	traceReleaseBuffer(mp, pid)
}

// startPCforTrace returns the start PC of a goroutine for tracing purposes.
// If pc is a wrapper, it returns the PC of the wrapped function. Otherwise it
// returns pc.
func startPCforTrace(pc uintptr) uintptr {
	f := findfunc(pc)
	if !f.valid() {
		return pc // may happen for locked g in extra M since its pc is 0.
	}
	w := funcdata(f, abi.FUNCDATA_WrapInfo)
	if w == nil {
		return pc // not a wrapper
	}
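	// The wrapinfo funcdata holds a text offset for the wrapped function;
	// resolve it to a PC.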
	return f.datap.textAddr(*(*uint32)(w))
}

// OneNewExtraM registers with the tracer the fact that a new extra M was
// created. This matters because if the M (which has an attached G) is used
// while the trace is still active, its existence needs to show up in the
// final trace.
func (tl traceLocker) OneNewExtraM(gp *g) {
	// Trigger two trace events for the locked g in the extra m,
	// since the next event of the g will be traceEvGoSysExit in exitsyscall
	// when a C thread calls into Go.
	tl.GoCreate(gp, 0) // no start pc
	gp.trace.seq++
	traceEvent(traceEvGoInSyscall, -1, gp.goid)
}

// Used only in the new tracer.
func (tl traceLocker) GoCreateSyscall(gp *g) {
}

// Used only in the new tracer.
func (tl traceLocker) GoDestroySyscall() {
}

// traceTime represents a timestamp for the trace.
type traceTime uint64

// traceClockNow returns a monotonic timestamp. The clock this function gets
// the timestamp from is specific to tracing, and shouldn't be mixed with other
// clock sources.
//
// nosplit because it's called from exitsyscall, which is nosplit.
//
//go:nosplit
func traceClockNow() traceTime {
	return traceTime(cputicks() / traceTimeDiv)
}

func traceExitingSyscall() {
}

func traceExitedSyscall() {
}

// Not used in the old tracer. Defined for compatibility.
const defaultTraceAdvancePeriod = 0