path: root/src/runtime/arena.go
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Implementation of (safe) user arenas.
//
// This file contains the implementation of user arenas wherein Go values can
// be manually allocated and freed in bulk. The act of manually freeing memory,
// potentially before a GC cycle, means that a garbage collection cycle can be
// delayed, improving efficiency by reducing GC cycle frequency. There are other
// potential efficiency benefits, such as improved locality and access to a more
// efficient allocation strategy.
//
// What makes the arenas here safe is that once they are freed, accessing the
// arena's memory will cause an explicit program fault, and the arena's address
// space will not be reused until no more pointers into it are found. There's one
// exception to this: if an arena's active chunk isn't exhausted when the arena is
// freed, that chunk is placed back into a pool for reuse, so a crash on access is
// not guaranteed in that case.
//
// While this may seem unsafe, it still prevents memory corruption, and is in fact
// necessary in order to make new(T) a valid implementation of arenas. Such a property
// is desirable to allow for a trivial implementation. (It also avoids complexities
// that arise from synchronization with the GC when trying to set the arena chunks to
// fault while the GC is active.)
//
// The implementation works in layers. At the bottom, arenas are managed in chunks.
// Each chunk must be a multiple of the heap arena size, or the heap arena size must
// be divisible by the arena chunk size. The address space for each chunk, and each
// corresponding heapArena for that address space, are eternally reserved for use as
// arena chunks. That is, they can never be used for the general heap. Each chunk
// is also represented by a single mspan, and is modeled as a single large heap
// allocation. It must be, because each chunk contains ordinary Go values that may
// point into the heap, so it must be scanned just like any other object. Any
// pointer into a chunk will therefore always cause the whole chunk to be scanned
// while its corresponding arena is still live.
//
// Chunks may be allocated either from new memory mapped by the OS on our behalf,
// or by reusing old freed chunks. When chunks are freed, their underlying memory
// is returned to the OS, set to fault on access, and may not be reused until the
// program doesn't point into the chunk anymore (the code refers to this state as
// "quarantined"), a property checked by the GC.
//
// The sweeper handles moving chunks out of this quarantine state to be ready for
// reuse. When the chunk is placed into the quarantine state, its corresponding
// span is marked as noscan so that the GC doesn't try to scan memory that would
// cause a fault.
//
// At the next layer are the user arenas themselves. They consist of a single
// active chunk which new Go values are bump-allocated into and a list of chunks
// that were exhausted when allocating into the arena. Once the arena is freed,
// it frees all full chunks it references, and places the active one onto a reuse
// list for a future arena to use. Each arena keeps its list of referenced chunks
// explicitly live until it is freed. Each user arena also maps to an object which
// has a finalizer attached that ensures the arena's chunks are all freed even if
// the arena itself is never explicitly freed.
//
// Pointer-ful memory is bump-allocated from low addresses to high addresses in each
// chunk, while pointer-free memory is bump-allocated from high addresses to low
// addresses. The reason for this is to take advantage of a GC optimization wherein
// the GC will stop scanning an object when there are no more pointers in it, which
// also allows us to elide clearing the heap bitmap for pointer-free Go values
// allocated into arenas.
//
// Note that arenas are not safe to use concurrently.
//
// In summary, there are 2 resources: arenas, and arena chunks. They exist in the
// following lifecycle:
//
// (1) A new arena is created via newArena.
// (2) Chunks are allocated to hold memory allocated into the arena with new or slice.
//    (a) Chunks are first allocated from the reuse list of partially-used chunks.
//    (b) If there are no such chunks, then chunks on the ready list are taken.
//    (c) Failing all the above, memory for a new chunk is mapped.
// (3) The arena is freed, or all references to it are dropped, triggering its finalizer.
//    (a) If the GC is not active, exhausted chunks are set to fault and placed on a
//        quarantine list.
//    (b) If the GC is active, exhausted chunks are placed on a fault list and will
//        go through step (a) at a later point in time.
//    (c) Any remaining partially-used chunk is placed on a reuse list.
// (4) Once no more pointers are found into quarantined arena chunks, the sweeper
//     takes these chunks out of quarantine and places them on the ready list.
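//
// As a rough sketch of how this is meant to be consumed (the exported API lives
// in a downstream arena package; the names below are illustrative, not
// definitive), user code would look something like:
//
//	a := arena.NewArena()                 // (1) create the arena
//	p := arena.New[myStruct](a)           // (2) allocate a *myStruct into a chunk
//	s := arena.MakeSlice[int](a, 0, 128)  // (2) allocate a slice backing store
//	// ... use p and s ...
//	a.Free()                              // (3) chunks are quarantined or reused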

package runtime

import (
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/math"
	"unsafe"
)

// Functions starting with arena_ are meant to be exported to downstream users
// of arenas. They should wrap these functions in a higher-level API.
//
// The underlying arena and its resources are managed through an opaque unsafe.Pointer.

// arena_newArena is a wrapper around newUserArena.
//
//go:linkname arena_newArena arena.runtime_arena_newArena
func arena_newArena() unsafe.Pointer {
	return unsafe.Pointer(newUserArena())
}

// arena_arena_New is a wrapper around (*userArena).new, except that typ
// is an any (must be a *_type, still) and typ must be a type descriptor
// for a pointer to the type to actually be allocated, i.e. pass a *T
// to allocate a T. This is necessary because this function returns a *T.
//
//go:linkname arena_arena_New arena.runtime_arena_arena_New
func arena_arena_New(arena unsafe.Pointer, typ any) any {
	t := (*_type)(efaceOf(&typ).data)
	if t.Kind_&kindMask != kindPtr {
		throw("arena_New: non-pointer type")
	}
	te := (*ptrtype)(unsafe.Pointer(t)).Elem
	x := ((*userArena)(arena)).new(te)
	var result any
	e := efaceOf(&result)
	e._type = t
	e.data = x
	return result
}

// arena_arena_Slice is a wrapper around (*userArena).slice.
//
//go:linkname arena_arena_Slice arena.runtime_arena_arena_Slice
func arena_arena_Slice(arena unsafe.Pointer, slice any, cap int) {
	((*userArena)(arena)).slice(slice, cap)
}

// arena_arena_Free is a wrapper around (*userArena).free.
//
//go:linkname arena_arena_Free arena.runtime_arena_arena_Free
func arena_arena_Free(arena unsafe.Pointer) {
	((*userArena)(arena)).free()
}
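
// As a hedged sketch (not the actual downstream source), a wrapper package
// would mirror these linknames and hide the unsafe.Pointer handle, roughly:
//
//	package arena
//
//	import "unsafe"
//
//	//go:linkname runtime_arena_newArena
//	func runtime_arena_newArena() unsafe.Pointer
//
//	//go:linkname runtime_arena_arena_Free
//	func runtime_arena_arena_Free(arena unsafe.Pointer)
//
//	// Arena wraps the opaque runtime handle.
//	type Arena struct{ a unsafe.Pointer }
//
//	func NewArena() *Arena { return &Arena{a: runtime_arena_newArena()} }
//	func (a *Arena) Free() { runtime_arena_arena_Free(a.a); a.a = nil }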

// arena_heapify takes a value that lives in an arena and makes a copy
// of it on the heap. Values that don't live in an arena are returned unmodified.
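// For example, cloning a *T, []T, or string whose memory sits in an arena chunk
// yields a heap copy that remains valid after the arena is freed, while a value
// that isn't backed by an arena chunk comes back untouched.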
//
//go:linkname arena_heapify arena.runtime_arena_heapify
func arena_heapify(s any) any {
	var v unsafe.Pointer
	e := efaceOf(&s)
	t := e._type
	switch t.Kind_ & kindMask {
	case kindString:
		v = stringStructOf((*string)(e.data)).str
	case kindSlice:
		v = (*slice)(e.data).array
	case kindPtr:
		v = e.data
	default:
		panic("arena: Clone only supports pointers, slices, and strings")
	}
	span := spanOf(uintptr(v))
	if span == nil || !span.isUserArenaChunk {
		// Not stored in a user arena chunk.
		return s
	}
	// Heap-allocate storage for a copy.
	var x any
	switch t.Kind_ & kindMask {
	case kindString:
		s1 := s.(string)
		s2, b := rawstring(len(s1))
		copy(b, s1)
		x = s2
	case kindSlice:
		len := (*slice)(e.data).len
		et := (*slicetype)(unsafe.Pointer(t)).Elem
		sl := new(slice)
		*sl = slice{makeslicecopy(et, len, len, (*slice)(e.data).array), len, len}
		xe := efaceOf(&x)
		xe._type = t
		xe.data = unsafe.Pointer(sl)
	case kindPtr:
		et := (*ptrtype)(unsafe.Pointer(t)).Elem
		e2 := newobject(et)
		typedmemmove(et, e2, e.data)
		xe := efaceOf(&x)
		xe._type = t
		xe.data = e2
	}
	return x
}

const (
	// userArenaChunkBytes is the size of a user arena chunk.
	userArenaChunkBytesMax = 8 << 20
	userArenaChunkBytes    = uintptr(int64(userArenaChunkBytesMax-heapArenaBytes)&(int64(userArenaChunkBytesMax-heapArenaBytes)>>63) + heapArenaBytes) // min(userArenaChunkBytesMax, heapArenaBytes)
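	// The expression above is a branchless min: with d = int64(userArenaChunkBytesMax-heapArenaBytes),
	// d>>63 is all ones exactly when d is negative, so d&(d>>63) equals d when
	// userArenaChunkBytesMax < heapArenaBytes and 0 otherwise; adding heapArenaBytes
	// back yields the smaller of the two values.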

	// userArenaChunkPages is the number of pages a user arena chunk uses.
	userArenaChunkPages = userArenaChunkBytes / pageSize

	// userArenaChunkMaxAllocBytes is the maximum size of an object that can
	// be allocated from an arena. This number is chosen to cap worst-case
	// fragmentation of user arenas to 25%. Larger allocations are redirected
	// to the heap.
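	// (A chunk is only retired when an allocation fails to fit in its remaining
	// free space, and no arena allocation exceeds this limit, so at most this
	// much of a chunk can be left unused; refill enforces this with a throw.)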
	userArenaChunkMaxAllocBytes = userArenaChunkBytes / 4
)

func init() {
	if userArenaChunkPages*pageSize != userArenaChunkBytes {
		throw("user arena chunk size is not a multiple of the page size")
	}
	if userArenaChunkBytes%physPageSize != 0 {
		throw("user arena chunk size is not a multiple of the physical page size")
	}
	if userArenaChunkBytes < heapArenaBytes {
		if heapArenaBytes%userArenaChunkBytes != 0 {
			throw("user arena chunk size is smaller than a heap arena, but doesn't divide it")
		}
	} else {
		if userArenaChunkBytes%heapArenaBytes != 0 {
			throw("user arena chunks size is larger than a heap arena, but not a multiple")
		}
	}
	lockInit(&userArenaState.lock, lockRankUserArenaState)
}

type userArena struct {
	// fullList is a list of full chunks that no longer have enough free memory, and
	// that we'll free once this user arena is freed.
	//
	// Can't use mSpanList here because it's not-in-heap.
	fullList *mspan

	// active is the user arena chunk we're currently allocating into.
	active *mspan

	// refs is a set of references to the arena chunks so that they're kept alive.
	//
	// The last reference in the list always refers to active, while the rest of
	// them correspond to fullList. Specifically, the head of fullList is the
	// second-to-last one, fullList.next is the third-to-last, and so on.
	//
	// In other words, every time a new chunk becomes active, it's appended to this
	// list.
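	//
	// For example, after the arena has gone through three chunks, refs is
	// [c0, c1, c2] where c2 is the active chunk, fullList points to c1, and
	// fullList.next points to c0.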
	refs []unsafe.Pointer

	// defunct is true if free has been called on this arena.
	//
	// This is just a best-effort way to discover a concurrent allocation
	// and free. Also used to detect a double-free.
	defunct atomic.Bool
}

// newUserArena creates a new userArena ready to be used.
func newUserArena() *userArena {
	a := new(userArena)
	SetFinalizer(a, func(a *userArena) {
		// If the arena handle is dropped without being freed, then
		// call free on the arena ourselves, since the arena chunks
		// are never reclaimed directly by the garbage collector.
		a.free()
	})
	a.refill()
	return a
}

// new allocates a new object of the provided type into the arena, and returns
// a pointer to it.
//
// This operation is not safe to call concurrently with other operations on the
// same arena.
func (a *userArena) new(typ *_type) unsafe.Pointer {
	return a.alloc(typ, -1)
}

// slice allocates a new slice backing store. slice must be a pointer to a slice
// (i.e. *[]T), because this method will update the slice header directly.
//
// cap determines the capacity of the slice backing store and must be non-negative.
//
// This operation is not safe to call concurrently with other operations on the
// same arena.
func (a *userArena) slice(sl any, cap int) {
	if cap < 0 {
		panic("userArena.slice: negative cap")
	}
	i := efaceOf(&sl)
	typ := i._type
	if typ.Kind_&kindMask != kindPtr {
		panic("slice result of non-ptr type")
	}
	typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
	if typ.Kind_&kindMask != kindSlice {
		panic("slice of non-ptr-to-slice type")
	}
	typ = (*slicetype)(unsafe.Pointer(typ)).Elem
	// typ is now the element type of the slice we want to allocate.

	*((*slice)(i.data)) = slice{a.alloc(typ, cap), cap, cap}
}
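
// For example (a sketch; a must be a live *userArena):
//
//	var s []int
//	a.slice(&s, 10) // s now has len == cap == 10, backed by arena memory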

// free returns the userArena's chunks to mheap and marks the arena as defunct.
//
// Must be called at most once for any given arena.
//
// This operation is not safe to call concurrently with other operations on the
// same arena.
func (a *userArena) free() {
	// Check for a double-free.
	if a.defunct.Load() {
		panic("arena double free")
	}

	// Mark ourselves as defunct.
	a.defunct.Store(true)
	SetFinalizer(a, nil)

	// Free all the full chunks.
	//
	// The refs on this list are in reverse order from the second-to-last.
	s := a.fullList
	i := len(a.refs) - 2
	for s != nil {
		a.fullList = s.next
		s.next = nil
		freeUserArenaChunk(s, a.refs[i])
		s = a.fullList
		i--
	}
	if a.fullList != nil || i >= 0 {
		// There's still something left on the full list, or we
		// failed to actually iterate over the entire refs list.
		throw("full list doesn't match refs list in length")
	}

	// Put the active chunk onto the reuse list.
	//
	// Note that active's reference is always the last reference in refs.
	s = a.active
	if s != nil {
		if raceenabled || msanenabled || asanenabled {
			// Don't reuse arenas with sanitizers enabled. We want to catch
			// any use-after-free errors aggressively.
			freeUserArenaChunk(s, a.refs[len(a.refs)-1])
		} else {
			lock(&userArenaState.lock)
			userArenaState.reuse = append(userArenaState.reuse, liveUserArenaChunk{s, a.refs[len(a.refs)-1]})
			unlock(&userArenaState.lock)
		}
	}
	// nil out a.active so that an allocation racing with this free is more likely to crash.
	a.active = nil
	a.refs = nil
}

// alloc reserves space in the current chunk or calls refill and reserves space
// in a new chunk. If cap is negative, the type will be taken literally, otherwise
// it will be considered as an element type for a slice backing store with capacity
// cap.
func (a *userArena) alloc(typ *_type, cap int) unsafe.Pointer {
	s := a.active
	var x unsafe.Pointer
	for {
		x = s.userArenaNextFree(typ, cap)
		if x != nil {
			break
		}
		s = a.refill()
	}
	return x
}

// refill inserts the current arena chunk onto the full list and obtains a new
// one, either from the reuse list of partially-used chunks or by allocating a
// new chunk from mheap.
func (a *userArena) refill() *mspan {
	// If there's an active chunk, assume it's full.
	s := a.active
	if s != nil {
		if s.userArenaChunkFree.size() > userArenaChunkMaxAllocBytes {
			// It's difficult to tell when we're actually out of memory
			// in a chunk because the allocation that failed may still leave
			// some free space available. However, that amount of free space
			// should never exceed the maximum allocation size.
			throw("wasted too much memory in an arena chunk")
		}
		s.next = a.fullList
		a.fullList = s
		a.active = nil
		s = nil
	}
	var x unsafe.Pointer

	// Check the partially-used list.
	lock(&userArenaState.lock)
	if len(userArenaState.reuse) > 0 {
		// Pick off the last arena chunk from the list.
		n := len(userArenaState.reuse) - 1
		x = userArenaState.reuse[n].x
		s = userArenaState.reuse[n].mspan
		userArenaState.reuse[n].x = nil
		userArenaState.reuse[n].mspan = nil
		userArenaState.reuse = userArenaState.reuse[:n]
	}
	unlock(&userArenaState.lock)
	if s == nil {
		// Allocate a new one.
		x, s = newUserArenaChunk()
		if s == nil {
			throw("out of memory")
		}
	}
	a.refs = append(a.refs, x)
	a.active = s
	return s
}

type liveUserArenaChunk struct {
	*mspan // Must represent a user arena chunk.

	// Reference to mspan.base() to keep the chunk alive.
	x unsafe.Pointer
}

var userArenaState struct {
	lock mutex

	// reuse contains a list of partially-used and already-live
	// user arena chunks that can be quickly reused for another
	// arena.
	//
	// Protected by lock.
	reuse []liveUserArenaChunk

	// fault contains full user arena chunks that need to be faulted.
	//
	// Protected by lock.
	fault []liveUserArenaChunk
}

// userArenaNextFree reserves space in the user arena for an item of the specified
// type. If cap is not -1, this is for an array of cap elements of type typ.
func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer {
	size := typ.Size_
	if cap > 0 {
		if size > ^uintptr(0)/uintptr(cap) {
			// Overflow.
			throw("out of memory")
		}
		size *= uintptr(cap)
	}
	if size == 0 || cap == 0 {
		return unsafe.Pointer(&zerobase)
	}
	if size > userArenaChunkMaxAllocBytes {
		// Allocations that don't fit well into a chunk are redirected
		// to the heap and allocated there directly.
		if cap >= 0 {
			return newarray(typ, cap)
		}
		return newobject(typ)
	}

	// Prevent preemption as we set up the space for a new object.
	//
	// Act like we're allocating.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	var ptr unsafe.Pointer
	if typ.PtrBytes == 0 {
		// Allocate pointer-less objects from the tail end of the chunk.
		v, ok := s.userArenaChunkFree.takeFromBack(size, typ.Align_)
		if ok {
			ptr = unsafe.Pointer(v)
		}
	} else {
		v, ok := s.userArenaChunkFree.takeFromFront(size, typ.Align_)
		if ok {
			ptr = unsafe.Pointer(v)
		}
	}
	if ptr == nil {
		// Failed to allocate.
		mp.mallocing = 0
		releasem(mp)
		return nil
	}
	if s.needzero != 0 {
		throw("arena chunk needs zeroing, but should already be zeroed")
	}
	// Set up heap bitmap and do extra accounting.
	if typ.PtrBytes != 0 {
		if cap >= 0 {
			userArenaHeapBitsSetSliceType(typ, cap, ptr, s.base())
		} else {
			userArenaHeapBitsSetType(typ, ptr, s.base())
		}
		c := getMCache(mp)
		if c == nil {
			throw("mallocgc called without a P or outside bootstrapping")
		}
		if cap > 0 {
			c.scanAlloc += size - (typ.Size_ - typ.PtrBytes)
		} else {
			c.scanAlloc += typ.PtrBytes
		}
	}

	// Ensure that the stores above that initialize the memory at ptr
	// to type-safe values and set the heap bits occur before the
	// caller can make ptr observable to the garbage collector.
	// Otherwise, on weakly ordered machines, the garbage collector
	// could follow ptr, but see uninitialized memory or stale heap
	// bits.
	publicationBarrier()

	mp.mallocing = 0
	releasem(mp)

	return ptr
}

// userArenaHeapBitsSetType is the equivalent of heapBitsSetType but for
// non-slice-backing-store Go values allocated in a user arena chunk. It
// sets up the heap bitmap for the value with type typ allocated at address ptr.
// base is the base address of the arena chunk.
func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, base uintptr) {
	h := writeHeapBitsForAddr(uintptr(ptr))

	// Our last allocation might have ended right at a noMorePtrs mark,
	// which we would not have erased. We need to erase that mark here,
	// because we're going to start adding new heap bitmap bits.
	// We only need to clear one mark, because below we make sure to
	// pad out the bits with zeroes and only write one noMorePtrs bit
	// for each new object.
	// (This is only necessary at noMorePtrs boundaries, as noMorePtrs
	// marks within an object allocated into the arena will be erased by
	// the normal writeHeapBitsForAddr mechanism.)
	//
	// Note that we skip this if this is the first allocation in the
	// arena because there's definitely no previous noMorePtrs mark
	// (in fact, we *must* do this, because we're going to try to back
	// up a pointer to fix this up).
	if uintptr(ptr)%(8*goarch.PtrSize*goarch.PtrSize) == 0 && uintptr(ptr) != base {
		// Back up one pointer and rewrite that pointer. That will
		// cause the writeHeapBits implementation to clear the
		// noMorePtrs bit we need to clear.
		r := heapBitsForAddr(uintptr(ptr)-goarch.PtrSize, goarch.PtrSize)
		_, p := r.next()
		b := uintptr(0)
		if p == uintptr(ptr)-goarch.PtrSize {
			b = 1
		}
		h = writeHeapBitsForAddr(uintptr(ptr) - goarch.PtrSize)
		h = h.write(b, 1)
	}

	p := typ.GCData // start of 1-bit pointer mask (or GC program)
	var gcProgBits uintptr
	if typ.Kind_&kindGCProg != 0 {
		// Expand gc program, using the object itself for storage.
		gcProgBits = runGCProg(addb(p, 4), (*byte)(ptr))
		p = (*byte)(ptr)
	}
	nb := typ.PtrBytes / goarch.PtrSize

	for i := uintptr(0); i < nb; i += ptrBits {
		k := nb - i
		if k > ptrBits {
			k = ptrBits
		}
		h = h.write(readUintptr(addb(p, i/8)), k)
	}
	// Note: we call pad here to ensure we emit explicit 0 bits
	// for the pointerless tail of the object. This ensures that
	// there's only a single noMorePtrs mark for the next object
	// to clear. We don't need to do this to clear stale noMorePtrs
	// markers from previous uses because arena chunk pointer bitmaps
	// are always fully cleared when reused.
	h = h.pad(typ.Size_ - typ.PtrBytes)
	h.flush(uintptr(ptr), typ.Size_)

	if typ.Kind_&kindGCProg != 0 {
		// Zero out temporary ptrmask buffer inside object.
		memclrNoHeapPointers(ptr, (gcProgBits+7)/8)
	}

	// Double-check that the bitmap was written out correctly.
	//
	// Derived from heapBitsSetType.
	const doubleCheck = false
	if doubleCheck {
		size := typ.Size_
		x := uintptr(ptr)
		h := heapBitsForAddr(x, size)
		for i := uintptr(0); i < size; i += goarch.PtrSize {
			// Compute the pointer bit we want at offset i.
			want := false
			off := i % typ.Size_
			if off < typ.PtrBytes {
				j := off / goarch.PtrSize
				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
			}
			if want {
				var addr uintptr
				h, addr = h.next()
				if addr != x+i {
					throw("userArenaHeapBitsSetType: pointer entry not correct")
				}
			}
		}
		if _, addr := h.next(); addr != 0 {
			throw("userArenaHeapBitsSetType: extra pointer")
		}
	}
}

// userArenaHeapBitsSetSliceType is the equivalent of heapBitsSetType but for
// Go slice backing store values allocated in a user arena chunk. It sets up the
// heap bitmap for n consecutive values with type typ allocated at address ptr.
func userArenaHeapBitsSetSliceType(typ *_type, n int, ptr unsafe.Pointer, base uintptr) {
	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
	if overflow || n < 0 || mem > maxAlloc {
		panic(plainError("runtime: allocation size out of range"))
	}
	for i := 0; i < n; i++ {
		userArenaHeapBitsSetType(typ, add(ptr, uintptr(i)*typ.Size_), base)
	}
}

// newUserArenaChunk allocates a user arena chunk, which maps to a single
// heap arena and single span. Returns a pointer to the base of the chunk
// (this is really important: we need to keep the chunk alive) and the span.
func newUserArenaChunk() (unsafe.Pointer, *mspan) {
	if gcphase == _GCmarktermination {
		throw("newUserArenaChunk called with gcphase == _GCmarktermination")
	}

	// Deduct assist credit. Because user arena chunks are modeled as one
	// giant heap object which counts toward heapLive, we're obligated to
	// assist the GC proportionally (and it's worth noting that the arena
	// does represent additional work for the GC, but we also have no idea
	// what that looks like until we actually allocate things into the
	// arena).
	deductAssistCredit(userArenaChunkBytes)

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	// Allocate a new user arena.
	var span *mspan
	systemstack(func() {
		span = mheap_.allocUserArenaChunk()
	})
	if span == nil {
		throw("out of memory")
	}
	x := unsafe.Pointer(span.base())

	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase != _GCoff {
		gcmarknewobject(span, span.base(), span.elemsize)
	}

	if raceenabled {
		// TODO(mknyszek): Track individual objects.
		racemalloc(unsafe.Pointer(span.base()), span.elemsize)
	}

	if msanenabled {
		// TODO(mknyszek): Track individual objects.
		msanmalloc(unsafe.Pointer(span.base()), span.elemsize)
	}

	if asanenabled {
		// TODO(mknyszek): Track individual objects.
		rzSize := computeRZlog(span.elemsize)
		span.elemsize -= rzSize
		span.limit -= rzSize
		span.userArenaChunkFree = makeAddrRange(span.base(), span.limit)
		asanpoison(unsafe.Pointer(span.limit), span.npages*pageSize-span.elemsize)
		asanunpoison(unsafe.Pointer(span.base()), span.elemsize)
	}

	if rate := MemProfileRate; rate > 0 {
		c := getMCache(mp)
		if c == nil {
			throw("newUserArenaChunk called without a P or outside bootstrapping")
		}
		// Note cache c only valid while m acquired; see #47302
		if rate != 1 && userArenaChunkBytes < c.nextSample {
			c.nextSample -= userArenaChunkBytes
		} else {
			profilealloc(mp, unsafe.Pointer(span.base()), userArenaChunkBytes)
		}
	}
	mp.mallocing = 0
	releasem(mp)

	// Again, because this chunk counts toward heapLive, potentially trigger a GC.
	if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
		gcStart(t)
	}

	if debug.malloc {
		if debug.allocfreetrace != 0 {
			tracealloc(unsafe.Pointer(span.base()), userArenaChunkBytes, nil)
		}

		if inittrace.active && inittrace.id == getg().goid {
			// Init functions are executed sequentially in a single goroutine.
			inittrace.bytes += uint64(userArenaChunkBytes)
		}
	}

	// Double-check it's aligned to the physical page size. Based on the current
	// implementation this is trivially true, but it need not be in the future.
	// However, if it's not aligned to the physical page size then we can't properly
	// set it to fault later.
	if uintptr(x)%physPageSize != 0 {
		throw("user arena chunk is not aligned to the physical page size")
	}

	return x, span
}

// isUnusedUserArenaChunk reports whether the arena chunk has been set to fault
// and no longer contains any scannable memory. However, it might still be
// mSpanInUse as it sits on the quarantine list, since it needs to be swept.
//
// This is not safe to execute unless the caller has ownership of the mspan or
// the world is stopped (preemption is prevented while the relevant state changes).
//
// This is really only meant to be used by accounting tests in the runtime to
// distinguish when a span shouldn't be counted (since mSpanInUse might not be
// enough).
func (s *mspan) isUnusedUserArenaChunk() bool {
	return s.isUserArenaChunk && s.spanclass == makeSpanClass(0, true)
}

// setUserArenaChunkToFault sets the address space for the user arena chunk to fault
// and releases any underlying memory resources.
//
// Must be in a non-preemptible state to ensure the consistency of statistics
// exported to MemStats.
func (s *mspan) setUserArenaChunkToFault() {
	if !s.isUserArenaChunk {
		throw("invalid span in heapArena for user arena")
	}
	if s.npages*pageSize != userArenaChunkBytes {
		throw("span on userArena.faultList has invalid size")
	}

	// Update the span class to be noscan. What we want to happen is that
	// any pointer into the span keeps it from getting recycled, so we want
	// the mark bit to get set, but we're about to set the address space to fault,
	// so we have to prevent the GC from scanning this memory.
	//
	// It's OK to set it here because (1) a GC isn't in progress, so the scanning code
	// won't make a bad decision, (2) we're currently non-preemptible and in the runtime,
	// so a GC is blocked from starting. We might race with sweeping, which could
	// put it on the "wrong" sweep list, but really don't care because the chunk is
	// treated as a large object span and there's no meaningful difference between scan
	// and noscan large objects in the sweeper. The STW at the start of the GC acts as a
	// barrier for this update.
	s.spanclass = makeSpanClass(0, true)

	// Actually set the arena chunk to fault, so we'll get dangling pointer errors.
	// sysFault currently uses a method on each OS that forces it to evacuate all
	// memory backing the chunk.
	sysFault(unsafe.Pointer(s.base()), s.npages*pageSize)

	// Everything on the list is counted as in-use, however sysFault transitions to
	// Reserved, not Prepared, so we skip updating heapFree or heapReleased and just
	// remove the memory from the total altogether; it's just address space now.
	gcController.heapInUse.add(-int64(s.npages * pageSize))

	// Count this as a free of an object right now as opposed to when
	// the span gets off the quarantine list. The main reason is so that the
	// amount of bytes allocated doesn't exceed how much is counted as
	// "mapped ready," which could cause a deadlock in the pacer.
	gcController.totalFree.Add(int64(s.npages * pageSize))

	// Update consistent stats to match.
	//
	// We're non-preemptible, so it's safe to update consistent stats (our P
	// won't change out from under us).
	stats := memstats.heapStats.acquire()
	atomic.Xaddint64(&stats.committed, -int64(s.npages*pageSize))
	atomic.Xaddint64(&stats.inHeap, -int64(s.npages*pageSize))
	atomic.Xadd64(&stats.largeFreeCount, 1)
	atomic.Xadd64(&stats.largeFree, int64(s.npages*pageSize))
	memstats.heapStats.release()

	// This counts as a free, so update heapLive.
	gcController.update(-int64(s.npages*pageSize), 0)

	// Mark it as free for the race detector.
	if raceenabled {
		racefree(unsafe.Pointer(s.base()), s.elemsize)
	}

	systemstack(func() {
		// Add the user arena to the quarantine list.
		lock(&mheap_.lock)
		mheap_.userArena.quarantineList.insert(s)
		unlock(&mheap_.lock)
	})
}

// inUserArenaChunk returns true if p points to a user arena chunk.
func inUserArenaChunk(p uintptr) bool {
	s := spanOf(p)
	if s == nil {
		return false
	}
	return s.isUserArenaChunk
}

// freeUserArenaChunk releases the user arena represented by s back to the runtime.
//
// x must be a live pointer within s.
//
// The runtime will set the user arena to fault once it's safe (the GC is no longer running)
// and then once the user arena is no longer referenced by the application, will allow it to
// be reused.
func freeUserArenaChunk(s *mspan, x unsafe.Pointer) {
	if !s.isUserArenaChunk {
		throw("span is not for a user arena")
	}
	if s.npages*pageSize != userArenaChunkBytes {
		throw("invalid user arena span size")
	}

	// Mark the region as free to various sanitizers immediately instead
	// of handling them at sweep time.
	if raceenabled {
		racefree(unsafe.Pointer(s.base()), s.elemsize)
	}
	if msanenabled {
		msanfree(unsafe.Pointer(s.base()), s.elemsize)
	}
	if asanenabled {
		asanpoison(unsafe.Pointer(s.base()), s.elemsize)
	}

	// Make ourselves non-preemptible as we manipulate state and statistics.
	//
	// Also required by setUserArenaChunkToFault.
	mp := acquirem()

	// We can only set user arenas to fault if we're in the _GCoff phase.
	if gcphase == _GCoff {
		lock(&userArenaState.lock)
		faultList := userArenaState.fault
		userArenaState.fault = nil
		unlock(&userArenaState.lock)

		s.setUserArenaChunkToFault()
		for _, lc := range faultList {
			lc.mspan.setUserArenaChunkToFault()
		}

		// Until the chunks are set to fault, keep them alive via the fault list.
		KeepAlive(x)
		KeepAlive(faultList)
	} else {
		// Put the user arena on the fault list.
		lock(&userArenaState.lock)
		userArenaState.fault = append(userArenaState.fault, liveUserArenaChunk{s, x})
		unlock(&userArenaState.lock)
	}
	releasem(mp)
}

// allocUserArenaChunk attempts to reuse a free user arena chunk represented
// as a span, allocating a new chunk if none is available.
//
// Must be in a non-preemptible state to ensure the consistency of statistics
// exported to MemStats.
//
// Acquires the heap lock. Must run on the system stack for that reason.
//
//go:systemstack
func (h *mheap) allocUserArenaChunk() *mspan {
	var s *mspan
	var base uintptr

	// First check the free list.
	lock(&h.lock)
	if !h.userArena.readyList.isEmpty() {
		s = h.userArena.readyList.first
		h.userArena.readyList.remove(s)
		base = s.base()
	} else {
		// Free list was empty, so allocate a new arena.
		hintList := &h.userArena.arenaHints
		if raceenabled {
			// In race mode just use the regular heap hints. We might fragment
			// the address space, but the race detector requires that the heap
			// is mapped contiguously.
			hintList = &h.arenaHints
		}
		v, size := h.sysAlloc(userArenaChunkBytes, hintList, false)
		if size%userArenaChunkBytes != 0 {
			throw("sysAlloc size is not divisible by userArenaChunkBytes")
		}
		if size > userArenaChunkBytes {
			// We got more than we asked for. This can happen if
			// heapArenaBytes > userArenaChunkBytes, or if sysAlloc just returns
			// some extra as a result of trying to find an aligned region.
			//
			// Divide it up and put it on the ready list.
			for i := uintptr(userArenaChunkBytes); i < size; i += userArenaChunkBytes {
				s := h.allocMSpanLocked()
				s.init(uintptr(v)+i, userArenaChunkPages)
				h.userArena.readyList.insertBack(s)
			}
			size = userArenaChunkBytes
		}
		base = uintptr(v)
		if base == 0 {
			// Out of memory.
			unlock(&h.lock)
			return nil
		}
		s = h.allocMSpanLocked()
	}
	unlock(&h.lock)

	// sysAlloc returns Reserved address space, and any span we're
	// reusing is set to fault (so, also Reserved), so transition
	// it to Prepared and then Ready.
	//
	// Unlike (*mheap).grow, just map in everything that we
	// asked for. We're likely going to use it all.
	sysMap(unsafe.Pointer(base), userArenaChunkBytes, &gcController.heapReleased)
	sysUsed(unsafe.Pointer(base), userArenaChunkBytes, userArenaChunkBytes)

	// Model the user arena as a heap span for a large object.
	spc := makeSpanClass(0, false)
	h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages)
	s.isUserArenaChunk = true

	// Account for this new arena chunk memory.
	gcController.heapInUse.add(int64(userArenaChunkBytes))
	gcController.heapReleased.add(-int64(userArenaChunkBytes))

	stats := memstats.heapStats.acquire()
	atomic.Xaddint64(&stats.inHeap, int64(userArenaChunkBytes))
	atomic.Xaddint64(&stats.committed, int64(userArenaChunkBytes))

	// Model the arena as a single large malloc.
	atomic.Xadd64(&stats.largeAlloc, int64(userArenaChunkBytes))
	atomic.Xadd64(&stats.largeAllocCount, 1)
	memstats.heapStats.release()

	// Count the alloc in inconsistent, internal stats.
	gcController.totalAlloc.Add(int64(userArenaChunkBytes))

	// Update heapLive.
	gcController.update(int64(userArenaChunkBytes), 0)

	// Put the large span in the mcentral swept list so that it's
	// visible to the background sweeper.
	h.central[spc].mcentral.fullSwept(h.sweepgen).push(s)
	s.limit = s.base() + userArenaChunkBytes
	s.freeindex = 1
	s.allocCount = 1

	// This must clear the entire heap bitmap so that it's safe
	// to allocate noscan data without writing anything out.
	s.initHeapBits(true)

	// Clear the span preemptively. It's an arena chunk, so let's assume
	// everything is going to be used.
	//
	// This also seems to make a massive difference as to whether or
	// not Linux decides to back this memory with transparent huge
	// pages. There's latency involved in this zeroing, but the hugepage
	// gains are almost always worth it. Note: it's important that we
	// clear even if it's freshly mapped and we know there's no point
	// to zeroing as *that* is the critical signal to use huge pages.
	memclrNoHeapPointers(unsafe.Pointer(s.base()), s.elemsize)
	s.needzero = 0

	s.freeIndexForScan = 1

	// Set up the range for allocation.
	s.userArenaChunkFree = makeAddrRange(base, s.limit)
	return s
}