path: root/src/runtime/mcentral.go
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 13:16:40 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 13:16:40 +0000
commit    47ab3d4a42e9ab51c465c4322d2ec233f6324e6b (patch)
tree      a61a0ffd83f4a3def4b36e5c8e99630c559aa723 /src/runtime/mcentral.go
parent    Initial commit. (diff)
Adding upstream version 1.18.10. (upstream/1.18.10, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/runtime/mcentral.go')
-rw-r--r--  src/runtime/mcentral.go  255
1 file changed, 255 insertions(+), 0 deletions(-)
diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go
new file mode 100644
index 0000000..e4bdf35
--- /dev/null
+++ b/src/runtime/mcentral.go
@@ -0,0 +1,255 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Central free lists.
+//
+// See malloc.go for an overview.
+//
+// The mcentral doesn't actually contain the list of free objects; the mspan does.
+// Each mcentral is two sets of mspans: those with free objects (c.partial)
+// and those that are completely allocated (c.full).
+
+package runtime
+
+import "runtime/internal/atomic"
+
+// Central list of free objects of a given size.
+//
+//go:notinheap
+type mcentral struct {
+ spanclass spanClass
+
+ // partial and full contain two mspan sets: one of swept in-use
+ // spans, and one of unswept in-use spans. These two trade
+ // roles on each GC cycle. The unswept set is drained either by
+ // allocation or by the background sweeper in every GC cycle,
+ // so only two roles are necessary.
+ //
+ // sweepgen is increased by 2 on each GC cycle, so the swept
+ // spans are in partial[sweepgen/2%2] and the unswept spans are in
+ // partial[1-sweepgen/2%2]. Sweeping pops spans from the
+ // unswept set and pushes spans that are still in-use on the
+ // swept set. Likewise, allocating an in-use span pushes it
+ // on the swept set.
+ //
+ // Some parts of the sweeper can sweep arbitrary spans, and hence
+ // can't remove them from the unswept set, but will add the span
+ // to the appropriate swept list. As a result, the parts of the
+ // sweeper and mcentral that do consume from the unswept list may
+ // encounter swept spans, and these should be ignored.
+ partial [2]spanSet // list of spans with a free object
+ full [2]spanSet // list of spans with no free objects
+}
+
+// Initialize a single central free list.
+func (c *mcentral) init(spc spanClass) {
+ c.spanclass = spc
+ lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine)
+ lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine)
+ lockInit(&c.full[0].spineLock, lockRankSpanSetSpine)
+ lockInit(&c.full[1].spineLock, lockRankSpanSetSpine)
+}
+
+// partialUnswept returns the spanSet which holds partially-filled
+// unswept spans for this sweepgen.
+func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet {
+ return &c.partial[1-sweepgen/2%2]
+}
+
+// partialSwept returns the spanSet which holds partially-filled
+// swept spans for this sweepgen.
+func (c *mcentral) partialSwept(sweepgen uint32) *spanSet {
+ return &c.partial[sweepgen/2%2]
+}
+
+// fullUnswept returns the spanSet which holds unswept spans without any
+// free slots for this sweepgen.
+func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet {
+ return &c.full[1-sweepgen/2%2]
+}
+
+// fullSwept returns the spanSet which holds swept spans without any
+// free slots for this sweepgen.
+func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
+ return &c.full[sweepgen/2%2]
+}
+
+// Allocate a span to use in an mcache.
+func (c *mcentral) cacheSpan() *mspan {
+ // Deduct credit for this span allocation and sweep if necessary.
+ spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
+ deductSweepCredit(spanBytes, 0)
+
+ traceDone := false
+ if trace.enabled {
+ traceGCSweepStart()
+ }
+
+ // If we sweep spanBudget spans without finding any free
+ // space, just allocate a fresh span. This limits the amount
+ // of time we can spend trying to find free space and
+ // amortizes the cost of small object sweeping over the
+ // benefit of having a full free span to allocate from. By
+ // setting this to 100, we limit the space overhead to 1%.
+ //
+ // TODO(austin,mknyszek): This still has bad worst-case
+ // throughput. For example, this could find just one free slot
+ // on the 100th swept span. That limits allocation latency, but
+ // still has very poor throughput. We could instead keep a
+ // running free-to-used budget and switch to fresh span
+ // allocation if the budget runs low.
+ spanBudget := 100
+
+ var s *mspan
+ var sl sweepLocker
+
+ // Try partial swept spans first.
+ sg := mheap_.sweepgen
+ if s = c.partialSwept(sg).pop(); s != nil {
+ goto havespan
+ }
+
+ sl = sweep.active.begin()
+ if sl.valid {
+ // Now try partial unswept spans.
+ for ; spanBudget >= 0; spanBudget-- {
+ s = c.partialUnswept(sg).pop()
+ if s == nil {
+ break
+ }
+ if s, ok := sl.tryAcquire(s); ok {
+ // We got ownership of the span, so let's sweep it and use it.
+ s.sweep(true)
+ sweep.active.end(sl)
+ goto havespan
+ }
+ // We failed to get ownership of the span, which means it's being or
+ // has been swept by an asynchronous sweeper that just couldn't remove it
+ // from the unswept list. That sweeper took ownership of the span and
+ // responsibility for either freeing it to the heap or putting it on the
+ // right swept list. Either way, we should just ignore it (and it's unsafe
+ // for us to do anything else).
+ }
+ // Now try full unswept spans, sweeping them and putting them into the
+ // right list if we fail to get a span.
+ for ; spanBudget >= 0; spanBudget-- {
+ s = c.fullUnswept(sg).pop()
+ if s == nil {
+ break
+ }
+ if s, ok := sl.tryAcquire(s); ok {
+ // We got ownership of the span, so let's sweep it.
+ s.sweep(true)
+ // Check if there's any free space.
+ freeIndex := s.nextFreeIndex()
+ if freeIndex != s.nelems {
+ s.freeindex = freeIndex
+ sweep.active.end(sl)
+ goto havespan
+ }
+ // Add it to the swept list, because sweeping didn't give us any free space.
+ c.fullSwept(sg).push(s.mspan)
+ }
+ // See comment for partial unswept spans.
+ }
+ sweep.active.end(sl)
+ }
+ if trace.enabled {
+ traceGCSweepDone()
+ traceDone = true
+ }
+
+ // We failed to get a span from the mcentral so get one from mheap.
+ s = c.grow()
+ if s == nil {
+ return nil
+ }
+
+ // At this point s is a span that should have free slots.
+havespan:
+ if trace.enabled && !traceDone {
+ traceGCSweepDone()
+ }
+ n := int(s.nelems) - int(s.allocCount)
+ if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
+ throw("span has no free objects")
+ }
+ freeByteBase := s.freeindex &^ (64 - 1)
+ whichByte := freeByteBase / 8
+ // Init alloc bits cache.
+ s.refillAllocCache(whichByte)
+
+ // Adjust the allocCache so that s.freeindex corresponds to the low bit in
+ // s.allocCache.
+ s.allocCache >>= s.freeindex % 64
+
+ return s
+}
+
+// Return a span from an mcache.
+//
+// s must have a span class corresponding to this
+// mcentral and it must not be empty.
+func (c *mcentral) uncacheSpan(s *mspan) {
+ if s.allocCount == 0 {
+ throw("uncaching span but s.allocCount == 0")
+ }
+
+ sg := mheap_.sweepgen
+ stale := s.sweepgen == sg+1
+
+ // Fix up sweepgen.
+ if stale {
+ // Span was cached before sweep began. It's our
+ // responsibility to sweep it.
+ //
+ // Set sweepgen to indicate it's not cached but needs
+ // sweeping and can't be allocated from. sweep will
+ // set s.sweepgen to indicate s is swept.
+ atomic.Store(&s.sweepgen, sg-1)
+ } else {
+ // Indicate that s is no longer cached.
+ atomic.Store(&s.sweepgen, sg)
+ }
+
+ // Put the span in the appropriate place.
+ if stale {
+ // It's stale, so just sweep it. Sweeping will put it on
+ // the right list.
+ //
+ // We don't use a sweepLocker here. Stale cached spans
+ // aren't in the global sweep lists, so mark termination
+ // itself holds up sweep completion until all mcaches
+ // have been swept.
+ ss := sweepLocked{s}
+ ss.sweep(false)
+ } else {
+ if int(s.nelems)-int(s.allocCount) > 0 {
+ // Put it back on the partial swept list.
+ c.partialSwept(sg).push(s)
+ } else {
+ // There's no free space and it's not stale, so put it on the
+ // full swept list.
+ c.fullSwept(sg).push(s)
+ }
+ }
+}
+
+// grow allocates a new empty span from the heap and initializes it for c's size class.
+func (c *mcentral) grow() *mspan {
+ npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
+ size := uintptr(class_to_size[c.spanclass.sizeclass()])
+
+ s := mheap_.alloc(npages, c.spanclass)
+ if s == nil {
+ return nil
+ }
+
+ // Use division by multiplication and shifts to quickly compute:
+ // n := (npages << _PageShift) / size
+ n := s.divideByElemSize(npages << _PageShift)
+ s.limit = s.base() + size*n
+ heapBitsForAddr(s.base()).initSpan(s)
+ return s
+}
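
The index arithmetic in the partial/full comments above (swept spans in partial[sweepgen/2%2], unswept spans in partial[1-sweepgen/2%2]) is easier to see with concrete values. Below is a minimal, self-contained sketch; the central type and its string "spans" are made up for illustration and are not the runtime's spanSet or mspan:

package main

import "fmt"

// A stand-in for mcentral's partial [2]spanSet: two sets that trade the
// "swept"/"unswept" roles as sweepgen advances by 2 per GC cycle.
type central struct {
	partial [2][]string
}

func (c *central) swept(sg uint32) *[]string   { return &c.partial[sg/2%2] }
func (c *central) unswept(sg uint32) *[]string { return &c.partial[1-sg/2%2] }

func main() {
	c := central{}
	sg := uint32(2) // sweepgen is always even; it advances by 2 per GC cycle

	// Allocating an in-use span pushes it on the swept set (partial[1] here).
	*c.swept(sg) = append(*c.swept(sg), "span A", "span B")

	// A new GC cycle increments sweepgen by 2. The same physical set that
	// held swept spans is now, by indexing alone, the unswept set.
	sg += 2
	fmt.Println("unswept after GC:", *c.unswept(sg)) // [span A span B]

	// Sweeping pops spans from the unswept set and pushes the ones still
	// in use onto the swept set, just as cacheSpan does above.
	for len(*c.unswept(sg)) > 0 {
		u := c.unswept(sg)
		s := (*u)[len(*u)-1]
		*u = (*u)[:len(*u)-1]
		*c.swept(sg) = append(*c.swept(sg), s)
	}
	fmt.Println("swept after sweeping:", *c.swept(sg)) // [span B span A]
}

Because both roles are derived from sweepgen, no explicit swap is ever needed: whatever was pushed onto the swept set during one cycle is automatically found on the unswept set in the next.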
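
The tryAcquire calls in cacheSpan depend on the sweepgen handshake documented in mheap.go: a span whose sweepgen trails mheap_.sweepgen by 2 needs sweeping, trailing by 1 means it is being swept, and an equal value means it is swept. Ownership is taken with a single compare-and-swap, which is why a caller that loses the race can simply ignore the span. A toy sketch of that race, using plain sync/atomic rather than the runtime's sweepLocker:

package main

import (
	"fmt"
	"sync/atomic"
)

// A toy span with only the field that matters here.
type span struct {
	sweepgen uint32
}

// tryAcquire attempts to take sweep ownership of s by moving its sweepgen
// from "needs sweeping" (sg-2) to "being swept" (sg-1). At most one caller
// can win the CAS.
func tryAcquire(s *span, sg uint32) bool {
	return atomic.CompareAndSwapUint32(&s.sweepgen, sg-2, sg-1)
}

func main() {
	sg := uint32(4)              // stands in for mheap_.sweepgen in this sketch
	s := &span{sweepgen: sg - 2} // an unswept span from the previous cycle

	fmt.Println(tryAcquire(s, sg)) // true: we own the span and may sweep it
	fmt.Println(tryAcquire(s, sg)) // false: someone else already owns it
}

The same encoding explains uncacheSpan above: a stale cached span (sweepgen == sg+1) is moved straight to sg-1, the "being swept" state, before the owning goroutine sweeps it itself.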
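
The last few lines of cacheSpan (freeByteBase := s.freeindex &^ (64 - 1), the refillAllocCache call, and the final shift) load the 64-object window of the allocation bitmap containing freeindex into allocCache and line bit 0 up with freeindex. Here is a self-contained sketch with hypothetical numbers, doing the refill inline instead of calling the runtime's refillAllocCache; a set bit in the cache means a free slot, matching the runtime's inverted encoding:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Hypothetical span: 128 objects, the first 70 already allocated.
	const nelems = 128
	freeindex := uintptr(70)

	// allocBits: one bit per object, 1 = allocated.
	allocBits := make([]byte, nelems/8)
	for i := uintptr(0); i < freeindex; i++ {
		allocBits[i/8] |= 1 << (i % 8)
	}

	// Mirror cacheSpan's arithmetic:
	//   freeByteBase := s.freeindex &^ (64 - 1)
	//   whichByte := freeByteBase / 8
	freeByteBase := freeindex &^ (64 - 1) // round down to a 64-object group
	whichByte := freeByteBase / 8         // byte offset of that group in allocBits

	// refillAllocCache loads 8 bytes of allocBits and inverts them, so a
	// set bit in allocCache means "free object".
	var allocCache uint64
	for i := uintptr(0); i < 8; i++ {
		allocCache |= uint64(^allocBits[whichByte+i]) << (8 * i)
	}

	// Shift so bit 0 of allocCache corresponds to freeindex itself.
	allocCache >>= freeindex % 64

	// The next free object is freeindex plus the count of trailing zeros.
	next := freeindex + uintptr(bits.TrailingZeros64(allocCache))
	fmt.Println(next) // 70: object 70 is the first free slot
}

With the cache aligned this way, the allocator's fast path only needs a count-trailing-zeros on allocCache to find the next free object.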
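
The comment in grow about computing n := (npages << _PageShift) / size by "division by multiplication and shifts" refers to replacing the divide with a multiply by a precomputed reciprocal; the runtime keeps one per size class (see divideByElemSize, called just above, and mksizeclasses.go). The sketch below uses a hypothetical 48-byte size class in an 8 KiB span and cross-checks the trick against plain division; the constant choice here is illustrative, not the runtime's exact derivation:

package main

import "fmt"

func main() {
	const (
		elemSize = 48   // hypothetical object size
		spanSize = 8192 // hypothetical span size in bytes
	)

	// divMul ~ ceil(2^32 / elemSize). For the small operands used here,
	// multiplying by it and shifting right by 32 divides exactly.
	divMul := uint64((1<<32 + elemSize - 1) / elemSize)

	divideByElemSize := func(n uint64) uint64 {
		return (n * divMul) >> 32
	}

	fmt.Println(divideByElemSize(spanSize), spanSize/elemSize) // 170 170

	// Verify against ordinary division for every byte count up to a span.
	for x := uint64(0); x <= spanSize; x++ {
		if divideByElemSize(x) != x/elemSize {
			panic("reciprocal division mismatch")
		}
	}
	fmt.Println("all results match plain division")
}

The multiply-and-shift avoids a hardware divide on the span allocation path; the verification loop is only there to show the result is exact for this range of inputs.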