summaryrefslogtreecommitdiffstats
path: root/js/src/gc/MallocedBlockCache.cpp
blob: 627f4d9dccc8e28a1e21216e4e93e206547e0de0 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sw=2 et tw=80:
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "gc/MallocedBlockCache.h"
#include "mozilla/MemoryChecking.h"

using js::PointerAndUint7;
using js::gc::MallocedBlockCache;

// Release every cached block (delegates to ::clear, which js_free's them).
MallocedBlockCache::~MallocedBlockCache() { clear(); }

// Fallback path for MallocedBlockCache::alloc.  Must not be called directly:
// it assumes ::alloc has already handled the case where a suitable cached
// block exists.  See ::alloc for further commentary.
PointerAndUint7 MallocedBlockCache::allocSlow(size_t size) {
  // Zero-sized requests are never expected here.
  MOZ_ASSERT(size > 0);

  const size_t roundedSize = js::RoundUp(size, STEP);
  const size_t listID = roundedSize / STEP;
  MOZ_ASSERT(listID > 0);

  if (MOZ_UNLIKELY(listID >= NUM_LISTS)) {
    // Too big to be worth caching; defer directly to js_malloc.  A nullptr
    // from js_malloc is carried through in the PointerAndUint7, and the
    // caller is expected to check for it.
    return PointerAndUint7(js_malloc(roundedSize), OVERSIZE_BLOCK_LIST_ID);
  }

  // The size is cacheable, but the corresponding list must be empty --
  // otherwise ::alloc would have satisfied the request itself.
  MOZ_ASSERT(listID >= 1 && listID < NUM_LISTS);
  MOZ_ASSERT(listID * STEP == roundedSize);  // listID really matches the size
  MOZ_RELEASE_ASSERT(lists[listID].empty());

  // Hand the request off to js_malloc.
  void* block = js_malloc(roundedSize);
  return MOZ_UNLIKELY(!block) ? PointerAndUint7(nullptr, 0)  // OOM
                              : PointerAndUint7(block, listID);
}

void MallocedBlockCache::preen(double percentOfBlocksToDiscard) {
  MOZ_ASSERT(percentOfBlocksToDiscard >= 0.0 &&
             percentOfBlocksToDiscard <= 100.0);
  MOZ_ASSERT(lists[OVERSIZE_BLOCK_LIST_ID].empty());
  for (size_t listID = 1; listID < NUM_LISTS; listID++) {
    MallocedBlockVector& list = lists[listID];
    size_t numToFree =
        size_t(float(list.length()) * (percentOfBlocksToDiscard / 100.0));
    MOZ_RELEASE_ASSERT(numToFree <= list.length());
    while (numToFree > 0) {
      void* block = list.popCopy();
      MOZ_ASSERT(block);
      js_free(block);
      numToFree--;
    }
  }
}

void MallocedBlockCache::clear() {
  MOZ_ASSERT(lists[OVERSIZE_BLOCK_LIST_ID].empty());
  for (size_t i = 1; i < NUM_LISTS; i++) {
    MallocedBlockVector& list = lists[i];
    for (void*& block : list) {
      MOZ_ASSERT(block);
      js_free(block);
      block = nullptr;  // for safety
    }
    list.clear();
  }
}

// Memory-reporting hook: measure the vectors' own storage plus every cached
// block, excluding the MallocedBlockCache object itself.
size_t MallocedBlockCache::sizeOfExcludingThis(
    mozilla::MallocSizeOf mallocSizeOf) const {
  MOZ_ASSERT(lists[OVERSIZE_BLOCK_LIST_ID].empty());
  size_t total = 0;
  for (const MallocedBlockVector& blocks : lists) {
    // Count the vector's backing storage first ...
    total += blocks.sizeOfExcludingThis(mallocSizeOf);
    // ... then each block it holds.  Every block in one list has the same
    // payload size, so measuring one block and multiplying by the length
    // could be cheaper -- but only if the per-block allocator metadata is
    // also known to be uniform, which we don't assume here.
    for (void* block : blocks) {
      MOZ_ASSERT(block);
      total += mallocSizeOf(block);
    }
  }
  return total;
}