// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/zone/accounting-allocator.h"

#include <algorithm>
#include <cstdlib>

#if V8_LIBC_BIONIC
#include <malloc.h>  // NOLINT
#endif

namespace v8 {
namespace internal {

AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() {
  static const size_t kDefaultBucketMaxSize = 5;

  memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
  std::fill(unused_segments_heads_, unused_segments_heads_ + kNumberBuckets,
            nullptr);
  std::fill(unused_segments_sizes_, unused_segments_sizes_ + kNumberBuckets, 0);
  std::fill(unused_segments_max_sizes_,
            unused_segments_max_sizes_ + kNumberBuckets, kDefaultBucketMaxSize);
}
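
// Pool bucket i caches segments whose size lies in
// [2^(kMinSegmentSizePower + i), 2^(kMinSegmentSizePower + i + 1));
// kDefaultBucketMaxSize caps every bucket at five segments until
// ConfigureSegmentPool() installs limits derived from an explicit budget.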

AccountingAllocator::~AccountingAllocator() { ClearPool(); }

void AccountingAllocator::MemoryPressureNotification(
    MemoryPressureLevel level) {
  memory_pressure_level_.SetValue(level);

  if (level != MemoryPressureLevel::kNone) {
    ClearPool();
  }
}

void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) {
  // The sum of the bytes of one segment of each size.
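  // (Geometric series: 2^kMinSegmentSizePower + ... + 2^kMaxSegmentSizePower
  // == 2^(kMaxSegmentSizePower + 1) - 2^kMinSegmentSizePower.)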
  static const size_t full_size = (size_t(1) << (kMaxSegmentSizePower + 1)) -
                                  (size_t(1) << kMinSegmentSizePower);
  size_t fits_fully = max_pool_size / full_size;

  base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

  // We assume that few zones (fewer than 'fits_fully') are active at the same
  // time. When zones grow regularly, they keep requesting segments of
  // increasing size. We therefore try to hold as many complete sets, one
  // segment of each size, as possible.
  // The remaining space is used to make room for an 'incomplete set' of
  // segments, starting with the smaller ones.
  // This code works best if max_pool_size is a multiple of full_size. If
  // max_pool_size is not a sum of segment sizes, the actual pool size may be
  // smaller than max_pool_size; no actual memory is wasted either way.
  // TODO(heimbuef): Determine a better strategy for generating a segment size
  // distribution that is closer to real/benchmark use cases and uses the
  // given max_pool_size more efficiently.
  size_t total_size = fits_fully * full_size;

  for (size_t power = 0; power < kNumberBuckets; ++power) {
    if (total_size + (size_t(1) << (power + kMinSegmentSizePower)) <=
        max_pool_size) {
      unused_segments_max_sizes_[power] = fits_fully + 1;
      total_size += size_t(1) << (power + kMinSegmentSizePower);
    } else {
      unused_segments_max_sizes_[power] = fits_fully;
    }
  }
}
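
// Worked example (illustrative only; the actual powers are defined in
// accounting-allocator.h): with kMinSegmentSizePower == 13 and
// kMaxSegmentSizePower == 18, full_size == 2^19 - 2^13 == 516096 bytes. For
// max_pool_size == 2 MB, fits_fully == 4, leaving 32768 spare bytes; that is
// enough for one extra 8 KB and one extra 16 KB segment, so those two buckets
// get a max size of 5 while all larger buckets stay at 4.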

Segment* AccountingAllocator::GetSegment(size_t bytes) {
  Segment* result = GetSegmentFromPool(bytes);
  if (result == nullptr) {
    result = AllocateSegment(bytes);
    if (result != nullptr) {
      result->Initialize(bytes);
    }
  }

  return result;
}
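
// Minimal usage sketch (hypothetical caller; in V8, Zone is the real
// client):
//   AccountingAllocator allocator;
//   Segment* segment = allocator.GetSegment(4096);  // pool hit or malloc
//   // ... carve zone objects out of |segment| ...
//   allocator.ReturnSegment(segment);  // repooled, or freed under pressure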

Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
  void* memory = malloc(bytes);
  if (memory) {
    base::AtomicWord current =
        base::NoBarrier_AtomicIncrement(&current_memory_usage_, bytes);
    base::AtomicWord max = base::NoBarrier_Load(&max_memory_usage_);
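    // Track the high-water mark. CompareAndSwap returns the previously
    // stored value, so the loop only exits once max_memory_usage_ holds a
    // value of at least |current|.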
    while (current > max) {
      max = base::NoBarrier_CompareAndSwap(&max_memory_usage_, max, current);
    }
  }
  return reinterpret_cast<Segment*>(memory);
}

void AccountingAllocator::ReturnSegment(Segment* segment) {
  segment->ZapContents();

  if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
    FreeSegment(segment);
  } else if (!AddSegmentToPool(segment)) {
    FreeSegment(segment);
  }
}

void AccountingAllocator::FreeSegment(Segment* memory) {
  base::NoBarrier_AtomicIncrement(
      &current_memory_usage_, -static_cast<base::AtomicWord>(memory->size()));
  memory->ZapHeader();
  free(memory);
}

size_t AccountingAllocator::GetCurrentMemoryUsage() const {
  return base::NoBarrier_Load(&current_memory_usage_);
}

size_t AccountingAllocator::GetMaxMemoryUsage() const {
  return base::NoBarrier_Load(&max_memory_usage_);
}

size_t AccountingAllocator::GetCurrentPoolSize() const {
  return base::NoBarrier_Load(&current_pool_size_);
}

Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
  if (requested_size > (static_cast<size_t>(1) << kMaxSegmentSizePower)) {
    return nullptr;
  }

  size_t power = kMinSegmentSizePower;
  while (requested_size > (static_cast<size_t>(1) << power)) power++;

  // The '+ 0' avoids odr-using the static constant in the DCHECK macro.
  DCHECK_GE(power, kMinSegmentSizePower + 0);
  power -= kMinSegmentSizePower;

  Segment* segment;
  {
    base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

    segment = unused_segments_heads_[power];

    if (segment != nullptr) {
      unused_segments_heads_[power] = segment->next();
      segment->set_next(nullptr);

      unused_segments_sizes_[power]--;
      base::NoBarrier_AtomicIncrement(
          &current_pool_size_, -static_cast<base::AtomicWord>(segment->size()));
    }
  }

  if (segment) {
    DCHECK_GE(segment->size(), requested_size);
  }
  return segment;
}
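
// Example of the bucket mapping above (using illustrative powers of 13 and
// 18 for kMinSegmentSizePower and kMaxSegmentSizePower): a request for 10000
// bytes is rounded up to 2^14 == 16384, i.e. bucket index
// 14 - kMinSegmentSizePower == 1. Every segment stored in that bucket is at
// least 16384 bytes, which is what the DCHECK above verifies.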

bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
  size_t size = segment->size();

  if (size >= (static_cast<size_t>(1) << (kMaxSegmentSizePower + 1)))
    return false;

  if (size < (static_cast<size_t>(1) << kMinSegmentSizePower)) return false;

  size_t power = kMaxSegmentSizePower;

  while (size < (static_cast<size_t>(1) << power)) power--;

  DCHECK_GE(power, kMinSegmentSizePower + 0);
  power -= kMinSegmentSizePower;

  {
    base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

    if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) {
      return false;
    }

    segment->set_next(unused_segments_heads_[power]);
    unused_segments_heads_[power] = segment;
    base::NoBarrier_AtomicIncrement(&current_pool_size_, size);
    unused_segments_sizes_[power]++;
  }

  return true;
}
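
// Note that AddSegmentToPool rounds sizes *down* (with the illustrative
// powers, a 20000-byte segment lands in the 16 KB bucket) while
// GetSegmentFromPool rounds requests *up*; together this guarantees that a
// pooled segment is never smaller than the request it ends up serving.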

void AccountingAllocator::ClearPool() {
  base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

  for (size_t power = 0; power < kNumberBuckets; power++) {
    Segment* current = unused_segments_heads_[power];
    while (current) {
      Segment* next = current->next();
      FreeSegment(current);
      current = next;
    }
    unused_segments_heads_[power] = nullptr;
  }
}

}  // namespace internal
}  // namespace v8