// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/heap_profiler_allocation_register.h"

#include <algorithm>
#include <limits>

#include "base/trace_event/trace_event_memory_overhead.h"

namespace base {
namespace trace_event {

AllocationRegister::ConstIterator::ConstIterator(
    const AllocationRegister& alloc_register,
    AllocationIndex index)
    : register_(alloc_register), index_(index) {}

void AllocationRegister::ConstIterator::operator++() {
  index_ = register_.allocations_.Next(index_ + 1);
}

bool AllocationRegister::ConstIterator::operator!=(
    const ConstIterator& other) const {
  return index_ != other.index_;
}

AllocationRegister::Allocation AllocationRegister::ConstIterator::operator*()
    const {
  return register_.GetAllocation(index_);
}

size_t AllocationRegister::BacktraceHasher::operator()(
    const Backtrace& backtrace) const {
  const size_t kSampleLength = 10;
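  // Only the first and last |kSampleLength| frames are mixed into the hash,
  // which keeps hashing cheap for very deep stacks; the |tail_start| clamp
  // below ensures no frame is counted twice.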

  uintptr_t total_value = 0;

  size_t head_end = std::min(backtrace.frame_count, kSampleLength);
  for (size_t i = 0; i != head_end; ++i) {
    total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
  }

  size_t tail_start = backtrace.frame_count -
                      std::min(backtrace.frame_count - head_end, kSampleLength);
  for (size_t i = tail_start; i != backtrace.frame_count; ++i) {
    total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
  }

  total_value += backtrace.frame_count;
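  // Folding in the frame count distinguishes backtraces that share the
  // sampled head and tail frames but differ in depth.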

  // These magic constants give the best results in terms of average collisions
  // per backtrace. They were found by replaying real backtraces from Linux
  // and Android against different hash functions.
  return (total_value * 131101) >> 14;
}

size_t AllocationRegister::AddressHasher::operator()(
    const void* address) const {
  // The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
  // been chosen carefully based on measurements with real-world data (addresses
  // recorded from a Chrome trace run). It is the first prime after 2^17. For
  // |shift|, 15 yields good results for both 2^18 and 2^19 bucket sizes.
  // Microbenchmarks show that this simple scheme outperforms fancy hashes like
  // Murmur3 by 20 to 40 percent.
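  // As an illustration, a hypothetical key of 2^17 (0x20000) hashes to
  // (0x20000 * 131101) >> 15 == 4 * 131101 == 524404.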
  const uintptr_t key = reinterpret_cast<uintptr_t>(address);
  const uintptr_t a = 131101;
  const uintptr_t shift = 15;
  const uintptr_t h = (key * a) >> shift;
  return h;
}

AllocationRegister::AllocationRegister()
    : AllocationRegister(kAllocationCapacity, kBacktraceCapacity) {}

AllocationRegister::AllocationRegister(size_t allocation_capacity,
                                       size_t backtrace_capacity)
    : allocations_(allocation_capacity), backtraces_(backtrace_capacity) {
  Backtrace sentinel = {};
  sentinel.frames[0] = StackFrame::FromThreadName("[out of heap profiler mem]");
  sentinel.frame_count = 1;

  // Rationale for max / 2: in theory we could just start the sentinel with a
  // refcount == 0. However, using max / 2 allows short circuiting of the
  // conditional in RemoveBacktrace(), keeping the sentinel logic out of the
  // fast path. From a functional viewpoint, the sentinel is safe even if the
  // refcount wraps around, because RemoveBacktrace() never erases the entry
  // at kOutOfStorageBacktraceIndex regardless of its count.
  BacktraceMap::KVPair::second_type sentinel_refcount =
      std::numeric_limits<BacktraceMap::KVPair::second_type>::max() / 2;
  auto index_and_flag = backtraces_.Insert(sentinel, sentinel_refcount);
  DCHECK(index_and_flag.second);
  DCHECK_EQ(index_and_flag.first, kOutOfStorageBacktraceIndex);
}

AllocationRegister::~AllocationRegister() {}

bool AllocationRegister::Insert(const void* address,
                                size_t size,
                                const AllocationContext& context) {
  DCHECK(address != nullptr);
  if (size == 0) {
    return false;
  }

  AllocationInfo info = {size, context.type_name,
                         InsertBacktrace(context.backtrace)};

  // Try to insert the allocation.
  auto index_and_flag = allocations_.Insert(address, info);
  if (!index_and_flag.second &&
      index_and_flag.first != AllocationMap::kInvalidKVIndex) {
    // |address| is already there - overwrite the allocation info.
    auto& old_info = allocations_.Get(index_and_flag.first).second;
    RemoveBacktrace(old_info.backtrace_index);
    old_info = info;
    return true;
  }

  return index_and_flag.second;
}
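
// Illustrative usage sketch (|ptr| is a hypothetical pointer and
// AllocationContext is assumed to be default-constructible and filled in by
// the caller):
//
//   AllocationRegister reg;
//   AllocationContext ctx;             // backtrace / type_name set by caller
//   reg.Insert(ptr, 16, ctx);          // record a 16-byte allocation at |ptr|
//   AllocationRegister::Allocation alloc;
//   if (reg.Get(ptr, &alloc))
//     DCHECK_EQ(16u, alloc.size);
//   reg.Remove(ptr);                   // and drop it again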

void AllocationRegister::Remove(const void* address) {
  auto index = allocations_.Find(address);
  if (index == AllocationMap::kInvalidKVIndex) {
    return;
  }

  const AllocationInfo& info = allocations_.Get(index).second;
  RemoveBacktrace(info.backtrace_index);
  allocations_.Remove(index);
}

bool AllocationRegister::Get(const void* address,
                             Allocation* out_allocation) const {
  auto index = allocations_.Find(address);
  if (index == AllocationMap::kInvalidKVIndex) {
    return false;
  }

  if (out_allocation) {
    *out_allocation = GetAllocation(index);
  }
  return true;
}

AllocationRegister::ConstIterator AllocationRegister::begin() const {
  return ConstIterator(*this, allocations_.Next(0));
}

AllocationRegister::ConstIterator AllocationRegister::end() const {
  return ConstIterator(*this, AllocationMap::kInvalidKVIndex);
}
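
// Illustrative: the iterators above make range-based for loops work; |reg|
// and |total_bytes| here are hypothetical:
//
//   size_t total_bytes = 0;
//   for (const AllocationRegister::Allocation& alloc : reg)
//     total_bytes += alloc.size;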

void AllocationRegister::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) const {
  size_t allocated = sizeof(AllocationRegister);
  size_t resident = sizeof(AllocationRegister) +
                    allocations_.EstimateUsedMemory() +
                    backtraces_.EstimateUsedMemory();
  overhead->Add("AllocationRegister", allocated, resident);
}

AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
    const Backtrace& backtrace) {
  auto index = backtraces_.Insert(backtrace, 0).first;
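  // If an identical backtrace is already stored, Insert() returns the index of
  // the existing entry and leaves its count untouched; otherwise a new entry
  // with count 0 is created. Either way the count is incremented below.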
  if (index == BacktraceMap::kInvalidKVIndex)
    return kOutOfStorageBacktraceIndex;
  auto& backtrace_and_count = backtraces_.Get(index);
  backtrace_and_count.second++;
  return index;
}

void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
  auto& backtrace_and_count = backtraces_.Get(index);
  if (--backtrace_and_count.second == 0 &&
      index != kOutOfStorageBacktraceIndex) {
    // Backtrace is not referenced anymore - remove it.
    backtraces_.Remove(index);
  }
}

AllocationRegister::Allocation AllocationRegister::GetAllocation(
    AllocationMap::KVIndex index) const {
  const auto& address_and_info = allocations_.Get(index);
  const auto& backtrace_and_count =
      backtraces_.Get(address_and_info.second.backtrace_index);
  return {address_and_info.first, address_and_info.second.size,
          AllocationContext(backtrace_and_count.first,
                            address_and_info.second.type_name)};
}

}  // namespace trace_event
}  // namespace base