author     Hidehiko Abe <hidehiko@google.com>                       2018-02-21 02:59:23 +0000
committer  android-build-merger <android-build-merger@google.com>  2018-02-21 02:59:23 +0000
commit     5dbf0c2ab3ce8ba3300639cb2a50515fe913be7f (patch)
tree       b5545667cc754e2b0745fb59dd891d65d30afaeb /base/trace_event/heap_profiler_allocation_register.h
parent     1f654b523e4ccab6e944200c8ba5053d786546a0 (diff)
parent     81c2b1d888e79349850f1cf731c5085c23532d9e (diff)
download   libchrome-5dbf0c2ab3ce8ba3300639cb2a50515fe913be7f.tar.gz
Re-uprev to r462023. am: f810b5921d
am: 81c2b1d888
Change-Id: Ia8700def3b126d58cb3aef1298f5b6bc423c92af
Diffstat (limited to 'base/trace_event/heap_profiler_allocation_register.h')
-rw-r--r--  base/trace_event/heap_profiler_allocation_register.h  79
1 file changed, 48 insertions, 31 deletions
diff --git a/base/trace_event/heap_profiler_allocation_register.h b/base/trace_event/heap_profiler_allocation_register.h
index d6a02faeae..ac9872f001 100644
--- a/base/trace_event/heap_profiler_allocation_register.h
+++ b/base/trace_event/heap_profiler_allocation_register.h
@@ -48,24 +48,26 @@ class FixedHashMap {
   // For implementation simplicity API uses integer index instead
   // of iterators. Most operations (except Find) on KVIndex are O(1).
   using KVIndex = size_t;
-  static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
+  enum : KVIndex { kInvalidKVIndex = static_cast<KVIndex>(-1) };
 
   // Capacity controls how many items this hash map can hold, and largely
   // affects memory footprint.
-  FixedHashMap(size_t capacity)
-      : num_cells_(capacity),
-        cells_(static_cast<Cell*>(
-            AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
-        buckets_(static_cast<Bucket*>(
-            AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
-        free_list_(nullptr),
-        next_unused_cell_(0) {}
+  explicit FixedHashMap(size_t capacity)
+      : num_cells_(capacity),
+        num_inserts_dropped_(0),
+        cells_(static_cast<Cell*>(
+            AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
+        buckets_(static_cast<Bucket*>(
+            AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
+        free_list_(nullptr),
+        next_unused_cell_(0) {}
 
   ~FixedHashMap() {
     FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell));
     FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket));
   }
 
+  // Returns {kInvalidKVIndex, false} if the table is full.
   std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) {
     Cell** p_cell = Lookup(key);
     Cell* cell = *p_cell;
@@ -74,7 +76,15 @@ class FixedHashMap {
     }
 
     // Get a free cell and link it.
-    *p_cell = cell = GetFreeCell();
+    cell = GetFreeCell();
+    if (!cell) {
+      if (num_inserts_dropped_ <
+          std::numeric_limits<decltype(num_inserts_dropped_)>::max()) {
+        ++num_inserts_dropped_;
+      }
+      return {kInvalidKVIndex, false};
+    }
+    *p_cell = cell;
     cell->p_prev = p_cell;
     cell->next = nullptr;
 
@@ -137,6 +147,8 @@ class FixedHashMap {
            bits::Align(sizeof(Bucket) * NumBuckets, page_size);
   }
 
+  size_t num_inserts_dropped() const { return num_inserts_dropped_; }
+
  private:
   friend base::trace_event::AllocationRegisterTest;
 
@@ -175,7 +187,8 @@ class FixedHashMap {
   }
 
   // Returns a cell that is not being used to store an entry (either by
-  // recycling from the free list or by taking a fresh cell).
+  // recycling from the free list or by taking a fresh cell). May return
+  // nullptr if the hash table has run out of memory.
   Cell* GetFreeCell() {
     // First try to re-use a cell from the free list.
     if (free_list_) {
@@ -184,26 +197,14 @@ class FixedHashMap {
       return cell;
     }
 
-    // Otherwise pick the next cell that has not been touched before.
-    size_t idx = next_unused_cell_;
-    next_unused_cell_++;
-
     // If the hash table has too little capacity (when too little address space
-    // was reserved for |cells_|), |next_unused_cell_| can be an index outside
-    // of the allocated storage. A guard page is allocated there to crash the
-    // program in that case. There are alternative solutions:
-    // - Deal with it, increase capacity by reallocating |cells_|.
-    // - Refuse to insert and let the caller deal with it.
-    // Because free cells are re-used before accessing fresh cells with a higher
-    // index, and because reserving address space without touching it is cheap,
-    // the simplest solution is to just allocate a humongous chunk of address
-    // space.
-
-    CHECK_LT(next_unused_cell_, num_cells_ + 1)
-        << "Allocation Register hash table has too little capacity. Increase "
-           "the capacity to run heap profiler in large sessions.";
-
-    return &cells_[idx];
+    // was reserved for |cells_|), return nullptr.
+    if (next_unused_cell_ >= num_cells_) {
+      return nullptr;
+    }
+
+    // Otherwise pick the next cell that has not been touched before.
+    return &cells_[next_unused_cell_++];
   }
 
   // Returns a value in the range [0, NumBuckets - 1] (inclusive).
@@ -219,6 +220,9 @@ class FixedHashMap {
   // Number of cells.
   size_t const num_cells_;
 
+  // Number of calls to Insert() that were lost because the hashtable was full.
+  size_t num_inserts_dropped_;
+
   // The array of cells. This array is backed by mmapped memory. Lower indices
   // are accessed first, higher indices are accessed only when the |free_list_|
   // is empty. This is to minimize the amount of resident memory used.
@@ -248,6 +252,8 @@ class TraceEventMemoryOverhead;
 // freed. Internally it has two hashtables: one for Backtraces and one for
 // actual allocations. Sizes of both hashtables are fixed, and this class
 // allocates (mmaps) only in its constructor.
+//
+// When either hash table hits max size, new inserts are dropped.
 class BASE_EXPORT AllocationRegister {
  public:
   // Details about an allocation.
@@ -282,7 +288,10 @@ class BASE_EXPORT AllocationRegister {
   // Inserts allocation details into the table. If the address was present
   // already, its details are updated. |address| must not be null.
-  void Insert(const void* address,
+  //
+  // Returns true if an insert occurred. Inserts may fail because the table
+  // is full.
+  bool Insert(const void* address,
               size_t size,
               const AllocationContext& context);
 
@@ -359,6 +368,14 @@ class BASE_EXPORT AllocationRegister {
   AllocationMap allocations_;
   BacktraceMap backtraces_;
 
+  // Sentinel used when the |backtraces_| table is full.
+  //
+  // This is a slight abstraction violation to allow for constant propagation.
+  // It knows that the sentinel will be the first item inserted into the table
+  // and that the first index returned will be 0. The constructor DCHECKs
+  // this assumption.
+  enum : BacktraceMap::KVIndex { kOutOfStorageBacktraceIndex = 0 };
+
   DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
 };
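
To make the new contract concrete, here is a minimal standalone C++ sketch of the drop-on-full pattern this patch introduces: Insert() reports {index, success}, saturates a drop counter when capacity is exhausted, and never crashes the way the old CHECK_LT did. This is not the Chromium code; FixedStore and every name in it are illustrative stand-ins for FixedHashMap.

// Minimal sketch of the drop-on-full insert pattern (illustrative names;
// this is not the Chromium implementation).
#include <cstddef>
#include <iostream>
#include <limits>
#include <utility>
#include <vector>

class FixedStore {
 public:
  using Index = size_t;
  enum : Index { kInvalidIndex = static_cast<Index>(-1) };

  explicit FixedStore(size_t capacity) : capacity_(capacity) {
    values_.reserve(capacity_);  // Fixed footprint, as with the mmapped cells.
  }

  // Returns {index, true} on success and {kInvalidIndex, false} when the
  // store is full, mirroring the new FixedHashMap::Insert() contract.
  std::pair<Index, bool> Insert(int value) {
    if (values_.size() >= capacity_) {
      // Saturate rather than overflow, like |num_inserts_dropped_| above.
      if (num_inserts_dropped_ <
          std::numeric_limits<decltype(num_inserts_dropped_)>::max()) {
        ++num_inserts_dropped_;
      }
      return {kInvalidIndex, false};
    }
    values_.push_back(value);
    return {values_.size() - 1, true};
  }

  size_t num_inserts_dropped() const { return num_inserts_dropped_; }

 private:
  const size_t capacity_;
  size_t num_inserts_dropped_ = 0;
  std::vector<int> values_;
};

int main() {
  FixedStore store(2);  // Deliberately tiny so the last inserts are dropped.
  for (int i = 0; i < 4; ++i) {
    const auto result = store.Insert(i * 10);
    if (result.second)
      std::cout << "insert " << i << " -> index " << result.first << "\n";
    else
      std::cout << "insert " << i << " -> dropped\n";
  }
  std::cout << "dropped: " << store.num_inserts_dropped() << "\n";  // Prints 2.
  return 0;
}

Of the two alternatives listed in the comment this patch deletes, the change picks "refuse to insert and let the caller deal with it": AllocationRegister::Insert() now returns a bool, and backtraces that no longer fit are all mapped to the kOutOfStorageBacktraceIndex sentinel, the first entry inserted into the table.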