summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTreehugger Robot <treehugger-gerrit@google.com>2021-05-14 00:16:39 +0000
committerAutomerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>2021-05-14 00:16:39 +0000
commitc107384a7f74d68e29b055c3eda91cb00b8f89d0 (patch)
tree3beb5fb8157f324bdd9403e33a801a907cb4d9fb
parent67889d84f68c35575d083b85c4f894bf16ec5ec8 (diff)
parent467f57ceef075e9744fcf4b1859f1247a52c2cde (diff)
downloadunwinding-c107384a7f74d68e29b055c3eda91cb00b8f89d0.tar.gz
Merge "MapInfo: Lazy allocation of ELF-related fields." am: 027c2676f4 am: 04d0145ad7 am: 467f57ceef
Original change: https://android-review.googlesource.com/c/platform/system/unwinding/+/1706748 Change-Id: If9188883f9c064d54b166a68857ab717ea93e67c
-rw-r--r--libunwindstack/Elf.cpp2
-rw-r--r--libunwindstack/MapInfo.cpp34
-rw-r--r--libunwindstack/include/unwindstack/Elf.h2
-rw-r--r--libunwindstack/include/unwindstack/MapInfo.h161
-rw-r--r--libunwindstack/tests/MapInfoTest.cpp43
5 files changed, 161 insertions, 81 deletions
diff --git a/libunwindstack/Elf.cpp b/libunwindstack/Elf.cpp
index b52d294..7f4142b 100644
--- a/libunwindstack/Elf.cpp
+++ b/libunwindstack/Elf.cpp
@@ -101,7 +101,7 @@ std::string Elf::GetSoname() {
return interface_->GetSoname();
}
-uint64_t Elf::GetRelPc(uint64_t pc, const MapInfo* map_info) {
+uint64_t Elf::GetRelPc(uint64_t pc, MapInfo* map_info) {
return pc - map_info->start() + load_bias_ + map_info->elf_offset();
}
diff --git a/libunwindstack/MapInfo.cpp b/libunwindstack/MapInfo.cpp
index 6fcfdaf..8fb6f7e 100644
--- a/libunwindstack/MapInfo.cpp
+++ b/libunwindstack/MapInfo.cpp
@@ -213,7 +213,7 @@ Memory* MapInfo::CreateMemory(const std::shared_ptr<Memory>& process_memory) {
Elf* MapInfo::GetElf(const std::shared_ptr<Memory>& process_memory, ArchEnum expected_arch) {
{
// Make sure no other thread is trying to add the elf to this map.
- std::lock_guard<std::mutex> guard(mutex_);
+ std::lock_guard<std::mutex> guard(elf_mutex());
if (elf().get() != nullptr) {
return elf().get();
@@ -259,7 +259,7 @@ Elf* MapInfo::GetElf(const std::shared_ptr<Memory>& process_memory, ArchEnum exp
// If there is a read-only map then a read-execute map that represents the
// same elf object, make sure the previous map is using the same elf
// object if it hasn't already been set.
- std::lock_guard<std::mutex> guard(prev_real_map()->mutex_);
+ std::lock_guard<std::mutex> guard(prev_real_map()->elf_mutex());
if (prev_real_map()->elf().get() == nullptr) {
prev_real_map()->set_elf(elf());
prev_real_map()->set_memory_backed_elf(memory_backed_elf());
@@ -274,7 +274,7 @@ Elf* MapInfo::GetElf(const std::shared_ptr<Memory>& process_memory, ArchEnum exp
bool MapInfo::GetFunctionName(uint64_t addr, SharedString* name, uint64_t* func_offset) {
{
// Make sure no other thread is trying to update this elf object.
- std::lock_guard<std::mutex> guard(mutex_);
+ std::lock_guard<std::mutex> guard(elf_mutex());
if (elf() == nullptr) {
return false;
}
@@ -292,7 +292,7 @@ uint64_t MapInfo::GetLoadBias(const std::shared_ptr<Memory>& process_memory) {
{
// Make sure no other thread is trying to add the elf to this map.
- std::lock_guard<std::mutex> guard(mutex_);
+ std::lock_guard<std::mutex> guard(elf_mutex());
if (elf() != nullptr) {
if (elf()->valid()) {
cur_load_bias = elf()->GetLoadBias();
@@ -314,7 +314,11 @@ uint64_t MapInfo::GetLoadBias(const std::shared_ptr<Memory>& process_memory) {
}
MapInfo::~MapInfo() {
- delete build_id().load();
+ ElfFields* elf_fields = elf_fields_.load();
+ if (elf_fields != nullptr) {
+ delete elf_fields->build_id_.load();
+ delete elf_fields;
+ }
}
SharedString MapInfo::GetBuildID() {
@@ -329,9 +333,9 @@ SharedString MapInfo::GetBuildID() {
// Now need to see if the elf object exists.
// Make sure no other thread is trying to add the elf to this map.
- mutex_.lock();
+ elf_mutex().lock();
Elf* elf_obj = elf().get();
- mutex_.unlock();
+ elf_mutex().unlock();
std::string result;
if (elf_obj != nullptr) {
result = elf_obj->GetBuildID();
@@ -360,6 +364,22 @@ SharedString MapInfo::SetBuildID(std::string&& new_build_id) {
}
}
+MapInfo::ElfFields& MapInfo::GetElfFields() {
+ ElfFields* elf_fields = elf_fields_.load(std::memory_order_acquire);
+ if (elf_fields != nullptr) {
+ return *elf_fields;
+ }
+ // Allocate and initialize the field in a thread-safe way.
+ std::unique_ptr<ElfFields> desired(new ElfFields());
+ ElfFields* expected = nullptr;
+ // Strong version is reliable. Weak version might fail spuriously (return false).
+ if (elf_fields_.compare_exchange_strong(expected, desired.get())) {
+ return *desired.release(); // Success: we transferred the pointer ownership to the field.
+ } else {
+ return *expected; // Failure: 'expected' is updated to the value set by the other thread.
+ }
+}
+
std::string MapInfo::GetPrintableBuildID() {
std::string raw_build_id = GetBuildID();
if (raw_build_id.empty()) {
diff --git a/libunwindstack/include/unwindstack/Elf.h b/libunwindstack/include/unwindstack/Elf.h
index 5a496a7..01d9af9 100644
--- a/libunwindstack/include/unwindstack/Elf.h
+++ b/libunwindstack/include/unwindstack/Elf.h
@@ -57,7 +57,7 @@ class Elf {
bool GetGlobalVariableOffset(const std::string& name, uint64_t* memory_offset);
- uint64_t GetRelPc(uint64_t pc, const MapInfo* map_info);
+ uint64_t GetRelPc(uint64_t pc, MapInfo* map_info);
bool StepIfSignalHandler(uint64_t rel_pc, Regs* regs, Memory* process_memory);
diff --git a/libunwindstack/include/unwindstack/MapInfo.h b/libunwindstack/include/unwindstack/MapInfo.h
index 2a4696a..727dc68 100644
--- a/libunwindstack/include/unwindstack/MapInfo.h
+++ b/libunwindstack/include/unwindstack/MapInfo.h
@@ -31,109 +31,102 @@ namespace unwindstack {
class MemoryFileAtOffset;
+// Represents virtual memory map (as obtained from /proc/*/maps).
+//
+// Note that we have to be surprisingly careful with memory usage here,
+// since in system-wide profiling this data can take considerable space.
+// (for example, 400 processes * 400 maps * 128 bytes = 20 MB + string data).
class MapInfo {
public:
MapInfo(MapInfo* prev_map, MapInfo* prev_real_map, uint64_t start, uint64_t end, uint64_t offset,
- uint64_t flags, const char* name)
- : start_(start),
- end_(end),
- offset_(offset),
- flags_(flags),
- name_(name),
- prev_map_(prev_map),
- prev_real_map_(prev_real_map),
- load_bias_(INT64_MAX),
- build_id_(0) {
- if (prev_real_map != nullptr) prev_real_map->next_real_map_ = this;
- }
- MapInfo(MapInfo* prev_map, MapInfo* prev_real_map, uint64_t start, uint64_t end, uint64_t offset,
uint64_t flags, SharedString name)
: start_(start),
end_(end),
offset_(offset),
flags_(flags),
name_(name),
+ elf_fields_(nullptr),
prev_map_(prev_map),
- prev_real_map_(prev_real_map),
- load_bias_(INT64_MAX),
- build_id_(0) {
+ prev_real_map_(prev_real_map) {
if (prev_real_map != nullptr) prev_real_map->next_real_map_ = this;
}
~MapInfo();
+ // Cached data for mapped ELF files.
+ // We allocate this structure lazily since there are far fewer ELFs than maps.
+ struct ElfFields {
+ ElfFields() : load_bias_(INT64_MAX), build_id_(0) {}
+
+ std::shared_ptr<Elf> elf_;
+ // The offset of the beginning of this mapping to the beginning of the
+ // ELF file.
+ // elf_offset == offset - elf_start_offset.
+ // This value is only non-zero if the offset is non-zero but there is
+ // no elf signature found at that offset.
+ uint64_t elf_offset_ = 0;
+ // This value is the offset into the file of the map in memory that is the
+ // start of the elf. This is not equal to offset when the linker splits
+ // shared libraries into a read-only and read-execute map.
+ uint64_t elf_start_offset_ = 0;
+
+ std::atomic_int64_t load_bias_;
+
+ // This is a pointer to a new'd std::string.
+ // Using an atomic value means that we don't need to lock and will
+ // make it easier to move to a fine grained lock in the future.
+ std::atomic<SharedString*> build_id_;
+
+ // Set to true if the elf file data is coming from memory.
+ bool memory_backed_elf_ = false;
+
+ // Protect the creation of the elf object.
+ std::mutex elf_mutex_;
+ };
+
inline uint64_t start() const { return start_; }
inline void set_start(uint64_t value) { start_ = value; }
+
inline uint64_t end() const { return end_; }
inline void set_end(uint64_t value) { end_ = value; }
+
inline uint64_t offset() const { return offset_; }
inline void set_offset(uint64_t value) { offset_ = value; }
+
inline uint16_t flags() const { return flags_; }
inline void set_flags(uint16_t value) { flags_ = value; }
+
inline SharedString& name() { return name_; }
inline void set_name(SharedString& value) { name_ = value; }
inline void set_name(const char* value) { name_ = value; }
- inline std::shared_ptr<Elf>& elf() { return elf_; }
- inline void set_elf(std::shared_ptr<Elf>& value) { elf_ = value; }
- inline void set_elf(Elf* value) { elf_.reset(value); }
- inline uint64_t elf_offset() const { return elf_offset_; }
- inline void set_elf_offset(uint64_t value) { elf_offset_ = value; }
- inline uint64_t elf_start_offset() const { return elf_start_offset_; }
- inline void set_elf_start_offset(uint64_t value) { elf_start_offset_ = value; }
- inline MapInfo* prev_map() const { return prev_map_; }
- inline void set_prev_map(MapInfo* value) { prev_map_ = value; }
- inline MapInfo* prev_real_map() const { return prev_real_map_; }
- inline void set_prev_real_map(MapInfo* value) { prev_real_map_ = value; }
- inline MapInfo* next_real_map() const { return next_real_map_; }
- inline void set_next_real_map(MapInfo* value) { next_real_map_ = value; }
- inline std::atomic_int64_t& load_bias() { return load_bias_; }
- inline void set_load_bias(int64_t value) { load_bias_ = value; }
- inline std::atomic<SharedString*>& build_id() { return build_id_; }
- inline void set_build_id(SharedString* value) { build_id_ = value; }
- inline bool memory_backed_elf() const { return memory_backed_elf_; }
- inline void set_memory_backed_elf(bool value) { memory_backed_elf_ = value; }
- private:
- uint64_t start_ = 0;
- uint64_t end_ = 0;
- uint64_t offset_ = 0;
- uint16_t flags_ = 0;
- SharedString name_;
- std::shared_ptr<Elf> elf_;
- // The offset of the beginning of this mapping to the beginning of the
- // ELF file.
- // elf_offset == offset - elf_start_offset.
- // This value is only non-zero if the offset is non-zero but there is
- // no elf signature found at that offset.
- uint64_t elf_offset_ = 0;
- // This value is the offset into the file of the map in memory that is the
- // start of the elf. This is not equal to offset when the linker splits
- // shared libraries into a read-only and read-execute map.
- uint64_t elf_start_offset_ = 0;
+ inline std::shared_ptr<Elf>& elf() { return GetElfFields().elf_; }
+ inline void set_elf(std::shared_ptr<Elf>& value) { GetElfFields().elf_ = value; }
+ inline void set_elf(Elf* value) { GetElfFields().elf_.reset(value); }
- MapInfo* prev_map_ = nullptr;
- // This is the previous map that is not empty with a 0 offset. For
- // example, this set of maps:
- // 1000-2000 r--p 000000 00:00 0 libc.so
- // 2000-3000 ---p 000000 00:00 0 libc.so
- // 3000-4000 r-xp 003000 00:00 0 libc.so
- // The last map's prev_map would point to the 2000-3000 map, while the
- // prev_real_map would point to the 1000-2000 map.
- MapInfo* prev_real_map_ = nullptr;
+ inline uint64_t elf_offset() { return GetElfFields().elf_offset_; }
+ inline void set_elf_offset(uint64_t value) { GetElfFields().elf_offset_ = value; }
- // Same as above but set to point to the next map.
- MapInfo* next_real_map_ = nullptr;
+ inline uint64_t elf_start_offset() { return GetElfFields().elf_start_offset_; }
+ inline void set_elf_start_offset(uint64_t value) { GetElfFields().elf_start_offset_ = value; }
- std::atomic_int64_t load_bias_;
+ inline std::atomic_int64_t& load_bias() { return GetElfFields().load_bias_; }
+ inline void set_load_bias(int64_t value) { GetElfFields().load_bias_ = value; }
- // This is a pointer to a new'd std::string.
- // Using an atomic value means that we don't need to lock and will
- // make it easier to move to a fine grained lock in the future.
- std::atomic<SharedString*> build_id_;
+ inline std::atomic<SharedString*>& build_id() { return GetElfFields().build_id_; }
+ inline void set_build_id(SharedString* value) { GetElfFields().build_id_ = value; }
- // Set to true if the elf file data is coming from memory.
- bool memory_backed_elf_ = false;
+ inline bool memory_backed_elf() { return GetElfFields().memory_backed_elf_; }
+ inline void set_memory_backed_elf(bool value) { GetElfFields().memory_backed_elf_ = value; }
+
+ inline MapInfo* prev_map() const { return prev_map_; }
+ inline void set_prev_map(MapInfo* value) { prev_map_ = value; }
+
+ inline MapInfo* prev_real_map() const { return prev_real_map_; }
+ inline void set_prev_real_map(MapInfo* value) { prev_real_map_ = value; }
+
+ inline MapInfo* next_real_map() const { return next_real_map_; }
+ inline void set_next_real_map(MapInfo* value) { next_real_map_ = value; }
- public:
// This function guarantees it will never return nullptr.
Elf* GetElf(const std::shared_ptr<Memory>& process_memory, ArchEnum expected_arch);
@@ -154,6 +147,9 @@ class MapInfo {
inline bool IsBlank() { return offset() == 0 && flags() == 0 && name().empty(); }
+ // Returns elf_fields_. It will create the object if it is null.
+ ElfFields& GetElfFields();
+
private:
MapInfo(const MapInfo&) = delete;
void operator=(const MapInfo&) = delete;
@@ -162,7 +158,28 @@ class MapInfo {
bool InitFileMemoryFromPreviousReadOnlyMap(MemoryFileAtOffset* memory);
// Protect the creation of the elf object.
- std::mutex mutex_;
+ std::mutex& elf_mutex() { return GetElfFields().elf_mutex_; }
+
+ uint64_t start_ = 0;
+ uint64_t end_ = 0;
+ uint64_t offset_ = 0;
+ uint16_t flags_ = 0;
+ SharedString name_;
+
+ std::atomic<ElfFields*> elf_fields_;
+
+ MapInfo* prev_map_ = nullptr;
+ // This is the previous map that is not empty with a 0 offset. For
+ // example, this set of maps:
+ // 1000-2000 r--p 000000 00:00 0 libc.so
+ // 2000-3000 ---p 000000 00:00 0 libc.so
+ // 3000-4000 r-xp 003000 00:00 0 libc.so
+ // The last map's prev_map would point to the 2000-3000 map, while the
+ // prev_real_map would point to the 1000-2000 map.
+ MapInfo* prev_real_map_ = nullptr;
+
+ // Same as above but set to point to the next map.
+ MapInfo* next_real_map_ = nullptr;
};
} // namespace unwindstack
diff --git a/libunwindstack/tests/MapInfoTest.cpp b/libunwindstack/tests/MapInfoTest.cpp
index 59aa79e..055e965 100644
--- a/libunwindstack/tests/MapInfoTest.cpp
+++ b/libunwindstack/tests/MapInfoTest.cpp
@@ -16,6 +16,8 @@
#include <stdint.h>
+#include <thread>
+
#include <gtest/gtest.h>
#include <unwindstack/MapInfo.h>
@@ -72,4 +74,45 @@ TEST(MapInfoTest, get_function_name) {
EXPECT_EQ(1000UL, offset);
}
+TEST(MapInfoTest, multiple_thread_get_elf_fields) {
+ MapInfo map_info(nullptr, nullptr, 0, 0, 0, 0, "");
+
+ static constexpr size_t kNumConcurrentThreads = 100;
+ MapInfo::ElfFields* fields[kNumConcurrentThreads];
+ Elf* elfs[kNumConcurrentThreads];
+ uint64_t offsets[kNumConcurrentThreads];
+
+ std::atomic_bool wait;
+ wait = true;
+ // Create all of the threads and have them do the call at the same time
+ // to make it likely that a race will occur.
+ std::vector<std::thread*> threads;
+ for (size_t i = 0; i < kNumConcurrentThreads; i++) {
+ std::thread* thread = new std::thread([&]() {
+ while (wait)
+ ;
+ fields[i] = &map_info.GetElfFields();
+ elfs[i] = fields[i]->elf_.get();
+ offsets[i] = fields[i]->elf_offset_;
+ });
+ threads.push_back(thread);
+ }
+
+ // Set them all going and wait for the threads to finish.
+ wait = false;
+ for (auto thread : threads) {
+ thread->join();
+ delete thread;
+ }
+
+ // Now verify that all of them are exactly the same and valid.
+ for (size_t i = 0; i < kNumConcurrentThreads; i++) {
+ EXPECT_EQ(fields[0], fields[i]) << "Thread " << i << " mismatched.";
+ EXPECT_EQ(elfs[0], elfs[i]) << "Thread " << i << " mismatched.";
+ EXPECT_EQ(offsets[0], offsets[i]) << "Thread " << i << " mismatched.";
+ EXPECT_EQ(nullptr, elfs[i]);
+ EXPECT_EQ(0u, offsets[i]);
+ }
+}
+
} // namespace unwindstack