diff options
Diffstat (limited to 'linker/linker_phdr.cpp')
-rw-r--r-- | linker/linker_phdr.cpp | 59 |
1 file changed, 54 insertions, 5 deletions
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp index ef7671cee..fa712a10a 100644 --- a/linker/linker_phdr.cpp +++ b/linker/linker_phdr.cpp @@ -46,6 +46,8 @@ #include "private/CFIShadow.h" // For kLibraryAlignment #include "private/elf_note.h" +#include <android-base/file.h> + static int GetTargetElfMachine() { #if defined(__arm__) return EM_ARM; @@ -707,8 +709,28 @@ bool ElfReader::ReserveAddressSpace(address_space_params* address_space) { return true; } +/* + * Returns true if the kernel supports page size migration, else false. + */ +bool page_size_migration_supported() { + static bool pgsize_migration_enabled = []() { + std::string enabled; + if (!android::base::ReadFileToString("/sys/kernel/mm/pgsize_migration/enabled", &enabled)) { + return false; + } + return enabled.find("1") != std::string::npos; + }(); + return pgsize_migration_enabled; +} + // Find the ELF note of type NT_ANDROID_TYPE_PAD_SEGMENT and check that the desc value is 1. bool ElfReader::ReadPadSegmentNote() { + if (!page_size_migration_supported()) { + // Don't attempt to read the note, since segment extension isn't + // supported; but return true so that loading can continue normally. + return true; + } + // The ELF can have multiple PT_NOTE's, check them all for (size_t i = 0; i < phdr_num_; ++i) { const ElfW(Phdr)* phdr = &phdr_table_[i]; @@ -773,7 +795,16 @@ static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t const ElfW(Phdr)* next = nullptr; size_t next_idx = phdr_idx + 1; - if (phdr->p_align == kPageSize || !should_pad_segments) { + // Don't do segment extension for p_align > 64KiB, such ELFs already existed in the + // field e.g. 2MiB p_align for THPs and are relatively small in number. + // + // The kernel can only represent padding for p_align up to 64KiB. This is because + the kernel uses 4 available bits in the vm_area_struct to represent padding + extent; and so cannot enable mitigations to avoid breaking app compatibility for + p_aligns > 64KiB. + // + // Don't perform segment extension on these to avoid app compatibility issues. + if (phdr->p_align <= kPageSize || phdr->p_align > 64*1024 || !should_pad_segments) { return; } @@ -887,10 +918,28 @@ bool ElfReader::LoadSegments() { // 2) Break the COW backing, faulting in new anon pages for a region // that will not be used. - // _seg_file_end = unextended seg_file_end - uint64_t _seg_file_end = seg_start + phdr->p_filesz; - if ((phdr->p_flags & PF_W) != 0 && page_offset(_seg_file_end) > 0) { - memset(reinterpret_cast<void*>(_seg_file_end), 0, kPageSize - page_offset(_seg_file_end)); + uint64_t unextended_seg_file_end = seg_start + phdr->p_filesz; + if ((phdr->p_flags & PF_W) != 0 && page_offset(unextended_seg_file_end) > 0) { + memset(reinterpret_cast<void*>(unextended_seg_file_end), 0, + kPageSize - page_offset(unextended_seg_file_end)); + } + + // Pages may be brought in due to readahead. + // Drop the padding (zero) pages, to avoid reclaim work later. + // + // NOTE: The madvise() here is special, as it also serves to hint to the + // kernel the portion of the LOAD segment that is padding. + // + // See: [1] https://android-review.googlesource.com/c/kernel/common/+/3032411 + // [2] https://android-review.googlesource.com/c/kernel/common/+/3048835 + uint64_t pad_start = page_end(unextended_seg_file_end); + uint64_t pad_end = page_end(seg_file_end); + CHECK(pad_start <= pad_end); + uint64_t pad_len = pad_end - pad_start; + if (page_size_migration_supported() && pad_len > 0 && + madvise(reinterpret_cast<void*>(pad_start), pad_len, MADV_DONTNEED)) { + DL_WARN("\"%s\": madvise(0x%" PRIx64 ", 0x%" PRIx64 ", MADV_DONTNEED) failed: %m", + name_.c_str(), pad_start, pad_len); } seg_file_end = page_end(seg_file_end); |