author    Kalesh Singh <kaleshsingh@google.com>  2024-03-13 13:38:04 -0700
committer Kalesh Singh <kaleshsingh@google.com>  2024-03-13 13:38:44 -0700
commit    702d9b0bad7eac3d1d59e8e433f5a6a0345ea5f8 (patch)
tree      5deeed36e8b295b21632e0aed8a853e7fad53109
parent    4084b555b2658f6bdbcb64781b3ff6d478228221 (diff)
Reapply "RELAND: bionic: loader: Extend GNU_RELRO protection"
This reverts commit 26de64896cd339ec5e811523b375e3b4ade4860d.

Bug: 328797737
Test: Dexcom G7 app
Change-Id: I98882edd17f0ea5432ab254482ab9508bfaf4f56
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
-rw-r--r--  linker/linker.cpp       |  2
-rw-r--r--  linker/linker_phdr.cpp  | 72
-rw-r--r--  linker/linker_phdr.h    |  2
3 files changed, 70 insertions, 6 deletions
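
For context on what this protection does: after relocations have been applied, the loader makes the PT_GNU_RELRO range read-only with mprotect(2). Below is a minimal standalone sketch of that idea, not bionic's implementation; the page_start/page_end helpers and error handling are simplified, and the names are illustrative only.

#include <elf.h>
#include <link.h>       // ElfW()
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

// Round an address down / up to a page boundary (page size queried at runtime).
static ElfW(Addr) page_start(ElfW(Addr) addr) {
  const ElfW(Addr) kPageSize = static_cast<ElfW(Addr)>(getpagesize());
  return addr & ~(kPageSize - 1);
}
static ElfW(Addr) page_end(ElfW(Addr) addr) {
  const ElfW(Addr) kPageSize = static_cast<ElfW(Addr)>(getpagesize());
  return (addr + kPageSize - 1) & ~(kPageSize - 1);
}

// Make every PT_GNU_RELRO range read-only once relocations are done.
// Returns 0 on success, -1 on failure (errno set by mprotect).
static int protect_relro_sketch(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];
    if (phdr->p_type != PT_GNU_RELRO) continue;
    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    if (mprotect(reinterpret_cast<void*>(seg_page_start),
                 seg_page_end - seg_page_start, PROT_READ) == -1) {
      return -1;
    }
  }
  return 0;
}

The patch below changes how seg_page_end is computed when segments were padded at load time, so that the protected range matches the extended mapping.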
diff --git a/linker/linker.cpp b/linker/linker.cpp
index 4fb9d5b53..e54a52493 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -3418,7 +3418,7 @@ bool soinfo::link_image(const SymbolLookupList& lookup_list, soinfo* local_group
}
bool soinfo::protect_relro() {
- if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias) < 0) {
+ if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias, should_pad_segments_) < 0) {
DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
get_realpath(), strerror(errno));
return false;
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index 07b54c5d2..821f30dff 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -1010,11 +1010,71 @@ int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
should_pad_segments);
}
+static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
+ const ElfW(Phdr)* phdr_table, size_t phdr_count,
+ ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end,
+ bool should_pad_segments) {
+ // Find the index and phdr of the LOAD containing the GNU_RELRO segment
+ for (size_t index = 0; index < phdr_count; ++index) {
+ const ElfW(Phdr)* phdr = &phdr_table[index];
+
+ if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
+ // If the PT_GNU_RELRO mem size is not at least as large as the corresponding
+ // LOAD segment mem size, we need to protect only a partial region of the
+ // LOAD segment and therefore cannot avoid a VMA split.
+ //
+ // Note: Don't check the page-aligned mem sizes since the extended protection
+ // may incorrectly write protect non-relocation data.
+ //
+ // Example:
+ //
+ // |---- 3K ----|-- 1K --|---- 3K ---- |-- 1K --|
+ // ----------------------------------------------------------------
+ // | | | | |
+ // SEG X | RO | RO | RW | | SEG Y
+ // | | | | |
+ // ----------------------------------------------------------------
+ // | | |
+ // | | |
+ // | | |
+ // relro_vaddr relro_vaddr relro_vaddr
+ // (load_vaddr) + +
+ // relro_memsz load_memsz
+ //
+ // ----------------------------------------------------------------
+ // | PAGE | PAGE |
+ // ----------------------------------------------------------------
+ // | Potential |
+ // |----- Extended RO ----|
+ // | Protection |
+ //
+ // If the check below uses page aligned mem sizes it will cause incorrect write
+ // protection of the 3K RW part of the LOAD segment containing the GNU_RELRO.
+ if (relro_phdr->p_memsz < phdr->p_memsz) {
+ return;
+ }
+
+ ElfW(Addr) p_memsz = phdr->p_memsz;
+ ElfW(Addr) p_filesz = phdr->p_filesz;
+
+ // Attempt extending the VMA (mprotect range). Without extending the range,
+ // mprotect will only RO protect a part of the extended RW LOAD segment, which
+ // will leave an extra split RW VMA (the gap).
+ _extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz,
+ should_pad_segments);
+
+ *seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
+ return;
+ }
+ }
+}
+
/* Used internally by phdr_table_protect_gnu_relro and
* phdr_table_unprotect_gnu_relro.
*/
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, int prot_flags) {
+ ElfW(Addr) load_bias, int prot_flags,
+ bool should_pad_segments) {
const ElfW(Phdr)* phdr = phdr_table;
const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
@@ -1041,6 +1101,8 @@ static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t p
// that it starts on a page boundary.
ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+ _extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end,
+ should_pad_segments);
int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
seg_page_end - seg_page_start,
@@ -1065,12 +1127,14 @@ static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t p
* phdr_table -> program header table
* phdr_count -> number of entries in tables
* load_bias -> load bias
+ * should_pad_segments -> Whether segments were extended to avoid gaps in the memory map
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
-int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
- size_t phdr_count, ElfW(Addr) load_bias) {
- return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
+int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
+ ElfW(Addr) load_bias, bool should_pad_segments) {
+ return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
+ should_pad_segments);
}
/* Serialize the GNU relro segments to the given file descriptor. This can be
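
A small worked example of the p_memsz check in _extend_gnu_relro_prot_end above, using invented values that mirror the 3K/1K/3K diagram (4 KiB pages assumed; this is only an illustration of the arithmetic, not loader code):

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kPageSize = 4096;
  // Invented values: a 7K LOAD (3K RO + 1K RO + 3K RW) whose GNU_RELRO
  // covers only the first 4K of read-only data.
  const uint64_t load_vaddr = 0x1000, load_memsz = 0x1C00;    // 7K
  const uint64_t relro_vaddr = 0x1000, relro_memsz = 0x1000;  // 4K

  auto page_end = [&](uint64_t addr) { return (addr + kPageSize - 1) & ~(kPageSize - 1); };

  // Unextended end: protection stops at 0x2000, leaving a split RW VMA after it.
  uint64_t relro_page_end = page_end(relro_vaddr + relro_memsz);    // 0x2000
  // Extended end: would protect up to 0x3000, i.e. the 3K RW region as well.
  uint64_t extended_page_end = page_end(load_vaddr + load_memsz);   // 0x3000

  // The raw (unaligned) sizes differ, so the extension must be skipped here.
  bool can_extend = relro_memsz >= load_memsz;                      // false

  printf("relro end %#llx, extended end %#llx, extend: %s\n",
         (unsigned long long) relro_page_end,
         (unsigned long long) extended_page_end,
         can_extend ? "yes" : "no (would write-protect RW data)");
  return 0;
}

When the GNU_RELRO mem size does match the LOAD mem size, the new code calls _extend_load_segment_vma and page-aligns the extended end, so the whole padded LOAD mapping ends up read-only in a single VMA.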
diff --git a/linker/linker_phdr.h b/linker/linker_phdr.h
index 239ecb947..4deed3349 100644
--- a/linker/linker_phdr.h
+++ b/linker/linker_phdr.h
@@ -135,7 +135,7 @@ int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_coun
ElfW(Addr) load_bias, bool should_pad_segments);
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias);
+ ElfW(Addr) load_bias, bool should_pad_segments);
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, int fd, size_t* file_offset);
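
The should_pad_segments flag threaded through these signatures refers to the segment padding performed at load time by _extend_load_segment_vma, which was introduced in the earlier patches this change builds on and is not shown in this diff. The sketch below is a rough, assumed model of that extension, inferred only from how it is used here: it grows a LOAD segment's effective size up to the page-aligned start of the next LOAD segment. The real helper also adjusts p_filesz and applies additional conditions.

#include <elf.h>
#include <link.h>
#include <stddef.h>
#include <unistd.h>

// Simplified model only: grow this LOAD segment's effective p_memsz so that
// its page-aligned end meets the page-aligned start of the next LOAD segment,
// leaving no unprotected gap VMA between the two mappings.
static void extend_load_segment_sketch(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                       size_t index, ElfW(Addr)* p_memsz,
                                       bool should_pad_segments) {
  if (!should_pad_segments) {
    return;  // Padding disabled: keep the segment's original size.
  }
  const ElfW(Addr) kPageSize = static_cast<ElfW(Addr)>(getpagesize());
  for (size_t next = index + 1; next < phdr_count; ++next) {
    if (phdr_table[next].p_type != PT_LOAD) continue;
    ElfW(Addr) next_page_start = phdr_table[next].p_vaddr & ~(kPageSize - 1);
    // Only ever extend; never shrink the segment.
    if (next_page_start > phdr_table[index].p_vaddr + *p_memsz) {
      *p_memsz = next_page_start - phdr_table[index].p_vaddr;
    }
    return;  // Only the immediately following LOAD segment matters.
  }
}

With that in mind, callers such as soinfo::protect_relro simply pass their should_pad_segments_ flag through, as the linker.cpp hunk above shows, so RELRO protection covers the same (possibly extended) range that was mapped at load time.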