Diffstat (limited to 'src/idmap.rs')
-rw-r--r-- src/idmap.rs | 140
1 file changed, 127 insertions(+), 13 deletions(-)
diff --git a/src/idmap.rs b/src/idmap.rs
index 06455ed..870ccf8 100644
--- a/src/idmap.rs
+++ b/src/idmap.rs
@@ -8,8 +8,8 @@
use crate::{
paging::{
- deallocate, Attributes, MemoryRegion, PageTable, PhysicalAddress, Translation, VaRange,
- VirtualAddress,
+ deallocate, Attributes, MemoryRegion, PageTable, PhysicalAddress, PteUpdater, Translation,
+ VaRange, VirtualAddress,
},
MapError, Mapping,
};
@@ -71,7 +71,7 @@ impl Translation for IdTranslation {
/// // Map a 2 MiB region of memory as read-write.
/// idmap.map_range(
/// &MemoryRegion::new(0x80200000, 0x80400000),
-/// Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::EXECUTE_NEVER,
+/// Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::EXECUTE_NEVER | Attributes::VALID,
/// ).unwrap();
/// // Set `TTBR0_EL1` to activate the page table.
/// # #[cfg(target_arch = "aarch64")]
@@ -85,7 +85,7 @@ impl Translation for IdTranslation {
/// // Now change the mapping to read-only and executable.
/// idmap.map_range(
/// &MemoryRegion::new(0x80200000, 0x80400000),
-/// Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::READ_ONLY,
+/// Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::READ_ONLY | Attributes::VALID,
/// ).unwrap();
/// # #[cfg(target_arch = "aarch64")]
/// idmap.activate();
@@ -130,16 +130,41 @@ impl IdMap {
/// This should generally only be called while the page table is not active. In particular, any
/// change that may require break-before-make per the architecture must be made while the page
/// table is inactive. Mapping a previously unmapped memory range may be done while the page
- /// table is active.
+ /// table is active. This function writes block and page entries, but those entries only map
+ /// memory if `flags` contains `Attributes::VALID`; otherwise they are left invalid.
///
/// # Errors
///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
/// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
/// largest virtual address covered by the page table given its root level.
pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) -> Result<(), MapError> {
let pa = IdTranslation::virtual_to_physical(range.start());
self.mapping.map_range(range, pa, flags)
}
+
+ /// Applies the provided updater function to the page table entries (PTEs) covering the
+ /// given memory range.
+ ///
+ /// The virtual address range passed to the updater function may be expanded compared to the
+ /// `range` parameter, due to alignment to block boundaries.
+ ///
+ /// This should generally only be called while the page table is not active. In particular, any
+ /// change that may require break-before-make per the architecture must be made while the page
+ /// table is inactive. Mapping a previously unmapped memory range may be done while the page
+ /// table is active.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
+ ///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
+ /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
+ /// largest virtual address covered by the page table given its root level.
+ pub fn modify_range(&mut self, range: &MemoryRegion, f: &PteUpdater) -> Result<(), MapError> {
+ self.mapping.modify_range(range, f)
+ }
}
#[cfg(test)]
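Note: the change above makes `Attributes::VALID` an explicit opt-in. `map_range` still writes block and page entries, but they only take effect as mappings when `VALID` is set. A minimal usage sketch, assuming the `IdMap` API as shown in this diff; the ASID/root-level arguments and the address range are illustrative:

```rust
use aarch64_paging::{
    idmap::IdMap,
    paging::{Attributes, MemoryRegion},
};

fn map_example() {
    // ASID 1, root level 1 (illustrative values).
    let mut idmap = IdMap::new(1, 1);

    // Writes entries for the region but leaves them invalid: without
    // Attributes::VALID they carry attributes but are not live mappings.
    idmap
        .map_range(
            &MemoryRegion::new(0x8020_0000, 0x8040_0000),
            Attributes::NORMAL | Attributes::NON_GLOBAL,
        )
        .unwrap();

    // Mapping the same region again with VALID turns the entries into
    // live mappings.
    idmap
        .map_range(
            &MemoryRegion::new(0x8020_0000, 0x8040_0000),
            Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
        )
        .unwrap();
}
```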
@@ -147,7 +172,7 @@ mod tests {
use super::*;
use crate::{
paging::{Attributes, MemoryRegion, PAGE_SIZE},
- MapError,
+ MapError, VirtualAddress,
};
const MAX_ADDRESS_FOR_ROOT_LEVEL_1: usize = 1 << 39;
@@ -157,14 +182,20 @@ mod tests {
// A single byte at the start of the address space.
let mut idmap = IdMap::new(1, 1);
assert_eq!(
- idmap.map_range(&MemoryRegion::new(0, 1), Attributes::NORMAL),
+ idmap.map_range(
+ &MemoryRegion::new(0, 1),
+ Attributes::NORMAL | Attributes::VALID
+ ),
Ok(())
);
// Two pages at the start of the address space.
let mut idmap = IdMap::new(1, 1);
assert_eq!(
- idmap.map_range(&MemoryRegion::new(0, PAGE_SIZE * 2), Attributes::NORMAL),
+ idmap.map_range(
+ &MemoryRegion::new(0, PAGE_SIZE * 2),
+ Attributes::NORMAL | Attributes::VALID
+ ),
Ok(())
);
@@ -176,7 +207,7 @@ mod tests {
MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
MAX_ADDRESS_FOR_ROOT_LEVEL_1
),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -186,7 +217,7 @@ mod tests {
assert_eq!(
idmap.map_range(
&MemoryRegion::new(PAGE_SIZE * 1023, PAGE_SIZE * 1025),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -196,7 +227,7 @@ mod tests {
assert_eq!(
idmap.map_range(
&MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -213,7 +244,7 @@ mod tests {
MAX_ADDRESS_FOR_ROOT_LEVEL_1,
MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1,
),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Err(MapError::AddressRange(VirtualAddress(
MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
@@ -224,11 +255,94 @@ mod tests {
assert_eq!(
idmap.map_range(
&MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1,),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Err(MapError::AddressRange(VirtualAddress(
MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
)))
);
}
+
+ fn make_map() -> IdMap {
+ let mut idmap = IdMap::new(1, 1);
+ idmap
+ .map_range(
+ &MemoryRegion::new(0, PAGE_SIZE * 2),
+ Attributes::NORMAL
+ | Attributes::NON_GLOBAL
+ | Attributes::READ_ONLY
+ | Attributes::VALID,
+ )
+ .unwrap();
+ idmap
+ }
+
+ #[test]
+ fn update_backwards_range() {
+ let mut idmap = make_map();
+ assert!(idmap
+ .modify_range(
+ &MemoryRegion::new(PAGE_SIZE * 2, 1),
+ &|_range, entry, _level| {
+ entry
+ .modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap());
+ Ok(())
+ },
+ )
+ .is_err());
+ }
+
+ #[test]
+ fn update_range() {
+ let mut idmap = make_map();
+ idmap
+ .modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry, level| {
+ if level == 3 || !entry.is_table_or_page() {
+ entry
+ .modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap());
+ }
+ Ok(())
+ })
+ .unwrap();
+ idmap
+ .modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|range, entry, level| {
+ if level == 3 || !entry.is_table_or_page() {
+ assert!(entry.flags().unwrap().contains(Attributes::SWFLAG_0));
+ assert_eq!(range.end() - range.start(), PAGE_SIZE);
+ }
+ Ok(())
+ })
+ .unwrap();
+ }
+
+ #[test]
+ fn breakup_invalid_block() {
+ const BLOCK_RANGE: usize = 0x200000;
+ let mut idmap = IdMap::new(1, 1);
+ idmap
+ .map_range(
+ &MemoryRegion::new(0, BLOCK_RANGE),
+ Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::SWFLAG_0,
+ )
+ .unwrap();
+ idmap
+ .map_range(
+ &MemoryRegion::new(0, PAGE_SIZE),
+ Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
+ )
+ .unwrap();
+ idmap
+ .modify_range(
+ &MemoryRegion::new(0, BLOCK_RANGE),
+ &|range, entry, level| {
+ if level == 3 {
+ let has_swflag = entry.flags().unwrap().contains(Attributes::SWFLAG_0);
+ let is_first_page = range.start().0 == 0usize;
+ assert!(has_swflag != is_first_page);
+ }
+ Ok(())
+ },
+ )
+ .unwrap();
+ }
}
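For completeness, a sketch of how the new `modify_range` entry point might be called outside the test suite, assuming the `PteUpdater` signature implied by the closures above (`Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()>`); `make_read_only` is a hypothetical helper:

```rust
use aarch64_paging::{
    idmap::IdMap,
    paging::{Attributes, MemoryRegion, PAGE_SIZE},
};

// Hypothetical helper: set READ_ONLY and clear SWFLAG_0 on every leaf
// entry covering the first two pages of the identity mapping.
fn make_read_only(idmap: &mut IdMap) {
    idmap
        .modify_range(
            &MemoryRegion::new(0, PAGE_SIZE * 2),
            &|_range, entry, level| {
                // Level 3 entries are always pages; at higher levels only
                // non-table entries (blocks) are leaves.
                if level == 3 || !entry.is_table_or_page() {
                    entry.modify_flags(Attributes::READ_ONLY, Attributes::SWFLAG_0);
                }
                Ok(())
            },
        )
        .unwrap();
}
```

As the doc comment notes, the range handed to the updater may be wider than the one passed in, since it is expanded to block boundaries; updaters that care about exact extents should check the `range` argument, as `update_range` in the tests does.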