path: root/src/idmap.rs
Diffstat (limited to 'src/idmap.rs')
-rw-r--r--  src/idmap.rs  351
1 file changed, 328 insertions, 23 deletions
diff --git a/src/idmap.rs b/src/idmap.rs
index 870ccf8..8b25356 100644
--- a/src/idmap.rs
+++ b/src/idmap.rs
@@ -8,8 +8,8 @@
use crate::{
paging::{
- deallocate, Attributes, MemoryRegion, PageTable, PhysicalAddress, PteUpdater, Translation,
- VaRange, VirtualAddress,
+ deallocate, Attributes, Constraints, Descriptor, MemoryRegion, PageTable, PhysicalAddress,
+ Translation, VaRange, VirtualAddress,
},
MapError, Mapping,
};
@@ -57,7 +57,7 @@ impl Translation for IdTranslation {
///
/// # Example
///
-/// ```
+/// ```no_run
/// use aarch64_paging::{
/// idmap::IdMap,
/// paging::{Attributes, MemoryRegion},
@@ -71,24 +71,31 @@ impl Translation for IdTranslation {
/// // Map a 2 MiB region of memory as read-write.
/// idmap.map_range(
/// &MemoryRegion::new(0x80200000, 0x80400000),
-/// Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::EXECUTE_NEVER | Attributes::VALID,
+/// Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
/// ).unwrap();
-/// // Set `TTBR0_EL1` to activate the page table.
-/// # #[cfg(target_arch = "aarch64")]
-/// idmap.activate();
+/// // SAFETY: Everything the program uses is within the 2 MiB region mapped above.
+/// unsafe {
+/// // Set `TTBR0_EL1` to activate the page table.
+/// idmap.activate();
+/// }
///
/// // Write something to the memory...
///
-/// // Restore `TTBR0_EL1` to its earlier value while we modify the page table.
-/// # #[cfg(target_arch = "aarch64")]
-/// idmap.deactivate();
+/// // SAFETY: The program will only use memory within the initially mapped region until `idmap` is
+/// // reactivated below.
+/// unsafe {
+/// // Restore `TTBR0_EL1` to its earlier value while we modify the page table.
+/// idmap.deactivate();
+/// }
/// // Now change the mapping to read-only and executable.
/// idmap.map_range(
/// &MemoryRegion::new(0x80200000, 0x80400000),
/// Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::READ_ONLY | Attributes::VALID,
/// ).unwrap();
-/// # #[cfg(target_arch = "aarch64")]
-/// idmap.activate();
+/// // SAFETY: Everything the program will use is mapped by this page table.
+/// unsafe {
+/// idmap.activate();
+/// }
/// ```
#[derive(Debug)]
pub struct IdMap {
@@ -108,8 +115,16 @@ impl IdMap {
///
/// Panics if a previous value of `TTBR0_EL1` is already saved and not yet used by a call to
/// `deactivate`.
- #[cfg(target_arch = "aarch64")]
- pub fn activate(&mut self) {
+ ///
+ /// In test builds or builds that do not target aarch64, the `TTBR0_EL1` access is omitted.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the page table doesn't unmap any memory which the program is
+ /// using, or introduce aliases which break Rust's aliasing rules. The page table must not be
+ /// dropped as long as its mappings are required, as it will automatically be deactivated when
+ /// it is dropped.
+ pub unsafe fn activate(&mut self) {
self.mapping.activate()
}
@@ -117,10 +132,16 @@ impl IdMap {
/// [`activate`](Self::activate) was called, and invalidating the TLB for this page table's
/// configured ASID.
///
- /// Panics if there is no saved `TTRB0_EL1` value because `activate` has not previously been
+ /// Panics if there is no saved `TTBR0_EL1` value because `activate` has not previously been
/// called.
- #[cfg(target_arch = "aarch64")]
- pub fn deactivate(&mut self) {
+ ///
+ /// In test builds or builds that do not target aarch64, the `TTBR0_EL1` access is omitted.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the previous page table, which this call switches back to,
+ /// doesn't unmap any memory which the program is using.
+ pub unsafe fn deactivate(&mut self) {
self.mapping.deactivate()
}
@@ -139,15 +160,65 @@ impl IdMap {
///
/// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
/// largest virtual address covered by the page table given its root level.
+ ///
+ /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
+ ///
+ /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings
+ /// and modifying them would violate architectural break-before-make (BBM) requirements.
pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) -> Result<(), MapError> {
+ self.map_range_with_constraints(range, flags, Constraints::empty())
+ }
+
+ /// Maps the given range of virtual addresses to the identical physical addresses with the
+ /// given flags, taking the given constraints into account.
+ ///
+ /// This should generally only be called while the page table is not active. In particular, any
+ /// change that may require break-before-make per the architecture must be made while the page
+ /// table is inactive. Mapping a previously unmapped memory range may be done while the page
+ /// table is active. This function writes block and page entries, but only maps them if `flags`
+ /// contains `Attributes::VALID`, otherwise the entries remain invalid.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
+ /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
+ /// largest virtual address covered by the page table given its root level.
+ ///
+ /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
+ ///
+ /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings
+ /// and modifying them would violate architectural break-before-make (BBM) requirements.
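+ ///
+ /// # Example
+ ///
+ /// A minimal sketch (illustrative only), assuming the same two-argument `IdMap::new`
+ /// constructor used elsewhere in this file: the region is mapped with page entries only,
+ /// so that parts of it can later be remapped without violating break-before-make.
+ ///
+ /// ```no_run
+ /// use aarch64_paging::{
+ ///     idmap::IdMap,
+ ///     paging::{Attributes, Constraints, MemoryRegion},
+ /// };
+ ///
+ /// let mut idmap = IdMap::new(1, 1);
+ /// // NO_BLOCK_MAPPINGS forces page-granular entries rather than block entries.
+ /// idmap.map_range_with_constraints(
+ ///     &MemoryRegion::new(0x80200000, 0x80400000),
+ ///     Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
+ ///     Constraints::NO_BLOCK_MAPPINGS,
+ /// ).unwrap();
+ /// ```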
+ pub fn map_range_with_constraints(
+ &mut self,
+ range: &MemoryRegion,
+ flags: Attributes,
+ constraints: Constraints,
+ ) -> Result<(), MapError> {
let pa = IdTranslation::virtual_to_physical(range.start());
- self.mapping.map_range(range, pa, flags)
+ self.mapping.map_range(range, pa, flags, constraints)
}
- /// Applies the provided updater function to a number of PTEs corresponding to a given memory range.
+ /// Applies the provided updater function to the page table descriptors covering a given
+ /// memory range.
+ ///
+ /// This may involve splitting block entries if the provided range is not currently mapped
+ /// down to its precise boundaries. For visiting all the descriptors covering a memory range
+ /// without potential splitting (and no descriptor updates), use
+ /// [`walk_range`](Self::walk_range) instead.
///
- /// The virtual address range passed to the updater function may be expanded compared to the
- /// `range` parameter, due to alignment to block boundaries.
+ /// The updater function receives the following arguments:
+ ///
+ /// - The virtual address range mapped by each page table descriptor. A new descriptor will
+ /// have been allocated before the invocation of the updater function if a page table split
+ /// was needed.
+ /// - A mutable reference to the page table descriptor that permits modifications.
+ /// - The level of the translation table that the descriptor belongs to.
+ ///
+ /// The updater function should return:
+ ///
+ /// - `Ok` to continue updating the remaining entries.
+ /// - `Err` to signal an error and stop updating the remaining entries.
///
/// This should generally only be called while the page table is not active. In particular, any
/// change that may require break-before-make per the architecture must be made while the page
@@ -162,16 +233,52 @@ impl IdMap {
///
/// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
/// largest virtual address covered by the page table given its root level.
- pub fn modify_range(&mut self, range: &MemoryRegion, f: &PteUpdater) -> Result<(), MapError> {
+ ///
+ /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings
+ /// and modifying them would violate architectural break-before-make (BBM) requirements.
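+ ///
+ /// # Example
+ ///
+ /// A minimal sketch, mirroring the updater used in the tests below: it sets a software
+ /// flag on leaf entries while leaving table entries untouched. The page table is not
+ /// activated here, so the update cannot violate break-before-make.
+ ///
+ /// ```no_run
+ /// use aarch64_paging::{
+ ///     idmap::IdMap,
+ ///     paging::{Attributes, MemoryRegion, PAGE_SIZE},
+ /// };
+ ///
+ /// let mut idmap = IdMap::new(1, 1);
+ /// idmap.map_range(
+ ///     &MemoryRegion::new(0, PAGE_SIZE),
+ ///     Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
+ /// ).unwrap();
+ /// idmap.modify_range(&MemoryRegion::new(0, PAGE_SIZE), &|_range, entry, level| {
+ ///     // Only update leaf entries, not the table entries leading to them.
+ ///     if level == 3 || !entry.is_table_or_page() {
+ ///         // Set SWFLAG_0; an empty clear mask leaves all other bits as they are.
+ ///         entry.modify_flags(Attributes::SWFLAG_0, Attributes::empty());
+ ///     }
+ ///     Ok(())
+ /// }).unwrap();
+ /// ```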
+ pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
+ where
+ F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
+ {
self.mapping.modify_range(range, f)
}
+
+ /// Applies the provided callback function to the page table descriptors covering a given
+ /// memory range.
+ ///
+ /// The callback function receives the following arguments:
+ ///
+ /// - The full virtual address range mapped by each visited page table descriptor, which may
+ ///   exceed the original range passed to `walk_range` due to alignment to block boundaries.
+ /// - The page table descriptor itself.
+ /// - The level of the translation table that the descriptor belongs to.
+ ///
+ /// The callback function should return:
+ ///
+ /// - `Ok` to continue visiting the remaining entries.
+ /// - `Err` to signal an error and stop visiting the remaining entries.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
+ ///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
+ /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
+ /// largest virtual address covered by the page table given its root level.
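+ ///
+ /// # Example
+ ///
+ /// A minimal sketch that counts the level-3 page entries covering a region, assuming the
+ /// region has already been mapped at page granularity:
+ ///
+ /// ```no_run
+ /// use aarch64_paging::{
+ ///     idmap::IdMap,
+ ///     paging::{Attributes, MemoryRegion, PAGE_SIZE},
+ /// };
+ ///
+ /// let mut idmap = IdMap::new(1, 1);
+ /// idmap.map_range(
+ ///     &MemoryRegion::new(0, PAGE_SIZE * 4),
+ ///     Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
+ /// ).unwrap();
+ /// let mut pages = 0;
+ /// idmap.walk_range(&MemoryRegion::new(0, PAGE_SIZE * 4), &mut |_range, entry, level| {
+ ///     if level == 3 && entry.is_table_or_page() {
+ ///         pages += 1;
+ ///     }
+ ///     Ok(())
+ /// }).unwrap();
+ /// // With a 4 KiB granule, four level-3 page entries cover the region.
+ /// assert_eq!(pages, 4);
+ /// ```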
+ pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
+ where
+ F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
+ {
+ self.mapping.walk_range(range, f)
+ }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
- paging::{Attributes, MemoryRegion, PAGE_SIZE},
+ paging::{Attributes, MemoryRegion, BITS_PER_LEVEL, PAGE_SIZE},
MapError, VirtualAddress,
};
@@ -181,6 +288,11 @@ mod tests {
fn map_valid() {
// A single byte at the start of the address space.
let mut idmap = IdMap::new(1, 1);
+ // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
assert_eq!(
idmap.map_range(
&MemoryRegion::new(0, 1),
@@ -191,6 +303,11 @@ mod tests {
// Two pages at the start of the address space.
let mut idmap = IdMap::new(1, 1);
+ // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
assert_eq!(
idmap.map_range(
&MemoryRegion::new(0, PAGE_SIZE * 2),
@@ -201,6 +318,11 @@ mod tests {
// A single byte at the end of the address space.
let mut idmap = IdMap::new(1, 1);
+ // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
assert_eq!(
idmap.map_range(
&MemoryRegion::new(
@@ -214,6 +336,11 @@ mod tests {
// Two pages, on the boundary between two subtables.
let mut idmap = IdMap::new(1, 1);
+ // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
assert_eq!(
idmap.map_range(
&MemoryRegion::new(PAGE_SIZE * 1023, PAGE_SIZE * 1025),
@@ -224,6 +351,11 @@ mod tests {
// The entire valid address space.
let mut idmap = IdMap::new(1, 1);
+ // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
assert_eq!(
idmap.map_range(
&MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
@@ -234,6 +366,161 @@ mod tests {
}
#[test]
+ fn map_break_before_make() {
+ const BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
+ let mut idmap = IdMap::new(1, 1);
+ idmap
+ .map_range_with_constraints(
+ &MemoryRegion::new(BLOCK_SIZE, 2 * BLOCK_SIZE),
+ Attributes::NORMAL | Attributes::VALID,
+ Constraints::NO_BLOCK_MAPPINGS,
+ )
+ .unwrap();
+ // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
+
+ // Splitting a range is permitted if it was mapped down to pages
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(BLOCK_SIZE, BLOCK_SIZE + PAGE_SIZE),
+ Attributes::NORMAL | Attributes::VALID,
+ ),
+ Ok(())
+ );
+
+ let mut idmap = IdMap::new(1, 1);
+ idmap
+ .map_range(
+ &MemoryRegion::new(BLOCK_SIZE, 2 * BLOCK_SIZE),
+ Attributes::NORMAL | Attributes::VALID,
+ )
+ .ok();
+ // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
+
+ // Extending a range is fine even if there are block mappings
+ // in the middle
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(BLOCK_SIZE - PAGE_SIZE, 2 * BLOCK_SIZE + PAGE_SIZE),
+ Attributes::NORMAL | Attributes::VALID,
+ ),
+ Ok(())
+ );
+
+ // Splitting a range is not permitted
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(BLOCK_SIZE, BLOCK_SIZE + PAGE_SIZE),
+ Attributes::NORMAL | Attributes::VALID,
+ ),
+ Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
+ BLOCK_SIZE,
+ BLOCK_SIZE + PAGE_SIZE
+ )))
+ );
+
+ // Remapping a partially live range read-only is only permitted
+ // if it does not require splitting
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(0, BLOCK_SIZE + PAGE_SIZE),
+ Attributes::NORMAL | Attributes::VALID | Attributes::READ_ONLY,
+ ),
+ Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
+ 0,
+ BLOCK_SIZE + PAGE_SIZE
+ )))
+ );
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(0, BLOCK_SIZE),
+ Attributes::NORMAL | Attributes::VALID | Attributes::READ_ONLY,
+ ),
+ Ok(())
+ );
+
+ // Changing the memory type is not permitted
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(0, BLOCK_SIZE),
+ Attributes::DEVICE_NGNRE | Attributes::VALID | Attributes::NON_GLOBAL,
+ ),
+ Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
+ 0, BLOCK_SIZE
+ )))
+ );
+
+ // Making a range invalid is only permitted if it does not require splitting
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(PAGE_SIZE, BLOCK_SIZE + PAGE_SIZE),
+ Attributes::NORMAL,
+ ),
+ Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
+ PAGE_SIZE,
+ BLOCK_SIZE + PAGE_SIZE
+ )))
+ );
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(PAGE_SIZE, BLOCK_SIZE),
+ Attributes::NORMAL,
+ ),
+ Ok(())
+ );
+
+ // Creating a new valid entry is always permitted
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(0, 2 * PAGE_SIZE),
+ Attributes::NORMAL | Attributes::VALID,
+ ),
+ Ok(())
+ );
+
+ // Setting the non-global attribute is permitted
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(0, PAGE_SIZE),
+ Attributes::NORMAL | Attributes::VALID | Attributes::NON_GLOBAL,
+ ),
+ Ok(())
+ );
+
+ // Removing the non-global attribute from a live mapping is not permitted
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(0, PAGE_SIZE),
+ Attributes::NORMAL | Attributes::VALID,
+ ),
+ Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
+ 0, PAGE_SIZE
+ )))
+ );
+
+ // SAFETY: This doesn't actually deactivate the page table in tests, it just treats it as
+ // inactive for the sake of BBM rules.
+ unsafe {
+ idmap.deactivate();
+ }
+ // Removing the non-global attribute from an inactive mapping is permitted
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(0, PAGE_SIZE),
+ Attributes::NORMAL | Attributes::VALID,
+ ),
+ Ok(())
+ );
+ }
+
+ #[test]
fn map_out_of_range() {
let mut idmap = IdMap::new(1, 1);
@@ -274,6 +561,11 @@ mod tests {
| Attributes::VALID,
)
.unwrap();
+ // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
idmap
}
@@ -295,6 +587,14 @@ mod tests {
#[test]
fn update_range() {
let mut idmap = make_map();
+ assert!(idmap
+ .modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry, level| {
+ if level == 3 || !entry.is_table_or_page() {
+ entry.modify_flags(Attributes::SWFLAG_0, Attributes::NON_GLOBAL);
+ }
+ Ok(())
+ })
+ .is_err());
idmap
.modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry, level| {
if level == 3 || !entry.is_table_or_page() {
@@ -319,6 +619,11 @@ mod tests {
fn breakup_invalid_block() {
const BLOCK_RANGE: usize = 0x200000;
let mut idmap = IdMap::new(1, 1);
+ // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
idmap
.map_range(
&MemoryRegion::new(0, BLOCK_RANGE),