Diffstat (limited to 'src/linearmap.rs')
-rw-r--r--  src/linearmap.rs  139
1 file changed, 124 insertions, 15 deletions
diff --git a/src/linearmap.rs b/src/linearmap.rs
index 7dd7c09..921a683 100644
--- a/src/linearmap.rs
+++ b/src/linearmap.rs
@@ -8,8 +8,8 @@
use crate::{
paging::{
- deallocate, is_aligned, Attributes, MemoryRegion, PageTable, PhysicalAddress, Translation,
- VaRange, VirtualAddress, PAGE_SIZE,
+ deallocate, is_aligned, Attributes, MemoryRegion, PageTable, PhysicalAddress, PteUpdater,
+ Translation, VaRange, VirtualAddress, PAGE_SIZE,
},
MapError, Mapping,
};
@@ -139,13 +139,16 @@ impl LinearMap {
/// This should generally only be called while the page table is not active. In particular, any
/// change that may require break-before-make per the architecture must be made while the page
/// table is inactive. Mapping a previously unmapped memory range may be done while the page
- /// table is active.
+ /// table is active. This function writes block and page entries, but only maps them if `flags`
+ /// contains `Attributes::VALID`; otherwise the entries remain invalid.
///
/// # Errors
///
/// Returns [`MapError::InvalidVirtualAddress`] if adding the configured offset to any virtual
/// address within the `range` would result in overflow.
///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
/// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
/// largest virtual address covered by the page table given its root level.
pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) -> Result<(), MapError> {
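A minimal sketch of the new `flags` semantics, not part of the patch itself: `map_range` writes the entries either way, but only `Attributes::VALID` makes them live. Crate-internal scope is assumed (i.e. code in src/linearmap.rs with `LinearMap` visible), matching the imports above.

    use crate::paging::{Attributes, MemoryRegion, VaRange, PAGE_SIZE};

    fn sketch_valid_vs_invalid() {
        // asid 1, root level 1, offset 4096, lower VA range, as in the tests below.
        let mut lmap = LinearMap::new(1, 1, 4096, VaRange::Lower);
        // Written and live: VALID is set.
        lmap.map_range(
            &MemoryRegion::new(0, PAGE_SIZE),
            Attributes::NORMAL | Attributes::VALID,
        )
        .unwrap();
        // Written but left invalid: without VALID no translation is installed,
        // until a later update (e.g. via modify_range) sets the bit.
        lmap.map_range(&MemoryRegion::new(PAGE_SIZE, 2 * PAGE_SIZE), Attributes::NORMAL)
            .unwrap();
    }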
@@ -156,6 +159,28 @@ impl LinearMap {
.virtual_to_physical(range.start())?;
self.mapping.map_range(range, pa, flags)
}
+
+ /// Applies the provided updater function to the PTEs covering the given memory range.
+ ///
+ /// The virtual address range passed to the updater function may be expanded compared to the
+ /// `range` parameter, due to alignment to block boundaries.
+ ///
+ /// This should generally only be called while the page table is not active. In particular, any
+ /// change that may require break-before-make per the architecture must be made while the page
+ /// table is inactive. Mapping a previously unmapped memory range may be done while the page
+ /// table is active.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
+ ///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
+ /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
+ /// largest virtual address covered by the page table given its root level.
+ pub fn modify_range(&mut self, range: &MemoryRegion, f: &PteUpdater) -> Result<(), MapError> {
+ self.mapping.modify_range(range, f)
+ }
}
#[cfg(test)]
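A minimal sketch of calling the new `modify_range`, not part of the patch itself; crate-internal scope assumed as above. The updater sets a software flag on every descriptor covering the range; note that the range argument it receives may be wider than the requested region, per the doc comment above.

    use crate::paging::{Attributes, MemoryRegion, VaRange, PAGE_SIZE};

    fn sketch_set_swflag() {
        let mut lmap = LinearMap::new(1, 1, 4096, VaRange::Lower);
        lmap.map_range(&MemoryRegion::new(0, PAGE_SIZE), Attributes::NORMAL)
            .unwrap();
        lmap.modify_range(&MemoryRegion::new(0, PAGE_SIZE), &|_range, entry, _level| {
            // Set SWFLAG_0, clear nothing.
            entry.modify_flags(Attributes::SWFLAG_0, Attributes::empty());
            Ok(())
        })
        .unwrap();
    }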
@@ -175,14 +200,20 @@ mod tests {
// A single byte at the start of the address space.
let mut pagetable = LinearMap::new(1, 1, 4096, VaRange::Lower);
assert_eq!(
- pagetable.map_range(&MemoryRegion::new(0, 1), Attributes::NORMAL),
+ pagetable.map_range(
+ &MemoryRegion::new(0, 1),
+ Attributes::NORMAL | Attributes::VALID
+ ),
Ok(())
);
// Two pages at the start of the address space.
let mut pagetable = LinearMap::new(1, 1, 4096, VaRange::Lower);
assert_eq!(
- pagetable.map_range(&MemoryRegion::new(0, PAGE_SIZE * 2), Attributes::NORMAL),
+ pagetable.map_range(
+ &MemoryRegion::new(0, PAGE_SIZE * 2),
+ Attributes::NORMAL | Attributes::VALID
+ ),
Ok(())
);
@@ -194,7 +225,7 @@ mod tests {
MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
MAX_ADDRESS_FOR_ROOT_LEVEL_1
),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -206,7 +237,7 @@ mod tests {
assert_eq!(
pagetable.map_range(
&MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -219,7 +250,7 @@ mod tests {
assert_eq!(
pagetable.map_range(
&MemoryRegion::new(PAGE_SIZE, PAGE_SIZE + 1),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -229,7 +260,7 @@ mod tests {
assert_eq!(
pagetable.map_range(
&MemoryRegion::new(PAGE_SIZE, PAGE_SIZE * 3),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -242,7 +273,7 @@ mod tests {
MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
MAX_ADDRESS_FOR_ROOT_LEVEL_1
),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -254,7 +285,7 @@ mod tests {
assert_eq!(
pagetable.map_range(
&MemoryRegion::new(LEVEL_2_BLOCK_SIZE, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -271,7 +302,7 @@ mod tests {
MAX_ADDRESS_FOR_ROOT_LEVEL_1,
MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1,
),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Err(MapError::AddressRange(VirtualAddress(
MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
@@ -282,7 +313,7 @@ mod tests {
assert_eq!(
pagetable.map_range(
&MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Err(MapError::AddressRange(VirtualAddress(
MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
@@ -394,7 +425,10 @@ mod tests {
// Test that block mapping is used when the PA is appropriately aligned...
let mut pagetable = LinearMap::new(1, 1, 1 << 30, VaRange::Lower);
pagetable
- .map_range(&MemoryRegion::new(0, 1 << 30), Attributes::NORMAL)
+ .map_range(
+ &MemoryRegion::new(0, 1 << 30),
+ Attributes::NORMAL | Attributes::VALID,
+ )
.unwrap();
assert_eq!(
pagetable.mapping.root.mapping_level(VirtualAddress(0)),
@@ -404,11 +438,86 @@ mod tests {
// ...but not when it is not.
let mut pagetable = LinearMap::new(1, 1, 1 << 29, VaRange::Lower);
pagetable
- .map_range(&MemoryRegion::new(0, 1 << 30), Attributes::NORMAL)
+ .map_range(
+ &MemoryRegion::new(0, 1 << 30),
+ Attributes::NORMAL | Attributes::VALID,
+ )
.unwrap();
assert_eq!(
pagetable.mapping.root.mapping_level(VirtualAddress(0)),
Some(2)
);
}
+
+ fn make_map() -> LinearMap {
+ let mut lmap = LinearMap::new(1, 1, 4096, VaRange::Lower);
+ // Mapping VA range 0x0 - 0x2000 to PA range 0x1000 - 0x3000
+ lmap.map_range(&MemoryRegion::new(0, PAGE_SIZE * 2), Attributes::NORMAL)
+ .unwrap();
+ lmap
+ }
+
+ #[test]
+ fn update_backwards_range() {
+ let mut lmap = make_map();
+ assert!(lmap
+ .modify_range(
+ &MemoryRegion::new(PAGE_SIZE * 2, 1),
+ &|_range, entry, _level| {
+ entry
+ .modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap());
+ Ok(())
+ },
+ )
+ .is_err());
+ }
+
+ #[test]
+ fn update_range() {
+ let mut lmap = make_map();
+ lmap.modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry, level| {
+ if level == 3 || !entry.is_table_or_page() {
+ entry.modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap());
+ }
+ Ok(())
+ })
+ .unwrap();
+ lmap.modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|range, entry, level| {
+ if level == 3 || !entry.is_table_or_page() {
+ assert!(entry.flags().unwrap().contains(Attributes::SWFLAG_0));
+ assert_eq!(range.end() - range.start(), PAGE_SIZE);
+ }
+ Ok(())
+ })
+ .unwrap();
+ }
+
+ #[test]
+ fn breakup_invalid_block() {
+ const BLOCK_RANGE: usize = 0x200000;
+
+ let mut lmap = LinearMap::new(1, 1, 0x1000, VaRange::Lower);
+ lmap.map_range(
+ &MemoryRegion::new(0, BLOCK_RANGE),
+ Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::SWFLAG_0,
+ )
+ .unwrap();
+ lmap.map_range(
+ &MemoryRegion::new(0, PAGE_SIZE),
+ Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
+ )
+ .unwrap();
+ lmap.modify_range(
+ &MemoryRegion::new(0, BLOCK_RANGE),
+ &|range, entry, level| {
+ if level == 3 {
+ let has_swflag = entry.flags().unwrap().contains(Attributes::SWFLAG_0);
+ let is_first_page = range.start().0 == 0usize;
+ assert!(has_swflag != is_first_page);
+ }
+ Ok(())
+ },
+ )
+ .unwrap();
+ }
}