author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2023-11-10 00:10:05 +0000
committer  Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2023-11-10 00:10:05 +0000
commit     0b53fabf761c142e242c60aa4251b27163c9a7fa (patch)
tree       cfa77d68d07c0e616791956106a96310afc08e49
parent     936bb126efd35e0c0f32ef1473f6bfb0e7540e9f (diff)
parent     865f062b6bb380cc789be47a651b52e8191a408d (diff)
download   aarch64-paging-android14-qpr2-release.tar.gz
Change-Id: I12cf7408cf2d8add7c53c0c1884bfb21c8077a1b
-rw-r--r--  .cargo_vcs_info.json         2
-rw-r--r--  .github/dependabot.yml      10
-rw-r--r--  .github/workflows/rust.yml   9
-rw-r--r--  Android.bp                   4
-rw-r--r--  CHANGELOG.md                24
-rw-r--r--  Cargo.toml                   2
-rw-r--r--  Cargo.toml.orig              7
-rw-r--r--  METADATA                     8
-rw-r--r--  src/idmap.rs               351
-rw-r--r--  src/lib.rs                 192
-rw-r--r--  src/linearmap.rs           137
-rw-r--r--  src/paging.rs              288
12 files changed, 874 insertions, 160 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index 0897c1f..a1fe0b1 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,6 +1,6 @@
{
"git": {
- "sha1": "922f88f390d2d14f07f8c2122204fcb3ce2afab8"
+ "sha1": "f58c903c1fe461a2b06944b8bc5d71653ce2fb02"
},
"path_in_vcs": ""
} \ No newline at end of file
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..98e44ee
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,10 @@
+version: 2
+updates:
+ - package-ecosystem: "cargo"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index a65965c..dc32484 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -12,7 +12,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
- name: Install aarch64 toolchain
uses: actions-rs/toolchain@v1
with:
@@ -30,10 +30,15 @@ jobs:
uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
+ - name: Run clippy for aarch64
+ uses: actions-rs/clippy-check@v1
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ args: --target=aarch64-unknown-none
format:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
- name: Format Rust code
run: cargo fmt --all -- --check
diff --git a/Android.bp b/Android.bp
index 5255673..f68cdc4 100644
--- a/Android.bp
+++ b/Android.bp
@@ -45,7 +45,7 @@ rust_test {
host_supported: true,
crate_name: "aarch64_paging",
cargo_env_compat: true,
- cargo_pkg_version: "0.4.1",
+ cargo_pkg_version: "0.5.0",
srcs: ["src/lib.rs"],
test_suites: ["general-tests"],
auto_gen_config: true,
@@ -71,7 +71,7 @@ rust_library_rlib {
host_supported: true,
crate_name: "aarch64_paging",
cargo_env_compat: true,
- cargo_pkg_version: "0.4.1",
+ cargo_pkg_version: "0.5.0",
srcs: ["src/lib.rs"],
edition: "2021",
features: [
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fb2c943..e41ca1e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,29 @@
# Changelog
+## 0.5.0
+
+### Bug fixes
+
+- Reject the `TABLE_OR_PAGE` flag when passed to `map_range`, which would otherwise cause corrupt
+ table mappings to be created.
+
+### Breaking changes
+
+- Updated `modify_range` to split block entries before traversing them, and pass only the
+ descriptors and subregions that are completely covered by the given region to the updater callback
+ function.
+- Updated `modify_range` to only pass block or page descriptors to the callback function and prevent
+ them from being converted into table descriptors inadvertently.
+- Added strict break-before-make (BBM) checks to `map_range` and `modify_range`.
+- Marked `activate` and `deactivate` methods as unsafe.
+
+### New features
+
+- Added new `map_range()` alternative `map_range_with_constraints()` with an extra `constraints`
+ argument.
+- Added `walk_range` method that iterates over all block or page descriptors that intersect with a
+ given region, without permitting the callback to make changes to the descriptors.
+
## 0.4.1
### Bug fixes
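Taken together, the 0.5.0 entries above reshape the mapping API. As a hedged illustration (not part of the diff), here is a minimal sketch of the new `map_range_with_constraints` and `walk_range` calls, based on the signatures added in this change; the addresses, attribute choices and the `example` function are illustrative assumptions, not crate API:

```rust
use aarch64_paging::{
    idmap::IdMap,
    paging::{Attributes, Constraints, MemoryRegion},
};

fn example() {
    let mut idmap = IdMap::new(1, 1);
    // Force page-granular mappings so the range can later be split or
    // partially remapped without violating break-before-make rules.
    idmap
        .map_range_with_constraints(
            &MemoryRegion::new(0x80200000, 0x80400000),
            Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
            Constraints::NO_BLOCK_MAPPINGS,
        )
        .unwrap();
    // Read-only traversal of the descriptors covering the range; the callback
    // may inspect the descriptors but never modify them.
    idmap
        .walk_range(
            &MemoryRegion::new(0x80200000, 0x80400000),
            &mut |_range, descriptor, _level| {
                assert!(descriptor.is_valid());
                Ok(())
            },
        )
        .unwrap();
}
```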
diff --git a/Cargo.toml b/Cargo.toml
index 3e58212..1a30de6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,7 +12,7 @@
[package]
edition = "2021"
name = "aarch64-paging"
-version = "0.4.1"
+version = "0.5.0"
authors = [
"Ard Biesheuvel <ardb@google.com>",
"Andrew Walbran <qwandor@google.com>",
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index ac9236e..0e61775 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -1,10 +1,13 @@
[package]
name = "aarch64-paging"
-version = "0.4.1"
+version = "0.5.0"
edition = "2021"
license = "MIT OR Apache-2.0"
description = "A library to manipulate AArch64 VMSA EL1 page tables."
-authors = ["Ard Biesheuvel <ardb@google.com>", "Andrew Walbran <qwandor@google.com>"]
+authors = [
+ "Ard Biesheuvel <ardb@google.com>",
+ "Andrew Walbran <qwandor@google.com>",
+]
repository = "https://github.com/google/aarch64-paging"
keywords = ["arm", "aarch64", "cortex-a", "vmsa", "pagetable"]
categories = ["embedded", "no-std", "hardware-support"]
diff --git a/METADATA b/METADATA
index a438ff5..78ae9f5 100644
--- a/METADATA
+++ b/METADATA
@@ -11,13 +11,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/aarch64-paging/aarch64-paging-0.4.1.crate"
+ value: "https://static.crates.io/crates/aarch64-paging/aarch64-paging-0.5.0.crate"
}
- version: "0.4.1"
+ version: "0.5.0"
license_type: NOTICE
last_upgrade_date {
year: 2023
- month: 7
- day: 21
+ month: 10
+ day: 25
}
}
diff --git a/src/idmap.rs b/src/idmap.rs
index 870ccf8..8b25356 100644
--- a/src/idmap.rs
+++ b/src/idmap.rs
@@ -8,8 +8,8 @@
use crate::{
paging::{
- deallocate, Attributes, MemoryRegion, PageTable, PhysicalAddress, PteUpdater, Translation,
- VaRange, VirtualAddress,
+ deallocate, Attributes, Constraints, Descriptor, MemoryRegion, PageTable, PhysicalAddress,
+ Translation, VaRange, VirtualAddress,
},
MapError, Mapping,
};
@@ -57,7 +57,7 @@ impl Translation for IdTranslation {
///
/// # Example
///
-/// ```
+/// ```no_run
/// use aarch64_paging::{
/// idmap::IdMap,
/// paging::{Attributes, MemoryRegion},
@@ -71,24 +71,31 @@ impl Translation for IdTranslation {
/// // Map a 2 MiB region of memory as read-write.
/// idmap.map_range(
/// &MemoryRegion::new(0x80200000, 0x80400000),
-/// Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::EXECUTE_NEVER | Attributes::VALID,
+/// Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
/// ).unwrap();
-/// // Set `TTBR0_EL1` to activate the page table.
-/// # #[cfg(target_arch = "aarch64")]
-/// idmap.activate();
+/// // SAFETY: Everything the program uses is within the 2 MiB region mapped above.
+/// unsafe {
+/// // Set `TTBR0_EL1` to activate the page table.
+/// idmap.activate();
+/// }
///
/// // Write something to the memory...
///
-/// // Restore `TTBR0_EL1` to its earlier value while we modify the page table.
-/// # #[cfg(target_arch = "aarch64")]
-/// idmap.deactivate();
+/// // SAFETY: The program will only use memory within the initially mapped region until `idmap` is
+/// // reactivated below.
+/// unsafe {
+/// // Restore `TTBR0_EL1` to its earlier value while we modify the page table.
+/// idmap.deactivate();
+/// }
/// // Now change the mapping to read-only and executable.
/// idmap.map_range(
/// &MemoryRegion::new(0x80200000, 0x80400000),
/// Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::READ_ONLY | Attributes::VALID,
/// ).unwrap();
-/// # #[cfg(target_arch = "aarch64")]
-/// idmap.activate();
+/// // SAFETY: Everything the program will use is mapped in by this page table.
+/// unsafe {
+/// idmap.activate();
+/// }
/// ```
#[derive(Debug)]
pub struct IdMap {
@@ -108,8 +115,16 @@ impl IdMap {
///
/// Panics if a previous value of `TTBR0_EL1` is already saved and not yet used by a call to
/// `deactivate`.
- #[cfg(target_arch = "aarch64")]
- pub fn activate(&mut self) {
+ ///
+ /// In test builds or builds that do not target aarch64, the `TTBR0_EL1` access is omitted.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the page table doesn't unmap any memory which the program is
+ /// using, or introduce aliases which break Rust's aliasing rules. The page table must not be
+ /// dropped as long as its mappings are required, as it will automatically be deactivated when
+ /// it is dropped.
+ pub unsafe fn activate(&mut self) {
self.mapping.activate()
}
@@ -117,10 +132,16 @@ impl IdMap {
/// [`activate`](Self::activate) was called, and invalidating the TLB for this page table's
/// configured ASID.
///
- /// Panics if there is no saved `TTRB0_EL1` value because `activate` has not previously been
+ /// Panics if there is no saved `TTBR0_EL1` value because `activate` has not previously been
/// called.
- #[cfg(target_arch = "aarch64")]
- pub fn deactivate(&mut self) {
+ ///
+ /// In test builds or builds that do not target aarch64, the `TTBR0_EL1` access is omitted.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the previous page table which this is switching back to doesn't
+ /// unmap any memory which the program is using.
+ pub unsafe fn deactivate(&mut self) {
self.mapping.deactivate()
}
@@ -139,15 +160,65 @@ impl IdMap {
///
/// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
/// largest virtual address covered by the page table given its root level.
+ ///
+ /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
+ ///
+ /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
+ /// and modifying those would violate architectural break-before-make (BBM) requirements.
pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) -> Result<(), MapError> {
+ self.map_range_with_constraints(range, flags, Constraints::empty())
+ }
+
+ /// Maps the given range of virtual addresses to the identical physical addresses with the
+ /// given flags, taking the given constraints into account.
+ ///
+ /// This should generally only be called while the page table is not active. In particular, any
+ /// change that may require break-before-make per the architecture must be made while the page
+ /// table is inactive. Mapping a previously unmapped memory range may be done while the page
+ /// table is active. This function writes block and page entries, but only maps them if `flags`
+ /// contains `Attributes::VALID`, otherwise the entries remain invalid.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
+ /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
+ /// largest virtual address covered by the page table given its root level.
+ ///
+ /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
+ ///
+ /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
+ /// and modifying those would violate architectural break-before-make (BBM) requirements.
+ pub fn map_range_with_constraints(
+ &mut self,
+ range: &MemoryRegion,
+ flags: Attributes,
+ constraints: Constraints,
+ ) -> Result<(), MapError> {
let pa = IdTranslation::virtual_to_physical(range.start());
- self.mapping.map_range(range, pa, flags)
+ self.mapping.map_range(range, pa, flags, constraints)
}
- /// Applies the provided updater function to a number of PTEs corresponding to a given memory range.
+ /// Applies the provided updater function to the page table descriptors covering a given
+ /// memory range.
+ ///
+ /// This may involve splitting block entries if the provided range is not currently mapped
+ /// down to its precise boundaries. For visiting all the descriptors covering a memory range
+ /// without potential splitting (and no descriptor updates), use
+ /// [`walk_range`](Self::walk_range) instead.
///
- /// The virtual address range passed to the updater function may be expanded compared to the
- /// `range` parameter, due to alignment to block boundaries.
+ /// The updater function receives the following arguments:
+ ///
+ /// - The virtual address range mapped by each page table descriptor. A new descriptor will
+ /// have been allocated before the invocation of the updater function if a page table split
+ /// was needed.
+ /// - A mutable reference to the page table descriptor that permits modifications.
+ /// - The level of a translation table the descriptor belongs to.
+ ///
+ /// The updater function should return:
+ ///
+ /// - `Ok` to continue updating the remaining entries.
+ /// - `Err` to signal an error and stop updating the remaining entries.
///
/// This should generally only be called while the page table is not active. In particular, any
/// change that may require break-before-make per the architecture must be made while the page
@@ -162,16 +233,52 @@ impl IdMap {
///
/// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
/// largest virtual address covered by the page table given its root level.
- pub fn modify_range(&mut self, range: &MemoryRegion, f: &PteUpdater) -> Result<(), MapError> {
+ ///
+ /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
+ /// and modifying those would violate architectural break-before-make (BBM) requirements.
+ pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
+ where
+ F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
+ {
self.mapping.modify_range(range, f)
}
+
+ /// Applies the provided callback function to the page table descriptors covering a given
+ /// memory range.
+ ///
+ /// The callback function receives the following arguments:
+ ///
+ /// - The full virtual address range mapped by each visited page table descriptor, which may
+ /// exceed the original range passed to `walk_range`, due to alignment to block boundaries.
+ /// - The page table descriptor itself.
+ /// - The level of a translation table the descriptor belongs to.
+ ///
+ /// The callback function should return:
+ ///
+ /// - `Ok` to continue visiting the remaining entries.
+ /// - `Err` to signal an error and stop visiting the remaining entries.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
+ ///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
+ /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
+ /// largest virtual address covered by the page table given its root level.
+ pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
+ where
+ F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
+ {
+ self.mapping.walk_range(range, f)
+ }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
- paging::{Attributes, MemoryRegion, PAGE_SIZE},
+ paging::{Attributes, MemoryRegion, BITS_PER_LEVEL, PAGE_SIZE},
MapError, VirtualAddress,
};
@@ -181,6 +288,11 @@ mod tests {
fn map_valid() {
// A single byte at the start of the address space.
let mut idmap = IdMap::new(1, 1);
+ // SAFETY: This doesn't actually activate the page table in tests; it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
assert_eq!(
idmap.map_range(
&MemoryRegion::new(0, 1),
@@ -191,6 +303,11 @@ mod tests {
// Two pages at the start of the address space.
let mut idmap = IdMap::new(1, 1);
+ // SAFETY: This doesn't actually activate the page table in tests; it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
assert_eq!(
idmap.map_range(
&MemoryRegion::new(0, PAGE_SIZE * 2),
@@ -201,6 +318,11 @@ mod tests {
// A single byte at the end of the address space.
let mut idmap = IdMap::new(1, 1);
+ // SAFETY: This doesn't actually activate the page table in tests; it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
assert_eq!(
idmap.map_range(
&MemoryRegion::new(
@@ -214,6 +336,11 @@ mod tests {
// Two pages, on the boundary between two subtables.
let mut idmap = IdMap::new(1, 1);
+ // SAFETY: This doesn't actually activate the page table in tests; it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
assert_eq!(
idmap.map_range(
&MemoryRegion::new(PAGE_SIZE * 1023, PAGE_SIZE * 1025),
@@ -224,6 +351,11 @@ mod tests {
// The entire valid address space.
let mut idmap = IdMap::new(1, 1);
+ // SAFETY: This doesn't actually activate the page table in tests; it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
assert_eq!(
idmap.map_range(
&MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
@@ -234,6 +366,161 @@ mod tests {
}
#[test]
+ fn map_break_before_make() {
+ const BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
+ let mut idmap = IdMap::new(1, 1);
+ idmap
+ .map_range_with_constraints(
+ &MemoryRegion::new(BLOCK_SIZE, 2 * BLOCK_SIZE),
+ Attributes::NORMAL | Attributes::VALID,
+ Constraints::NO_BLOCK_MAPPINGS,
+ )
+ .unwrap();
+ // SAFETY: This doesn't actually activate the page table in tests; it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
+
+ // Splitting a range is permitted if it was mapped down to pages
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(BLOCK_SIZE, BLOCK_SIZE + PAGE_SIZE),
+ Attributes::NORMAL | Attributes::VALID,
+ ),
+ Ok(())
+ );
+
+ let mut idmap = IdMap::new(1, 1);
+ idmap
+ .map_range(
+ &MemoryRegion::new(BLOCK_SIZE, 2 * BLOCK_SIZE),
+ Attributes::NORMAL | Attributes::VALID,
+ )
+ .ok();
+ // SAFETY: This doesn't actually activate the page table in tests; it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
+
+ // Extending a range is fine even if there are block mappings
+ // in the middle
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(BLOCK_SIZE - PAGE_SIZE, 2 * BLOCK_SIZE + PAGE_SIZE),
+ Attributes::NORMAL | Attributes::VALID,
+ ),
+ Ok(())
+ );
+
+ // Splitting a range is not permitted
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(BLOCK_SIZE, BLOCK_SIZE + PAGE_SIZE),
+ Attributes::NORMAL | Attributes::VALID,
+ ),
+ Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
+ BLOCK_SIZE,
+ BLOCK_SIZE + PAGE_SIZE
+ )))
+ );
+
+ // Remapping a partially live range read-only is only permitted
+ // if it does not require splitting
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(0, BLOCK_SIZE + PAGE_SIZE),
+ Attributes::NORMAL | Attributes::VALID | Attributes::READ_ONLY,
+ ),
+ Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
+ 0,
+ BLOCK_SIZE + PAGE_SIZE
+ )))
+ );
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(0, BLOCK_SIZE),
+ Attributes::NORMAL | Attributes::VALID | Attributes::READ_ONLY,
+ ),
+ Ok(())
+ );
+
+ // Changing the memory type is not permitted
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(0, BLOCK_SIZE),
+ Attributes::DEVICE_NGNRE | Attributes::VALID | Attributes::NON_GLOBAL,
+ ),
+ Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
+ 0, BLOCK_SIZE
+ )))
+ );
+
+ // Making a range invalid is only permitted if it does not require splitting
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(PAGE_SIZE, BLOCK_SIZE + PAGE_SIZE),
+ Attributes::NORMAL,
+ ),
+ Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
+ PAGE_SIZE,
+ BLOCK_SIZE + PAGE_SIZE
+ )))
+ );
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(PAGE_SIZE, BLOCK_SIZE),
+ Attributes::NORMAL,
+ ),
+ Ok(())
+ );
+
+ // Creating a new valid entry is always permitted
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(0, 2 * PAGE_SIZE),
+ Attributes::NORMAL | Attributes::VALID,
+ ),
+ Ok(())
+ );
+
+ // Setting the non-global attribute is permitted
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(0, PAGE_SIZE),
+ Attributes::NORMAL | Attributes::VALID | Attributes::NON_GLOBAL,
+ ),
+ Ok(())
+ );
+
+ // Removing the non-global attribute from a live mapping is not permitted
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(0, PAGE_SIZE),
+ Attributes::NORMAL | Attributes::VALID,
+ ),
+ Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
+ 0, PAGE_SIZE
+ )))
+ );
+
+ // SAFETY: This doesn't actually deactivate the page table in tests; it just treats it as
+ // inactive for the sake of BBM rules.
+ unsafe {
+ idmap.deactivate();
+ }
+ // Removing the non-global attribute from an inactive mapping is permitted
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(0, PAGE_SIZE),
+ Attributes::NORMAL | Attributes::VALID,
+ ),
+ Ok(())
+ );
+ }
+
+ #[test]
fn map_out_of_range() {
let mut idmap = IdMap::new(1, 1);
@@ -274,6 +561,11 @@ mod tests {
| Attributes::VALID,
)
.unwrap();
+ // SAFETY: This doesn't actually activate the page table in tests; it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
idmap
}
@@ -295,6 +587,14 @@ mod tests {
#[test]
fn update_range() {
let mut idmap = make_map();
+ assert!(idmap
+ .modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry, level| {
+ if level == 3 || !entry.is_table_or_page() {
+ entry.modify_flags(Attributes::SWFLAG_0, Attributes::NON_GLOBAL);
+ }
+ Ok(())
+ })
+ .is_err());
idmap
.modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry, level| {
if level == 3 || !entry.is_table_or_page() {
@@ -319,6 +619,11 @@ mod tests {
fn breakup_invalid_block() {
const BLOCK_RANGE: usize = 0x200000;
let mut idmap = IdMap::new(1, 1);
+ // SAFETY: This doesn't actually activate the page table in tests; it just treats it as
+ // active for the sake of BBM rules.
+ unsafe {
+ idmap.activate();
+ }
idmap
.map_range(
&MemoryRegion::new(0, BLOCK_RANGE),
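With the `PteUpdater` type alias gone, `modify_range` now accepts any `Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()>`. A hedged sketch of the new callback shape (the `set_software_flag` helper is illustrative, not crate API):

```rust
use aarch64_paging::{
    idmap::IdMap,
    paging::{Attributes, MemoryRegion, PAGE_SIZE},
};

fn set_software_flag(idmap: &mut IdMap) {
    idmap
        .modify_range(&MemoryRegion::new(0, PAGE_SIZE), &|_range, entry, _level| {
            // Only block and page descriptors reach the callback now. Setting a
            // software flag never flips TABLE_OR_PAGE, so modify_flags will not
            // panic, and the change is BBM-safe even on a live mapping.
            entry.modify_flags(Attributes::SWFLAG_0, Attributes::empty());
            Ok(())
        })
        .unwrap();
}
```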
diff --git a/src/lib.rs b/src/lib.rs
index 3d4aeac..a1ccd97 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -16,7 +16,7 @@
//!
//! # Example
//!
-//! ```
+//! ```no_run
//! # #[cfg(feature = "alloc")] {
//! use aarch64_paging::{
//! idmap::IdMap,
@@ -28,18 +28,21 @@
//!
//! // Create a new page table with identity mapping.
//! let mut idmap = IdMap::new(ASID, ROOT_LEVEL);
-//! // Map a 2 MiB region of memory as read-only.
+//! // Map a 2 MiB region of memory as read-write.
//! idmap.map_range(
//! &MemoryRegion::new(0x80200000, 0x80400000),
-//! Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::READ_ONLY | Attributes::VALID,
+//! Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
//! ).unwrap();
-//! // Set `TTBR0_EL1` to activate the page table.
-//! # #[cfg(target_arch = "aarch64")]
-//! idmap.activate();
+//! // SAFETY: Everything the program uses is within the 2 MiB region mapped above.
+//! unsafe {
+//! // Set `TTBR0_EL1` to activate the page table.
+//! idmap.activate();
+//! }
//! # }
//! ```
#![no_std]
+#![deny(clippy::undocumented_unsafe_blocks)]
#[cfg(feature = "alloc")]
pub mod idmap;
@@ -54,7 +57,7 @@ extern crate alloc;
use core::arch::asm;
use core::fmt::{self, Display, Formatter};
use paging::{
- Attributes, Descriptor, MemoryRegion, PhysicalAddress, PteUpdater, RootTable, Translation,
+ Attributes, Constraints, Descriptor, MemoryRegion, PhysicalAddress, RootTable, Translation,
VaRange, VirtualAddress,
};
@@ -70,6 +73,10 @@ pub enum MapError {
RegionBackwards(MemoryRegion),
/// There was an error while updating a page table entry.
PteUpdateFault(Descriptor),
+ /// The requested flags are not supported for this mapping
+ InvalidFlags(Attributes),
+ /// Updating the range violates break-before-make rules and the mapping is live
+ BreakBeforeMakeViolation(MemoryRegion),
}
impl Display for MapError {
@@ -85,6 +92,12 @@ impl Display for MapError {
Self::PteUpdateFault(desc) => {
write!(f, "Error updating page table entry {:?}", desc)
}
+ Self::InvalidFlags(flags) => {
+ write!(f, "Flags {flags:?} unsupported for mapping.")
+ }
+ Self::BreakBeforeMakeViolation(region) => {
+ write!(f, "Cannot remap region {region} while translation is live.")
+ }
}
}
}
@@ -115,20 +128,36 @@ impl<T: Translation + Clone> Mapping<T> {
}
}
+ /// Returns whether this mapping is currently active.
+ pub fn active(&self) -> bool {
+ self.previous_ttbr.is_some()
+ }
+
/// Activates the page table by setting `TTBRn_EL1` to point to it, and saves the previous value
/// of `TTBRn_EL1` so that it may later be restored by [`deactivate`](Self::deactivate).
///
/// Panics if a previous value of `TTBRn_EL1` is already saved and not yet used by a call to
/// `deactivate`.
- #[cfg(target_arch = "aarch64")]
- pub fn activate(&mut self) {
- assert!(self.previous_ttbr.is_none());
+ ///
+ /// In test builds or builds that do not target aarch64, the `TTBRn_EL1` access is omitted.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the page table doesn't unmap any memory which the program is
+ /// using, or introduce aliases which break Rust's aliasing rules. The page table must not be
+ /// dropped as long as its mappings are required, as it will automatically be deactivated when
+ /// it is dropped.
+ pub unsafe fn activate(&mut self) {
+ assert!(!self.active());
+
+ #[allow(unused)]
+ let mut previous_ttbr = usize::MAX;
- let mut previous_ttbr;
+ #[cfg(all(not(test), target_arch = "aarch64"))]
+ // SAFETY: Safe because we trust that self.root.to_physical() returns a valid physical
+ // address of a page table, and the `Drop` implementation will reset `TTBRn_EL1` before it
+ // becomes invalid.
unsafe {
- // Safe because we trust that self.root.to_physical() returns a valid physical address
- // of a page table, and the `Drop` implementation will reset `TTBRn_EL1` before it
- // becomes invalid.
match self.root.va_range() {
VaRange::Lower => asm!(
"mrs {previous_ttbr}, ttbr0_el1",
@@ -157,11 +186,20 @@ impl<T: Translation + Clone> Mapping<T> {
///
/// Panics if there is no saved `TTBRn_EL1` value because `activate` has not previously been
/// called.
- #[cfg(target_arch = "aarch64")]
- pub fn deactivate(&mut self) {
+ ///
+ /// In test builds or builds that do not target aarch64, the `TTBRn_EL1` access is omitted.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the previous page table which this is switching back to doesn't
+ /// unmap any memory which the program is using.
+ pub unsafe fn deactivate(&mut self) {
+ assert!(self.active());
+
+ #[cfg(all(not(test), target_arch = "aarch64"))]
+ // SAFETY: Safe because this just restores the previously saved value of `TTBRn_EL1`, which
+ // must have been valid.
unsafe {
- // Safe because this just restores the previously saved value of `TTBRn_EL1`, which must
- // have been valid.
match self.root.va_range() {
VaRange::Lower => asm!(
"msr ttbr0_el1, {ttbrval}",
@@ -188,8 +226,60 @@ impl<T: Translation + Clone> Mapping<T> {
self.previous_ttbr = None;
}
+ /// Checks whether the given range can be mapped or updated while the translation is live,
+ /// without violating architectural break-before-make (BBM) requirements.
+ fn check_range_bbm<F>(&self, range: &MemoryRegion, updater: &F) -> Result<(), MapError>
+ where
+ F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
+ {
+ self.walk_range(
+ range,
+ &mut |mr: &MemoryRegion, d: &Descriptor, level: usize| {
+ if d.is_valid() {
+ if !mr.is_block(level) {
+ // Cannot split a live block mapping
+ return Err(());
+ }
+
+ // Get the new flags and output address for this descriptor by applying
+ // the updater function to a copy
+ let (flags, oa) = {
+ let mut dd = *d;
+ updater(mr, &mut dd, level)?;
+ (dd.flags().ok_or(())?, dd.output_address())
+ };
+
+ if !flags.contains(Attributes::VALID) {
+ // Removing the valid bit is always ok
+ return Ok(());
+ }
+
+ if oa != d.output_address() {
+ // Cannot change output address on a live mapping
+ return Err(());
+ }
+
+ let desc_flags = d.flags().unwrap();
+
+ if (desc_flags ^ flags).intersects(Attributes::NORMAL) {
+ // Cannot change memory type
+ return Err(());
+ }
+
+ if (desc_flags - flags).contains(Attributes::NON_GLOBAL) {
+ // Cannot convert from non-global to global
+ return Err(());
+ }
+ }
+ Ok(())
+ },
+ )
+ .map_err(|_| MapError::BreakBeforeMakeViolation(range.clone()))?;
+ Ok(())
+ }
+
/// Maps the given range of virtual addresses to the corresponding range of physical addresses
- /// starting at `pa`, with the given flags.
+ /// starting at `pa`, with the given flags, taking the given constraints into account.
///
/// This should generally only be called while the page table is not active. In particular, any
/// change that may require break-before-make per the architecture must be made while the page
@@ -203,16 +293,31 @@ impl<T: Translation + Clone> Mapping<T> {
///
/// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
/// largest virtual address covered by the page table given its root level.
+ ///
+ /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
+ ///
+ /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
+ /// and modifying those would violate architectural break-before-make (BBM) requirements.
pub fn map_range(
&mut self,
range: &MemoryRegion,
pa: PhysicalAddress,
flags: Attributes,
+ constraints: Constraints,
) -> Result<(), MapError> {
- self.root.map_range(range, pa, flags)?;
+ if self.active() {
+ let c = |mr: &MemoryRegion, d: &mut Descriptor, lvl: usize| {
+ let mask = !(paging::granularity_at_level(lvl) - 1);
+ let pa = (mr.start() - range.start() + pa.0) & mask;
+ d.set(PhysicalAddress(pa), flags);
+ Ok(())
+ };
+ self.check_range_bbm(range, &c)?;
+ }
+ self.root.map_range(range, pa, flags, constraints)?;
#[cfg(target_arch = "aarch64")]
+ // SAFETY: Safe because this is just a memory barrier.
unsafe {
- // Safe because this is just a memory barrier.
asm!("dsb ishst");
}
Ok(())
@@ -220,8 +325,10 @@ impl<T: Translation + Clone> Mapping<T> {
/// Applies the provided updater function to a number of PTEs corresponding to a given memory range.
///
- /// The virtual address range passed to the updater function may be expanded compared to the
- /// `range` parameter, due to alignment to block boundaries.
+ /// This may involve splitting block entries if the provided range is not currently mapped
+ /// down to its precise boundaries. For visiting all the descriptors covering a memory range
+ /// without potential splitting (and no descriptor updates), use
+ /// [`walk_range`](Self::walk_range) instead.
///
/// This should generally only be called while the page table is not active. In particular, any
/// change that may require break-before-make per the architecture must be made while the page
@@ -236,22 +343,55 @@ impl<T: Translation + Clone> Mapping<T> {
///
/// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
/// largest virtual address covered by the page table given its root level.
- pub fn modify_range(&mut self, range: &MemoryRegion, f: &PteUpdater) -> Result<(), MapError> {
+ ///
+ /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
+ /// and modifying those would violate architectural break-before-make (BBM) requirements.
+ pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
+ where
+ F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
+ {
+ if self.active() {
+ self.check_range_bbm(range, f)?;
+ }
self.root.modify_range(range, f)?;
#[cfg(target_arch = "aarch64")]
+ // SAFETY: Safe because this is just a memory barrier.
unsafe {
- // Safe because this is just a memory barrier.
asm!("dsb ishst");
}
Ok(())
}
+
+ /// Applies the provided function to a number of PTEs corresponding to a given memory range.
+ ///
+ /// The virtual address range passed to the callback function may be expanded compared to the
+ /// `range` parameter, due to alignment to block boundaries.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
+ ///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
+ /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
+ /// largest virtual address covered by the page table given its root level.
+ pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
+ where
+ F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
+ {
+ self.root.walk_range(range, f)
+ }
}
impl<T: Translation + Clone> Drop for Mapping<T> {
fn drop(&mut self) {
if self.previous_ttbr.is_some() {
#[cfg(target_arch = "aarch64")]
- self.deactivate();
+ // SAFETY: When activate was called the caller promised that they wouldn't drop the page
+ // table until its mappings were no longer needed.
+ unsafe {
+ self.deactivate();
+ }
}
}
}
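Since `activate` and `deactivate` are now `unsafe` and mapping calls can fail with the new `BreakBeforeMakeViolation` variant, callers need a recovery path. A hedged sketch of one possible caller-side pattern (the `remap` helper and its safety assumptions are illustrative, not crate API):

```rust
use aarch64_paging::{
    idmap::IdMap,
    paging::{Attributes, MemoryRegion},
    MapError,
};

fn remap(idmap: &mut IdMap, region: &MemoryRegion) {
    let flags = Attributes::NORMAL | Attributes::VALID;
    match idmap.map_range(region, flags) {
        Ok(()) => {}
        Err(MapError::BreakBeforeMakeViolation(_)) => {
            // The update would break a live mapping; deactivate first.
            // SAFETY: Assumes the program does not touch `region` (or anything
            // else mapped only by `idmap`) until the table is reactivated.
            unsafe {
                idmap.deactivate();
            }
            idmap.map_range(region, flags).unwrap();
            // SAFETY: Assumes everything the program uses is mapped by `idmap`.
            unsafe {
                idmap.activate();
            }
        }
        Err(e) => panic!("unexpected mapping error: {}", e),
    }
}
```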
diff --git a/src/linearmap.rs b/src/linearmap.rs
index 921a683..be9d8aa 100644
--- a/src/linearmap.rs
+++ b/src/linearmap.rs
@@ -8,8 +8,8 @@
use crate::{
paging::{
- deallocate, is_aligned, Attributes, MemoryRegion, PageTable, PhysicalAddress, PteUpdater,
- Translation, VaRange, VirtualAddress, PAGE_SIZE,
+ deallocate, is_aligned, Attributes, Constraints, Descriptor, MemoryRegion, PageTable,
+ PhysicalAddress, Translation, VaRange, VirtualAddress, PAGE_SIZE,
},
MapError, Mapping,
};
@@ -112,24 +112,38 @@ impl LinearMap {
}
}
- /// Activates the page table by setting `TTBR0_EL1` to point to it, and saves the previous value
- /// of `TTBR0_EL1` so that it may later be restored by [`deactivate`](Self::deactivate).
+ /// Activates the page table by setting `TTBRn_EL1` to point to it, and saves the previous value
+ /// of `TTBRn_EL1` so that it may later be restored by [`deactivate`](Self::deactivate).
///
- /// Panics if a previous value of `TTBR0_EL1` is already saved and not yet used by a call to
+ /// Panics if a previous value of `TTBRn_EL1` is already saved and not yet used by a call to
/// `deactivate`.
- #[cfg(target_arch = "aarch64")]
- pub fn activate(&mut self) {
+ ///
+ /// In test builds or builds that do not target aarch64, the `TTBRn_EL1` access is omitted.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the page table doesn't unmap any memory which the program is
+ /// using, or introduce aliases which break Rust's aliasing rules. The page table must not be
+ /// dropped as long as its mappings are required, as it will automatically be deactivated when
+ /// it is dropped.
+ pub unsafe fn activate(&mut self) {
self.mapping.activate()
}
- /// Deactivates the page table, by setting `TTBR0_EL1` back to the value it had before
+ /// Deactivates the page table, by setting `TTBRn_EL1` back to the value it had before
/// [`activate`](Self::activate) was called, and invalidating the TLB for this page table's
/// configured ASID.
///
- /// Panics if there is no saved `TTRB0_EL1` value because `activate` has not previously been
+ /// Panics if there is no saved `TTBRn_EL1` value because `activate` has not previously been
/// called.
- #[cfg(target_arch = "aarch64")]
- pub fn deactivate(&mut self) {
+ ///
+ /// In test builds or builds that do not target aarch64, the `TTBRn_EL1` access is omitted.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the previous page table which this is switching back to doesn't
+ /// unmap any memory which the program is using.
+ pub unsafe fn deactivate(&mut self) {
self.mapping.deactivate()
}
@@ -151,19 +165,72 @@ impl LinearMap {
///
/// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
/// largest virtual address covered by the page table given its root level.
+ ///
+ /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
+ ///
+ /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
+ /// and modifying those would violate architectural break-before-make (BBM) requirements.
pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) -> Result<(), MapError> {
+ self.map_range_with_constraints(range, flags, Constraints::empty())
+ }
+
+ /// Maps the given range of virtual addresses to the corresponding physical addresses with the
+ /// given flags, taking the given constraints into account.
+ ///
+ /// This should generally only be called while the page table is not active. In particular, any
+ /// change that may require break-before-make per the architecture must be made while the page
+ /// table is inactive. Mapping a previously unmapped memory range may be done while the page
+ /// table is active. This function writes block and page entries, but only maps them if `flags`
+ /// contains `Attributes::VALID`, otherwise the entries remain invalid.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`MapError::InvalidVirtualAddress`] if adding the configured offset to any virtual
+ /// address within the `range` would result in overflow.
+ ///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
+ /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
+ /// largest virtual address covered by the page table given its root level.
+ ///
+ /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
+ ///
+ /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
+ /// and modifying those would violate architectural break-before-make (BBM) requirements.
+ pub fn map_range_with_constraints(
+ &mut self,
+ range: &MemoryRegion,
+ flags: Attributes,
+ constraints: Constraints,
+ ) -> Result<(), MapError> {
let pa = self
.mapping
.root
.translation()
.virtual_to_physical(range.start())?;
- self.mapping.map_range(range, pa, flags)
+ self.mapping.map_range(range, pa, flags, constraints)
}
- /// Applies the provided updater function to a number of PTEs corresponding to a given memory range.
+ /// Applies the provided updater function to the page table descriptors covering a given
+ /// memory range.
+ ///
+ /// This may involve splitting block entries if the provided range is not currently mapped
+ /// down to its precise boundaries. For visiting all the descriptors covering a memory range
+ /// without potential splitting (and no descriptor updates), use
+ /// [`walk_range`](Self::walk_range) instead.
///
- /// The virtual address range passed to the updater function may be expanded compared to the
- /// `range` parameter, due to alignment to block boundaries.
+ /// The updater function receives the following arguments:
+ ///
+ /// - The virtual address range mapped by each page table descriptor. A new descriptor will
+ /// have been allocated before the invocation of the updater function if a page table split
+ /// was needed.
+ /// - A mutable reference to the page table descriptor that permits modifications.
+ /// - The level of a translation table the descriptor belongs to.
+ ///
+ /// The updater function should return:
+ ///
+ /// - `Ok` to continue updating the remaining entries.
+ /// - `Err` to signal an error and stop updating the remaining entries.
///
/// This should generally only be called while the page table is not active. In particular, any
/// change that may require break-before-make per the architecture must be made while the page
@@ -178,9 +245,45 @@ impl LinearMap {
///
/// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
/// largest virtual address covered by the page table given its root level.
- pub fn modify_range(&mut self, range: &MemoryRegion, f: &PteUpdater) -> Result<(), MapError> {
+ ///
+ /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
+ /// and modifying those would violate architectural break-before-make (BBM) requirements.
+ pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
+ where
+ F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
+ {
self.mapping.modify_range(range, f)
}
+
+ /// Applies the provided callback function to the page table descriptors covering a given
+ /// memory range.
+ ///
+ /// The callback function receives the following arguments:
+ ///
+ /// - The full virtual address range mapped by each visited page table descriptor, which may
+ /// exceed the original range passed to `walk_range`, due to alignment to block boundaries.
+ /// - The page table descriptor itself.
+ /// - The level of a translation table the descriptor belongs to.
+ ///
+ /// The callback function should return:
+ ///
+ /// - `Ok` to continue visiting the remaining entries.
+ /// - `Err` to signal an error and stop visiting the remaining entries.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
+ ///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
+ /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
+ /// largest virtual address covered by the page table given its root level.
+ pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
+ where
+ F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
+ {
+ self.mapping.walk_range(range, f)
+ }
}
#[cfg(test)]
@@ -327,7 +430,7 @@ mod tests {
// One byte, with an offset which would map it to a negative IPA.
assert_eq!(
- pagetable.map_range(&MemoryRegion::new(0, 1), Attributes::NORMAL),
+ pagetable.map_range(&MemoryRegion::new(0, 1), Attributes::NORMAL,),
Err(MapError::InvalidVirtualAddress(VirtualAddress(0)))
);
}
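The same `walk_range` addition applies to `LinearMap`. A hedged sketch of a read-only query built on it (the `count_valid` helper is illustrative, not crate API):

```rust
use aarch64_paging::{linearmap::LinearMap, paging::MemoryRegion};

fn count_valid(map: &LinearMap, region: &MemoryRegion) -> usize {
    let mut valid = 0;
    // walk_range takes &self and an FnMut callback, so descriptors can be
    // inspected and tallied but never modified.
    map.walk_range(region, &mut |_range, descriptor, _level| {
        if descriptor.is_valid() {
            valid += 1;
        }
        Ok(())
    })
    .unwrap();
    valid
}
```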
diff --git a/src/paging.rs b/src/paging.rs
index 512bef2..9dac27b 100644
--- a/src/paging.rs
+++ b/src/paging.rs
@@ -124,7 +124,7 @@ impl Sub<usize> for PhysicalAddress {
/// Returns the size in bytes of the address space covered by a single entry in the page table at
/// the given level.
-fn granularity_at_level(level: usize) -> usize {
+pub(crate) fn granularity_at_level(level: usize) -> usize {
PAGE_SIZE << ((LEAF_LEVEL - level) * BITS_PER_LEVEL)
}
@@ -188,19 +188,10 @@ impl MemoryRegion {
}
/// Returns whether this region can be mapped at 'level' using block mappings only.
- fn is_block(&self, level: usize) -> bool {
+ pub(crate) fn is_block(&self, level: usize) -> bool {
let gran = granularity_at_level(level);
(self.0.start.0 | self.0.end.0) & (gran - 1) == 0
}
-
- /// Returns a new `MemoryRegion` based on this one but with the start aligned down and the end
- /// aligned up to the given alignment.
- fn align_out(&self, alignment: usize) -> Self {
- Self(
- VirtualAddress(align_down(self.0.start.0, alignment))
- ..VirtualAddress(align_up(self.0.end.0, alignment)),
- )
- }
}
impl From<Range<VirtualAddress>> for MemoryRegion {
@@ -221,23 +212,16 @@ impl Debug for MemoryRegion {
}
}
-/// A page table entry updater function; called repeatedly to update the state of a
-/// range of page table entries.
-///
-/// # Arguments
-///
-/// The updater function receives the following arguments:
-///
-/// - The full virtual address range mapped by the page table entry, which may be different than
-/// the original range passed to `modify_range`, due to alignment to block boundaries.
-/// - A page table entry whose state it may update.
-/// - The level of a translation table the entry belongs to.
-///
-/// # Return
-///
-/// - `Ok` to continue updating the remaining entries.
-/// - `Err` to signal an error during an update and stop updating the remaining entries.
-pub type PteUpdater = dyn Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()>;
+bitflags! {
+ /// Constraints on page table mappings
+ #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+ pub struct Constraints: usize {
+ /// Block mappings are not permitted, only page mappings
+ const NO_BLOCK_MAPPINGS = 1 << 0;
+ /// Use of the contiguous hint is not permitted
+ const NO_CONTIGUOUS_HINT = 1 << 1;
+ }
+}
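Since `Constraints` is an ordinary `bitflags` type, constraint sets compose with the usual set operations; a minimal sketch (the `pick_constraints` helper is illustrative only):

```rust
use aarch64_paging::paging::Constraints;

fn pick_constraints(strict: bool) -> Constraints {
    if strict {
        // Page-granular mappings only, and no contiguous hint.
        Constraints::NO_BLOCK_MAPPINGS | Constraints::NO_CONTIGUOUS_HINT
    } else {
        // The empty set means "no constraints", matching the behaviour of a
        // plain map_range call.
        Constraints::empty()
    }
}
```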
/// A complete hierarchy of page tables including all levels.
pub struct RootTable<T: Translation> {
@@ -278,15 +262,21 @@ impl<T: Translation> RootTable<T> {
/// the pages to the corresponding physical address range starting at `pa`. Block and page
/// entries will be written to, but will only be mapped if `flags` contains `Attributes::VALID`.
///
- /// Returns an error if the virtual address range is out of the range covered by the pagetable
+ /// Returns an error if the virtual address range is out of the range covered by the pagetable,
+ /// or if the `flags` argument has unsupported attributes set.
pub fn map_range(
&mut self,
range: &MemoryRegion,
pa: PhysicalAddress,
flags: Attributes,
+ constraints: Constraints,
) -> Result<(), MapError> {
+ if flags.contains(Attributes::TABLE_OR_PAGE) {
+ return Err(MapError::InvalidFlags(Attributes::TABLE_OR_PAGE));
+ }
self.verify_region(range)?;
- self.table.map_range(&self.translation, range, pa, flags);
+ self.table
+ .map_range(&self.translation, range, pa, flags, constraints);
Ok(())
}
@@ -305,11 +295,82 @@ impl<T: Translation> RootTable<T> {
&self.translation
}
- pub fn modify_range(&mut self, range: &MemoryRegion, f: &PteUpdater) -> Result<(), MapError> {
+ /// Applies the provided updater function to the page table descriptors covering a given
+ /// memory range.
+ ///
+ /// This may involve splitting block entries if the provided range is not currently mapped
+ /// down to its precise boundaries. For visiting all the descriptors covering a memory range
+ /// without potential splitting (and no descriptor updates), use
+ /// [`walk_range`](Self::walk_range) instead.
+ ///
+ /// The updater function receives the following arguments:
+ ///
+ /// - The virtual address range mapped by each page table descriptor. A new descriptor will
+ /// have been allocated before the invocation of the updater function if a page table split
+ /// was needed.
+ /// - A mutable reference to the page table descriptor that permits modifications.
+ /// - The level of a translation table the descriptor belongs to.
+ ///
+ /// The updater function should return:
+ ///
+ /// - `Ok` to continue updating the remaining entries.
+ /// - `Err` to signal an error and stop updating the remaining entries.
+ ///
+ /// This should generally only be called while the page table is not active. In particular, any
+ /// change that may require break-before-make per the architecture must be made while the page
+ /// table is inactive. Mapping a previously unmapped memory range may be done while the page
+ /// table is active.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
+ ///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
+ /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
+ /// largest virtual address covered by the page table given its root level.
+ ///
+ /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
+ /// and modifying those would violate architectural break-before-make (BBM) requirements.
+ pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
+ where
+ F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
+ {
self.verify_region(range)?;
self.table.modify_range(&self.translation, range, f)
}
+ /// Applies the provided callback function to the page table descriptors covering a given
+ /// memory range.
+ ///
+ /// The callback function receives the following arguments:
+ ///
+ /// - The full virtual address range mapped by each visited page table descriptor, which may
+ /// exceed the original range passed to `walk_range`, due to alignment to block boundaries.
+ /// - The page table descriptor itself.
+ /// - The level of a translation table the descriptor belongs to.
+ ///
+ /// The callback function should return:
+ ///
+ /// - `Ok` to continue visiting the remaining entries.
+ /// - `Err` to signal an error and stop visiting the remaining entries.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
+ ///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
+ /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
+ /// largest virtual address covered by the page table given its root level.
+ pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
+ where
+ F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
+ {
+ self.verify_region(range)?;
+ self.table.walk_range(&self.translation, range, f)
+ }
+
/// Returns the level of mapping used for the given virtual address:
/// - `None` if it is unmapped
/// - `Some(LEAF_LEVEL)` if it is mapped as a single page
@@ -446,18 +507,17 @@ impl<T: Translation> PageTableWithLevel<T> {
Self {
table,
level,
- _translation: PhantomData::default(),
+ _translation: PhantomData,
}
}
/// Returns a reference to the descriptor corresponding to a given virtual address.
- #[cfg(test)]
fn get_entry(&self, va: VirtualAddress) -> &Descriptor {
let shift = PAGE_SHIFT + (LEAF_LEVEL - self.level) * BITS_PER_LEVEL;
let index = (va.0 >> shift) % (1 << BITS_PER_LEVEL);
- // Safe because we know that the pointer is properly aligned, dereferenced and initialised,
- // and nothing else can access the page table while we hold a mutable reference to the
- // PageTableWithLevel (assuming it is not currently active).
+ // SAFETY: Safe because we know that the pointer is properly aligned, dereferenced and
+ // initialised, and nothing else can access the page table while we hold a mutable reference
+ // to the PageTableWithLevel (assuming it is not currently active).
let table = unsafe { self.table.as_ref() };
&table.entries[index]
}
@@ -466,13 +526,44 @@ impl<T: Translation> PageTableWithLevel<T> {
fn get_entry_mut(&mut self, va: VirtualAddress) -> &mut Descriptor {
let shift = PAGE_SHIFT + (LEAF_LEVEL - self.level) * BITS_PER_LEVEL;
let index = (va.0 >> shift) % (1 << BITS_PER_LEVEL);
- // Safe because we know that the pointer is properly aligned, dereferenced and initialised,
- // and nothing else can access the page table while we hold a mutable reference to the
- // PageTableWithLevel (assuming it is not currently active).
+ // SAFETY: Safe because we know that the pointer is properly aligned, dereferenced and
+ // initialised, and nothing else can access the page table while we hold a mutable reference
+ // to the PageTableWithLevel (assuming it is not currently active).
let table = unsafe { self.table.as_mut() };
&mut table.entries[index]
}
+ /// Converts the descriptor in `entry` from a block mapping to a table mapping of the same
+ /// range with the same attributes.
+ fn split_entry(
+ translation: &T,
+ chunk: &MemoryRegion,
+ entry: &mut Descriptor,
+ level: usize,
+ ) -> Self {
+ let granularity = granularity_at_level(level);
+ let old = *entry;
+ let (mut subtable, subtable_pa) = Self::new(translation, level + 1);
+ if let Some(old_flags) = old.flags() {
+ if !old_flags.contains(Attributes::TABLE_OR_PAGE) {
+ let old_pa = old.output_address();
+ // `old` was a block entry, so we need to split it.
+ // Recreate the entire block in the newly added table.
+ let a = align_down(chunk.0.start.0, granularity);
+ let b = align_up(chunk.0.end.0, granularity);
+ subtable.map_range(
+ translation,
+ &MemoryRegion::new(a, b),
+ old_pa,
+ old_flags,
+ Constraints::empty(),
+ );
+ }
+ }
+ entry.set(subtable_pa, Attributes::TABLE_OR_PAGE | Attributes::VALID);
+ subtable
+ }
+
/// Maps the given virtual address range in this pagetable to the corresponding physical
/// address range starting at the given `pa`, recursing into any subtables as necessary. To map
/// block and page entries, `Attributes::VALID` must be set in `flags`.
@@ -488,6 +579,7 @@ impl<T: Translation> PageTableWithLevel<T> {
range: &MemoryRegion,
mut pa: PhysicalAddress,
flags: Attributes,
+ constraints: Constraints,
) {
let level = self.level;
let granularity = granularity_at_level(level);
@@ -501,36 +593,17 @@ impl<T: Translation> PageTableWithLevel<T> {
} else if chunk.is_block(level)
&& !entry.is_table_or_page()
&& is_aligned(pa.0, granularity)
+ && !constraints.contains(Constraints::NO_BLOCK_MAPPINGS)
{
// Rather than leak the entire subhierarchy, only put down
// a block mapping if the region is not already covered by
// a table mapping.
entry.set(pa, flags | Attributes::ACCESSED);
} else {
- let mut subtable = if let Some(subtable) = entry.subtable(translation, level) {
- subtable
- } else {
- let old = *entry;
- let (mut subtable, subtable_pa) = Self::new(translation, level + 1);
- if let Some(old_flags) = old.flags() {
- if !old_flags.contains(Attributes::TABLE_OR_PAGE) {
- let old_pa = old.output_address();
- // `old` was a block entry, so we need to split it.
- // Recreate the entire block in the newly added table.
- let a = align_down(chunk.0.start.0, granularity);
- let b = align_up(chunk.0.end.0, granularity);
- subtable.map_range(
- translation,
- &MemoryRegion::new(a, b),
- old_pa,
- old_flags,
- );
- }
- }
- entry.set(subtable_pa, Attributes::TABLE_OR_PAGE | Attributes::VALID);
- subtable
- };
- subtable.map_range(translation, &chunk, pa, flags);
+ let mut subtable = entry
+ .subtable(translation, level)
+ .unwrap_or_else(|| Self::split_entry(translation, &chunk, entry, level));
+ subtable.map_range(translation, &chunk, pa, flags, constraints);
}
pa.0 += chunk.len();
}
@@ -543,8 +616,8 @@ impl<T: Translation> PageTableWithLevel<T> {
indentation: usize,
) -> Result<(), fmt::Error> {
const WIDTH: usize = 3;
- // Safe because we know that the pointer is aligned, initialised and dereferencable, and the
- // PageTable won't be mutated while we are using it.
+ // SAFETY: Safe because we know that the pointer is aligned, initialised and dereferencable,
+ // and the PageTable won't be mutated while we are using it.
let table = unsafe { self.table.as_ref() };
let mut i = 0;
@@ -577,8 +650,8 @@ impl<T: Translation> PageTableWithLevel<T> {
/// Frees the memory used by this pagetable and all subtables. It is not valid to access the
/// page table after this.
fn free(&mut self, translation: &T) {
- // Safe because we know that the pointer is aligned, initialised and dereferencable, and the
- // PageTable won't be mutated while we are freeing it.
+ // SAFETY: Safe because we know that the pointer is aligned, initialised and dereferencable,
+ // and the PageTable won't be mutated while we are freeing it.
let table = unsafe { self.table.as_ref() };
for entry in table.entries {
if let Some(mut subtable) = entry.subtable(translation, self.level) {
@@ -587,7 +660,7 @@ impl<T: Translation> PageTableWithLevel<T> {
subtable.free(translation);
}
}
- // Safe because the table was allocated by `PageTableWithLevel::new` with the global
+ // SAFETY: Safe because the table was allocated by `PageTableWithLevel::new` with the global
// allocator and appropriate layout.
unsafe {
// Actually free the memory used by the `PageTable`.
@@ -596,22 +669,54 @@ impl<T: Translation> PageTableWithLevel<T> {
}
/// Modifies a range of page table entries by applying a function to each page table entry.
- /// If the range is not aligned to block boundaries, it will be expanded.
- fn modify_range(
+ /// If the range is not aligned to block boundaries, block descriptors will be split up.
+ fn modify_range<F>(
&mut self,
translation: &T,
range: &MemoryRegion,
- f: &PteUpdater,
- ) -> Result<(), MapError> {
+ f: &F,
+ ) -> Result<(), MapError>
+ where
+ F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
+ {
let level = self.level;
for chunk in range.split(level) {
- // VA range passed to the updater is aligned to block boundaries, as that region will
- // be affected by changes to the entry.
- let affected_range = chunk.align_out(granularity_at_level(level));
let entry = self.get_entry_mut(chunk.0.start);
- f(&affected_range, entry, level).map_err(|_| MapError::PteUpdateFault(*entry))?;
- if let Some(mut subtable) = entry.subtable(translation, level) {
+ if let Some(mut subtable) = entry.subtable(translation, level).or_else(|| {
+ if !chunk.is_block(level) {
+ // The current chunk is not aligned to the block size at this level
+ // Split it before recursing to the next level
+ Some(Self::split_entry(translation, &chunk, entry, level))
+ } else {
+ None
+ }
+ }) {
subtable.modify_range(translation, &chunk, f)?;
+ } else {
+ f(&chunk, entry, level).map_err(|_| MapError::PteUpdateFault(*entry))?;
+ }
+ }
+ Ok(())
+ }
+
+ /// Walks a range of page table entries and passes each one to a caller-provided function.
+ /// If the range is not aligned to block boundaries, it will be expanded.
+ fn walk_range<F>(
+ &self,
+ translation: &T,
+ range: &MemoryRegion,
+ f: &mut F,
+ ) -> Result<(), MapError>
+ where
+ F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
+ {
+ let level = self.level;
+ for chunk in range.split(level) {
+ let entry = self.get_entry(chunk.0.start);
+ if let Some(subtable) = entry.subtable(translation, level) {
+ subtable.walk_range(translation, &chunk, f)?;
+ } else {
+ f(&chunk, entry, level).map_err(|_| MapError::PteUpdateFault(*entry))?;
}
}
Ok(())
@@ -647,8 +752,8 @@ impl PageTable {
/// allocator and returns a pointer to it.
#[cfg(feature = "alloc")]
pub fn new() -> NonNull<Self> {
- // Safe because the pointer has been allocated with the appropriate layout by the global
- // allocator, and the memory is zeroed which is valid initialisation for a PageTable.
+ // SAFETY: Safe because the pointer has been allocated with the appropriate layout by the
+ // global allocator, and the memory is zeroed which is valid initialisation for a PageTable.
unsafe { allocate_zeroed() }
}
}
@@ -667,7 +772,7 @@ pub struct Descriptor(usize);
impl Descriptor {
const PHYSICAL_ADDRESS_BITMASK: usize = !(PAGE_SIZE - 1) & !(0xffff << 48);
- fn output_address(self) -> PhysicalAddress {
+ pub(crate) fn output_address(self) -> PhysicalAddress {
PhysicalAddress(self.0 & Self::PHYSICAL_ADDRESS_BITMASK)
}
@@ -678,8 +783,15 @@ impl Descriptor {
}
/// Modifies the page table entry by setting or clearing its flags.
+ /// Panics when attempting to convert a table descriptor into a block/page descriptor or vice
+ /// versa; this is not supported via this API.
pub fn modify_flags(&mut self, set: Attributes, clear: Attributes) {
- self.0 = (self.0 | set.bits()) & !clear.bits();
+ let flags = (self.0 | set.bits()) & !clear.bits();
+
+ if (self.0 ^ flags) & Attributes::TABLE_OR_PAGE.bits() != 0 {
+ panic!("Cannot convert between table and block/page descriptors\n");
+ }
+ self.0 = flags;
}
/// Returns `true` if [`Attributes::VALID`] is set on this entry, e.g. if the entry is mapped.
@@ -696,7 +808,7 @@ impl Descriptor {
}
}
- fn set(&mut self, pa: PhysicalAddress, flags: Attributes) {
+ pub(crate) fn set(&mut self, pa: PhysicalAddress, flags: Attributes) {
self.0 = (pa.0 & Self::PHYSICAL_ADDRESS_BITMASK) | flags.bits();
}
@@ -876,6 +988,18 @@ mod tests {
);
}
+ #[test]
+ #[should_panic]
+ fn modify_descriptor_table_or_page_flag() {
+ let mut desc = Descriptor(0usize);
+ assert!(!desc.is_valid());
+ desc.set(
+ PhysicalAddress(0x12340000),
+ Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_1,
+ );
+ desc.modify_flags(Attributes::VALID, Attributes::TABLE_OR_PAGE);
+ }
+
#[cfg(feature = "alloc")]
#[test]
fn unaligned_chunks() {