author      Andrew Walbran <qwandor@google.com>    2023-01-09 17:07:31 +0000
committer   Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>    2023-01-09 17:07:31 +0000
commit      146cc82e8989856bc8cd1269bd1826af7e309af3 (patch)
tree        cb299b8e33a08f99cabfdad2b1f2ad0ddbb60ec3
parent      2a01832db0633317df7313d7eb690608a04c4efa (diff)
parent      0f8a4c2e2f1023badffad5de32abf1258b97846a (diff)
download    virtio-drivers-146cc82e8989856bc8cd1269bd1826af7e309af3.tar.gz
Upgrade virtio-drivers to 0.2.0 am: 0038e82f91 am: 0f8a4c2e2f
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/virtio-drivers/+/2378708

Change-Id: Icf3904dee983163b0b70cc13723997ab6a511288
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--  .cargo_vcs_info.json  2
-rw-r--r--  Android.bp  6
-rw-r--r--  Cargo.toml  7
-rw-r--r--  Cargo.toml.orig  3
-rw-r--r--  METADATA  14
-rw-r--r--  README.md  21
-rw-r--r--  cargo2android.json  3
-rw-r--r--  patches/Android.bp.patch  16
-rw-r--r--  src/blk.rs  325
-rw-r--r--  src/device/blk.rs  455
-rw-r--r--  src/device/console.rs (renamed from src/console.rs)  140
-rw-r--r--  src/device/gpu.rs (renamed from src/gpu.rs)  55
-rw-r--r--  src/device/input.rs (renamed from src/input.rs)  24
-rw-r--r--  src/device/mod.rs  8
-rw-r--r--  src/device/net.rs (renamed from src/net.rs)  40
-rw-r--r--  src/hal.rs  37
-rw-r--r--  src/hal/fake.rs  19
-rw-r--r--  src/lib.rs  78
-rw-r--r--  src/queue.rs  177
-rw-r--r--  src/transport/fake.rs  4
-rw-r--r--  src/transport/mmio.rs  4
-rw-r--r--  src/transport/mod.rs  2
-rw-r--r--  src/transport/pci.rs  1
-rw-r--r--  src/transport/pci/bus.rs  34
-rw-r--r--  src/volatile.rs  6
25 files changed, 929 insertions, 552 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index cb95af7..b52b894 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,6 +1,6 @@
{
"git": {
- "sha1": "e616945c47fc9502839f49e02757ad49fbb309a1"
+ "sha1": "33e4393c34fd6e7dd89574b12f36b59d39a31923"
},
"path_in_vcs": ""
} \ No newline at end of file
diff --git a/Android.bp b/Android.bp
index 803ec85..230c659 100644
--- a/Android.bp
+++ b/Android.bp
@@ -26,13 +26,14 @@ rust_library {
host_supported: true,
crate_name: "virtio_drivers",
cargo_env_compat: true,
- cargo_pkg_version: "0.1.0",
+ cargo_pkg_version: "0.2.0",
srcs: ["src/lib.rs"],
edition: "2018",
no_stdlibs: true,
rustlibs: [
"libbitflags",
"liblog_rust_nostd",
+ "libzerocopy",
],
apex_available: [
"//apex_available:platform",
@@ -46,7 +47,7 @@ rust_test {
host_supported: true,
crate_name: "virtio_drivers",
cargo_env_compat: true,
- cargo_pkg_version: "0.1.0",
+ cargo_pkg_version: "0.2.0",
srcs: ["src/lib.rs"],
test_suites: ["general-tests"],
auto_gen_config: true,
@@ -57,5 +58,6 @@ rust_test {
rustlibs: [
"libbitflags",
"liblog_rust",
+ "libzerocopy",
],
}
diff --git a/Cargo.toml b/Cargo.toml
index 2767964..22d9479 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,7 +12,7 @@
[package]
edition = "2018"
name = "virtio-drivers"
-version = "0.1.0"
+version = "0.2.0"
authors = [
"Jiajie Chen <noc@jiegec.ac.cn>",
"Runji Wang <wangrunji0408@163.com>",
@@ -35,6 +35,9 @@ version = "1.3"
[dependencies.log]
version = "0.4"
+[dependencies.zerocopy]
+version = "0.6.1"
+
[features]
-default = ["alloc"]
alloc = []
+default = ["alloc"]
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index cc52f7c..a245f56 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -1,6 +1,6 @@
[package]
name = "virtio-drivers"
-version = "0.1.0"
+version = "0.2.0"
license = "MIT"
authors = [
"Jiajie Chen <noc@jiegec.ac.cn>",
@@ -17,6 +17,7 @@ categories = ["hardware-support", "no-std"]
[dependencies]
log = "0.4"
bitflags = "1.3"
+zerocopy = "0.6.1"
[features]
default = ["alloc"]
diff --git a/METADATA b/METADATA
index edb3f48..ee4f5b7 100644
--- a/METADATA
+++ b/METADATA
@@ -1,3 +1,7 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update rust/crates/virtio-drivers
+# For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md
+
name: "virtio-drivers"
description: "VirtIO guest drivers."
third_party {
@@ -7,13 +11,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/virtio-drivers/virtio-drivers-0.1.0.crate"
+ value: "https://static.crates.io/crates/virtio-drivers/virtio-drivers-0.2.0.crate"
}
- version: "0.1.0"
+ version: "0.2.0"
license_type: NOTICE
last_upgrade_date {
- year: 2022
- month: 9
- day: 30
+ year: 2023
+ month: 1
+ day: 6
}
}
diff --git a/README.md b/README.md
index e3016bb..88b86f9 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,7 @@
# VirtIO-drivers-rs
+[![crates.io page](https://img.shields.io/crates/v/virtio-drivers.svg)](https://crates.io/crates/virtio-drivers)
+[![docs.rs page](https://docs.rs/virtio-drivers/badge.svg)](https://docs.rs/virtio-drivers)
[![CI](https://github.com/rcore-os/virtio-drivers/workflows/CI/badge.svg?branch=master)](https://github.com/rcore-os/virtio-drivers/actions)
VirtIO guest drivers in Rust. For **no_std** environment.
@@ -43,5 +45,20 @@ VirtIO guest drivers in Rust. For **no_std** environment.
- x86_64 (TODO)
-- [aarch64](./examples/aarch64)
-- [RISCV](./examples/riscv)
+### [aarch64](./examples/aarch64)
+
+```bash
+cd examples/aarch64
+make qemu
+```
+
+### [RISCV](./examples/riscv)
+
+```bash
+cd examples/riscv
+make qemu
+```
+
+You will see device info and a GUI window in QEMU.
+
+<img decoding="async" src="https://github.com/rcore-os/virtio-drivers/raw/master/examples/riscv/virtio-test-gpu.png" width="50%">
diff --git a/cargo2android.json b/cargo2android.json
index b893c29..1893a8e 100644
--- a/cargo2android.json
+++ b/cargo2android.json
@@ -2,6 +2,7 @@
"dependencies": true,
"device": true,
"features": "",
+ "patch": "patches/Android.bp.patch",
"run": true,
"tests": true
-} \ No newline at end of file
+}
diff --git a/patches/Android.bp.patch b/patches/Android.bp.patch
new file mode 100644
index 0000000..54cf1b4
--- /dev/null
+++ b/patches/Android.bp.patch
@@ -0,0 +1,16 @@
+diff --git a/Android.bp b/Android.bp
+index b08769d..230c659 100644
+--- a/Android.bp
++++ b/Android.bp
+@@ -29,9 +29,10 @@ rust_library {
+ cargo_pkg_version: "0.2.0",
+ srcs: ["src/lib.rs"],
+ edition: "2018",
++ no_stdlibs: true,
+ rustlibs: [
+ "libbitflags",
+- "liblog_rust",
++ "liblog_rust_nostd",
+ "libzerocopy",
+ ],
+ apex_available: [
diff --git a/src/blk.rs b/src/blk.rs
deleted file mode 100644
index 2f4b949..0000000
--- a/src/blk.rs
+++ /dev/null
@@ -1,325 +0,0 @@
-use super::*;
-use crate::queue::VirtQueue;
-use crate::transport::Transport;
-use crate::volatile::{volread, Volatile};
-use bitflags::*;
-use log::*;
-
-const QUEUE: u16 = 0;
-
-/// The virtio block device is a simple virtual block device (ie. disk).
-///
-/// Read and write requests (and other exotic requests) are placed in the queue,
-/// and serviced (probably out of order) by the device except where noted.
-pub struct VirtIOBlk<H: Hal, T: Transport> {
- transport: T,
- queue: VirtQueue<H>,
- capacity: u64,
-}
-
-impl<H: Hal, T: Transport> VirtIOBlk<H, T> {
- /// Create a new VirtIO-Blk driver.
- pub fn new(mut transport: T) -> Result<Self> {
- transport.begin_init(|features| {
- let features = BlkFeature::from_bits_truncate(features);
- info!("device features: {:?}", features);
- // negotiate these flags only
- let supported_features = BlkFeature::empty();
- (features & supported_features).bits()
- });
-
- // read configuration space
- let config = transport.config_space::<BlkConfig>()?;
- info!("config: {:?}", config);
- // Safe because config is a valid pointer to the device configuration space.
- let capacity = unsafe {
- volread!(config, capacity_low) as u64 | (volread!(config, capacity_high) as u64) << 32
- };
- info!("found a block device of size {}KB", capacity / 2);
-
- let queue = VirtQueue::new(&mut transport, QUEUE, 16)?;
- transport.finish_init();
-
- Ok(VirtIOBlk {
- transport,
- queue,
- capacity,
- })
- }
-
- /// Gets the capacity of the block device, in 512 byte sectors.
- pub fn capacity(&self) -> u64 {
- self.capacity
- }
-
- /// Acknowledge interrupt.
- pub fn ack_interrupt(&mut self) -> bool {
- self.transport.ack_interrupt()
- }
-
- /// Read a block.
- pub fn read_block(&mut self, block_id: usize, buf: &mut [u8]) -> Result {
- assert_eq!(buf.len(), BLK_SIZE);
- let req = BlkReq {
- type_: ReqType::In,
- reserved: 0,
- sector: block_id as u64,
- };
- let mut resp = BlkResp::default();
- self.queue.add_notify_wait_pop(
- &[req.as_buf()],
- &[buf, resp.as_buf_mut()],
- &mut self.transport,
- )?;
- match resp.status {
- RespStatus::Ok => Ok(()),
- _ => Err(Error::IoError),
- }
- }
-
- /// Read a block in a non-blocking way which means that it returns immediately.
- ///
- /// # Arguments
- ///
- /// * `block_id` - The identifier of the block to read.
- /// * `buf` - The buffer in the memory which the block is read into.
- /// * `resp` - A mutable reference to a variable provided by the caller
- /// which contains the status of the requests. The caller can safely
- /// read the variable only after the request is ready.
- ///
- /// # Usage
- ///
- /// It will submit request to the virtio block device and return a token identifying
- /// the position of the first Descriptor in the chain. If there are not enough
- /// Descriptors to allocate, then it returns [Error::BufferTooSmall].
- ///
- /// After the request is ready, `resp` will be updated and the caller can get the
- /// status of the request(e.g. succeed or failed) through it. However, the caller
- /// **must not** spin on `resp` to wait for it to change. A safe way is to read it
- /// after the same token as this method returns is fetched through [VirtIOBlk::pop_used()],
- /// which means that the request has been ready.
- ///
- /// # Safety
- ///
- /// `buf` is still borrowed by the underlying virtio block device even if this
- /// method returns. Thus, it is the caller's responsibility to guarantee that
- /// `buf` is not accessed before the request is completed in order to avoid
- /// data races.
- pub unsafe fn read_block_nb(
- &mut self,
- block_id: usize,
- buf: &mut [u8],
- resp: &mut BlkResp,
- ) -> Result<u16> {
- assert_eq!(buf.len(), BLK_SIZE);
- let req = BlkReq {
- type_: ReqType::In,
- reserved: 0,
- sector: block_id as u64,
- };
- let token = self.queue.add(&[req.as_buf()], &[buf, resp.as_buf_mut()])?;
- self.transport.notify(QUEUE);
- Ok(token)
- }
-
- /// Write a block.
- pub fn write_block(&mut self, block_id: usize, buf: &[u8]) -> Result {
- assert_eq!(buf.len(), BLK_SIZE);
- let req = BlkReq {
- type_: ReqType::Out,
- reserved: 0,
- sector: block_id as u64,
- };
- let mut resp = BlkResp::default();
- self.queue.add_notify_wait_pop(
- &[req.as_buf(), buf],
- &[resp.as_buf_mut()],
- &mut self.transport,
- )?;
- match resp.status {
- RespStatus::Ok => Ok(()),
- _ => Err(Error::IoError),
- }
- }
-
- //// Write a block in a non-blocking way which means that it returns immediately.
- ///
- /// # Arguments
- ///
- /// * `block_id` - The identifier of the block to write.
- /// * `buf` - The buffer in the memory containing the data to write to the block.
- /// * `resp` - A mutable reference to a variable provided by the caller
- /// which contains the status of the requests. The caller can safely
- /// read the variable only after the request is ready.
- ///
- /// # Usage
- ///
- /// See also [VirtIOBlk::read_block_nb()].
- ///
- /// # Safety
- ///
- /// See also [VirtIOBlk::read_block_nb()].
- pub unsafe fn write_block_nb(
- &mut self,
- block_id: usize,
- buf: &[u8],
- resp: &mut BlkResp,
- ) -> Result<u16> {
- assert_eq!(buf.len(), BLK_SIZE);
- let req = BlkReq {
- type_: ReqType::Out,
- reserved: 0,
- sector: block_id as u64,
- };
- let token = self.queue.add(&[req.as_buf(), buf], &[resp.as_buf_mut()])?;
- self.transport.notify(QUEUE);
- Ok(token)
- }
-
- /// During an interrupt, it fetches a token of a completed request from the used
- /// ring and return it. If all completed requests have already been fetched, return
- /// Err(Error::NotReady).
- pub fn pop_used(&mut self) -> Result<u16> {
- self.queue.pop_used().map(|p| p.0)
- }
-
- /// Return size of its VirtQueue.
- /// It can be used to tell the caller how many channels he should monitor on.
- pub fn virt_queue_size(&self) -> u16 {
- self.queue.size()
- }
-}
-
-impl<H: Hal, T: Transport> Drop for VirtIOBlk<H, T> {
- fn drop(&mut self) {
- // Clear any pointers pointing to DMA regions, so the device doesn't try to access them
- // after they have been freed.
- self.transport.queue_unset(QUEUE);
- }
-}
-
-#[repr(C)]
-struct BlkConfig {
- /// Number of 512 Bytes sectors
- capacity_low: Volatile<u32>,
- capacity_high: Volatile<u32>,
- size_max: Volatile<u32>,
- seg_max: Volatile<u32>,
- cylinders: Volatile<u16>,
- heads: Volatile<u8>,
- sectors: Volatile<u8>,
- blk_size: Volatile<u32>,
- physical_block_exp: Volatile<u8>,
- alignment_offset: Volatile<u8>,
- min_io_size: Volatile<u16>,
- opt_io_size: Volatile<u32>,
- // ... ignored
-}
-
-#[repr(C)]
-#[derive(Debug)]
-struct BlkReq {
- type_: ReqType,
- reserved: u32,
- sector: u64,
-}
-
-/// Response of a VirtIOBlk request.
-#[repr(C)]
-#[derive(Debug)]
-pub struct BlkResp {
- status: RespStatus,
-}
-
-impl BlkResp {
- /// Return the status of a VirtIOBlk request.
- pub fn status(&self) -> RespStatus {
- self.status
- }
-}
-
-#[repr(u32)]
-#[derive(Debug)]
-enum ReqType {
- In = 0,
- Out = 1,
- Flush = 4,
- Discard = 11,
- WriteZeroes = 13,
-}
-
-/// Status of a VirtIOBlk request.
-#[repr(u8)]
-#[derive(Debug, Eq, PartialEq, Copy, Clone)]
-pub enum RespStatus {
- /// Ok.
- Ok = 0,
- /// IoErr.
- IoErr = 1,
- /// Unsupported yet.
- Unsupported = 2,
- /// Not ready.
- _NotReady = 3,
-}
-
-impl Default for BlkResp {
- fn default() -> Self {
- BlkResp {
- status: RespStatus::_NotReady,
- }
- }
-}
-
-const BLK_SIZE: usize = 512;
-
-bitflags! {
- struct BlkFeature: u64 {
- /// Device supports request barriers. (legacy)
- const BARRIER = 1 << 0;
- /// Maximum size of any single segment is in `size_max`.
- const SIZE_MAX = 1 << 1;
- /// Maximum number of segments in a request is in `seg_max`.
- const SEG_MAX = 1 << 2;
- /// Disk-style geometry specified in geometry.
- const GEOMETRY = 1 << 4;
- /// Device is read-only.
- const RO = 1 << 5;
- /// Block size of disk is in `blk_size`.
- const BLK_SIZE = 1 << 6;
- /// Device supports scsi packet commands. (legacy)
- const SCSI = 1 << 7;
- /// Cache flush command support.
- const FLUSH = 1 << 9;
- /// Device exports information on optimal I/O alignment.
- const TOPOLOGY = 1 << 10;
- /// Device can toggle its cache between writeback and writethrough modes.
- const CONFIG_WCE = 1 << 11;
- /// Device can support discard command, maximum discard sectors size in
- /// `max_discard_sectors` and maximum discard segment number in
- /// `max_discard_seg`.
- const DISCARD = 1 << 13;
- /// Device can support write zeroes command, maximum write zeroes sectors
- /// size in `max_write_zeroes_sectors` and maximum write zeroes segment
- /// number in `max_write_zeroes_seg`.
- const WRITE_ZEROES = 1 << 14;
-
- // device independent
- const NOTIFY_ON_EMPTY = 1 << 24; // legacy
- const ANY_LAYOUT = 1 << 27; // legacy
- const RING_INDIRECT_DESC = 1 << 28;
- const RING_EVENT_IDX = 1 << 29;
- const UNUSED = 1 << 30; // legacy
- const VERSION_1 = 1 << 32; // detect legacy
-
- // the following since virtio v1.1
- const ACCESS_PLATFORM = 1 << 33;
- const RING_PACKED = 1 << 34;
- const IN_ORDER = 1 << 35;
- const ORDER_PLATFORM = 1 << 36;
- const SR_IOV = 1 << 37;
- const NOTIFICATION_DATA = 1 << 38;
- }
-}
-
-unsafe impl AsBuf for BlkReq {}
-unsafe impl AsBuf for BlkResp {}
diff --git a/src/device/blk.rs b/src/device/blk.rs
new file mode 100644
index 0000000..547f7fa
--- /dev/null
+++ b/src/device/blk.rs
@@ -0,0 +1,455 @@
+//! Driver for VirtIO block devices.
+
+use crate::hal::Hal;
+use crate::queue::VirtQueue;
+use crate::transport::Transport;
+use crate::volatile::{volread, Volatile};
+use crate::{Error, Result};
+use bitflags::bitflags;
+use log::info;
+use zerocopy::{AsBytes, FromBytes};
+
+const QUEUE: u16 = 0;
+
+/// Driver for a VirtIO block device.
+///
+/// This is a simple virtual block device, e.g. disk.
+///
+/// Read and write requests (and other exotic requests) are placed in the queue and serviced
+/// (probably out of order) by the device except where noted.
+///
+/// # Example
+///
+/// ```
+/// # use virtio_drivers::{Error, Hal};
+/// # use virtio_drivers::transport::Transport;
+/// use virtio_drivers::device::blk::{VirtIOBlk, SECTOR_SIZE};
+///
+/// # fn example<HalImpl: Hal, T: Transport>(transport: T) -> Result<(), Error> {
+/// let mut disk = VirtIOBlk::<HalImpl, _>::new(transport)?;
+///
+/// println!("VirtIO block device: {} kB", disk.capacity() * SECTOR_SIZE as u64 / 1024);
+///
+/// // Read sector 0 and then copy it to sector 1.
+/// let mut buf = [0; SECTOR_SIZE];
+/// disk.read_block(0, &mut buf)?;
+/// disk.write_block(1, &buf)?;
+/// # Ok(())
+/// # }
+/// ```
+pub struct VirtIOBlk<H: Hal, T: Transport> {
+ transport: T,
+ queue: VirtQueue<H>,
+ capacity: u64,
+ readonly: bool,
+}
+
+impl<H: Hal, T: Transport> VirtIOBlk<H, T> {
+ /// Create a new VirtIO-Blk driver.
+ pub fn new(mut transport: T) -> Result<Self> {
+ let mut readonly = false;
+
+ transport.begin_init(|features| {
+ let features = BlkFeature::from_bits_truncate(features);
+ info!("device features: {:?}", features);
+ readonly = features.contains(BlkFeature::RO);
+ // negotiate these flags only
+ let supported_features = BlkFeature::empty();
+ (features & supported_features).bits()
+ });
+
+ // read configuration space
+ let config = transport.config_space::<BlkConfig>()?;
+ info!("config: {:?}", config);
+ // Safe because config is a valid pointer to the device configuration space.
+ let capacity = unsafe {
+ volread!(config, capacity_low) as u64 | (volread!(config, capacity_high) as u64) << 32
+ };
+ info!("found a block device of size {}KB", capacity / 2);
+
+ let queue = VirtQueue::new(&mut transport, QUEUE, 16)?;
+ transport.finish_init();
+
+ Ok(VirtIOBlk {
+ transport,
+ queue,
+ capacity,
+ readonly,
+ })
+ }
+
+ /// Gets the capacity of the block device, in 512 byte ([`SECTOR_SIZE`]) sectors.
+ pub fn capacity(&self) -> u64 {
+ self.capacity
+ }
+
+ /// Returns true if the block device is read-only, or false if it allows writes.
+ pub fn readonly(&self) -> bool {
+ self.readonly
+ }
+
+ /// Acknowledges a pending interrupt, if any.
+ ///
+ /// Returns true if there was an interrupt to acknowledge.
+ pub fn ack_interrupt(&mut self) -> bool {
+ self.transport.ack_interrupt()
+ }
+
+ /// Reads a block into the given buffer.
+ ///
+ /// Blocks until the read completes or there is an error.
+ pub fn read_block(&mut self, block_id: usize, buf: &mut [u8]) -> Result {
+ assert_eq!(buf.len(), SECTOR_SIZE);
+ let req = BlkReq {
+ type_: ReqType::In,
+ reserved: 0,
+ sector: block_id as u64,
+ };
+ let mut resp = BlkResp::default();
+ self.queue.add_notify_wait_pop(
+ &[req.as_bytes()],
+ &[buf, resp.as_bytes_mut()],
+ &mut self.transport,
+ )?;
+ match resp.status {
+ RespStatus::OK => Ok(()),
+ _ => Err(Error::IoError),
+ }
+ }
+
+ /// Submits a request to read a block, but returns immediately without waiting for the read to
+ /// complete.
+ ///
+ /// # Arguments
+ ///
+ /// * `block_id` - The identifier of the block to read.
+ /// * `req` - A buffer which the driver can use for the request to send to the device. The
+ /// contents don't matter as `read_block_nb` will initialise it, but like the other buffers it
+ /// needs to be valid (and not otherwise used) until the corresponding `complete_read_block`
+ /// call.
+ /// * `buf` - The buffer in memory into which the block should be read.
+ /// * `resp` - A mutable reference to a variable provided by the caller
+ /// to contain the status of the request. The caller can safely
+ /// read the variable only after the request is complete.
+ ///
+ /// # Usage
+ ///
+ /// It will submit request to the VirtIO block device and return a token identifying
+ /// the position of the first Descriptor in the chain. If there are not enough
+ /// Descriptors to allocate, then it returns [`Error::QueueFull`].
+ ///
+ /// The caller can then call `peek_used` with the returned token to check whether the device has
+ /// finished handling the request. Once it has, the caller must call `complete_read_block` with
+ /// the same buffers before reading the response.
+ ///
+ /// ```
+ /// # use virtio_drivers::{Error, Hal};
+ /// # use virtio_drivers::device::blk::VirtIOBlk;
+ /// # use virtio_drivers::transport::Transport;
+ /// use virtio_drivers::device::blk::{BlkReq, BlkResp, RespStatus};
+ ///
+ /// # fn example<H: Hal, T: Transport>(blk: &mut VirtIOBlk<H, T>) -> Result<(), Error> {
+ /// let mut request = BlkReq::default();
+ /// let mut buffer = [0; 512];
+ /// let mut response = BlkResp::default();
+ /// let token = unsafe { blk.read_block_nb(42, &mut request, &mut buffer, &mut response) }?;
+ ///
+ /// // Wait for an interrupt to tell us that the request completed...
+ /// assert_eq!(blk.peek_used(), Some(token));
+ ///
+ /// unsafe {
+ /// blk.complete_read_block(token, &request, &mut buffer, &mut response)?;
+ /// }
+ /// if response.status() == RespStatus::OK {
+ /// println!("Successfully read block.");
+ /// } else {
+ /// println!("Error {:?} reading block.", response.status());
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// `req`, `buf` and `resp` are still borrowed by the underlying VirtIO block device even after
+ /// this method returns. Thus, it is the caller's responsibility to guarantee that they are not
+ /// accessed before the request is completed in order to avoid data races.
+ pub unsafe fn read_block_nb(
+ &mut self,
+ block_id: usize,
+ req: &mut BlkReq,
+ buf: &mut [u8],
+ resp: &mut BlkResp,
+ ) -> Result<u16> {
+ assert_eq!(buf.len(), SECTOR_SIZE);
+ *req = BlkReq {
+ type_: ReqType::In,
+ reserved: 0,
+ sector: block_id as u64,
+ };
+ let token = self
+ .queue
+ .add(&[req.as_bytes()], &[buf, resp.as_bytes_mut()])?;
+ self.transport.notify(QUEUE);
+ Ok(token)
+ }
+
+ /// Completes a read operation which was started by `read_block_nb`.
+ ///
+ /// # Safety
+ ///
+ /// The same buffers must be passed in again as were passed to `read_block_nb` when it returned
+ /// the token.
+ pub unsafe fn complete_read_block(
+ &mut self,
+ token: u16,
+ req: &BlkReq,
+ buf: &mut [u8],
+ resp: &mut BlkResp,
+ ) -> Result<()> {
+ self.queue
+ .pop_used(token, &[req.as_bytes()], &[buf, resp.as_bytes_mut()])?;
+ Ok(())
+ }
+
+ /// Writes the contents of the given buffer to a block.
+ ///
+ /// Blocks until the write is complete or there is an error.
+ pub fn write_block(&mut self, block_id: usize, buf: &[u8]) -> Result {
+ assert_eq!(buf.len(), SECTOR_SIZE);
+ let req = BlkReq {
+ type_: ReqType::Out,
+ reserved: 0,
+ sector: block_id as u64,
+ };
+ let mut resp = BlkResp::default();
+ self.queue.add_notify_wait_pop(
+ &[req.as_bytes(), buf],
+ &[resp.as_bytes_mut()],
+ &mut self.transport,
+ )?;
+ match resp.status {
+ RespStatus::OK => Ok(()),
+ _ => Err(Error::IoError),
+ }
+ }
+
+ /// Submits a request to write a block, but returns immediately without waiting for the write to
+ /// complete.
+ ///
+ /// # Arguments
+ ///
+ /// * `block_id` - The identifier of the block to write.
+ /// * `req` - A buffer which the driver can use for the request to send to the device. The
+ /// contents don't matter as `write_block_nb` will initialise it, but like the other buffers it
+ /// needs to be valid (and not otherwise used) until the corresponding `complete_write_block`
+ /// call.
+ /// * `buf` - The buffer in memory containing the data to write to the block.
+ /// * `resp` - A mutable reference to a variable provided by the caller
+ /// to contain the status of the request. The caller can safely
+ /// read the variable only after the request is complete.
+ ///
+ /// # Usage
+ ///
+ /// See [VirtIOBlk::read_block_nb].
+ ///
+ /// # Safety
+ ///
+ /// See [VirtIOBlk::read_block_nb].
+ pub unsafe fn write_block_nb(
+ &mut self,
+ block_id: usize,
+ req: &mut BlkReq,
+ buf: &[u8],
+ resp: &mut BlkResp,
+ ) -> Result<u16> {
+ assert_eq!(buf.len(), SECTOR_SIZE);
+ *req = BlkReq {
+ type_: ReqType::Out,
+ reserved: 0,
+ sector: block_id as u64,
+ };
+ let token = self
+ .queue
+ .add(&[req.as_bytes(), buf], &[resp.as_bytes_mut()])?;
+ self.transport.notify(QUEUE);
+ Ok(token)
+ }
+
+ /// Completes a write operation which was started by `write_block_nb`.
+ ///
+ /// # Safety
+ ///
+ /// The same buffers must be passed in again as were passed to `write_block_nb` when it returned
+ /// the token.
+ pub unsafe fn complete_write_block(
+ &mut self,
+ token: u16,
+ req: &BlkReq,
+ buf: &[u8],
+ resp: &mut BlkResp,
+ ) -> Result<()> {
+ self.queue
+ .pop_used(token, &[req.as_bytes(), buf], &[resp.as_bytes_mut()])?;
+ Ok(())
+ }
+
+ /// Fetches the token of the next completed request from the used ring and returns it, without
+ /// removing it from the used ring. If there are no pending completed requests returns `None`.
+ pub fn peek_used(&mut self) -> Option<u16> {
+ self.queue.peek_used()
+ }
+
+ /// Returns the size of the device's VirtQueue.
+ ///
+ /// This can be used to tell the caller how many channels to monitor on.
+ pub fn virt_queue_size(&self) -> u16 {
+ self.queue.size()
+ }
+}
+
+impl<H: Hal, T: Transport> Drop for VirtIOBlk<H, T> {
+ fn drop(&mut self) {
+ // Clear any pointers pointing to DMA regions, so the device doesn't try to access them
+ // after they have been freed.
+ self.transport.queue_unset(QUEUE);
+ }
+}
+
+#[repr(C)]
+struct BlkConfig {
+ /// Number of 512 Bytes sectors
+ capacity_low: Volatile<u32>,
+ capacity_high: Volatile<u32>,
+ size_max: Volatile<u32>,
+ seg_max: Volatile<u32>,
+ cylinders: Volatile<u16>,
+ heads: Volatile<u8>,
+ sectors: Volatile<u8>,
+ blk_size: Volatile<u32>,
+ physical_block_exp: Volatile<u8>,
+ alignment_offset: Volatile<u8>,
+ min_io_size: Volatile<u16>,
+ opt_io_size: Volatile<u32>,
+ // ... ignored
+}
+
+/// A VirtIO block device request.
+#[repr(C)]
+#[derive(AsBytes, Debug)]
+pub struct BlkReq {
+ type_: ReqType,
+ reserved: u32,
+ sector: u64,
+}
+
+impl Default for BlkReq {
+ fn default() -> Self {
+ Self {
+ type_: ReqType::In,
+ reserved: 0,
+ sector: 0,
+ }
+ }
+}
+
+/// Response of a VirtIOBlk request.
+#[repr(C)]
+#[derive(AsBytes, Debug, FromBytes)]
+pub struct BlkResp {
+ status: RespStatus,
+}
+
+impl BlkResp {
+ /// Return the status of a VirtIOBlk request.
+ pub fn status(&self) -> RespStatus {
+ self.status
+ }
+}
+
+#[repr(u32)]
+#[derive(AsBytes, Debug)]
+enum ReqType {
+ In = 0,
+ Out = 1,
+ Flush = 4,
+ Discard = 11,
+ WriteZeroes = 13,
+}
+
+/// Status of a VirtIOBlk request.
+#[repr(transparent)]
+#[derive(AsBytes, Copy, Clone, Debug, Eq, FromBytes, PartialEq)]
+pub struct RespStatus(u8);
+
+impl RespStatus {
+ /// Ok.
+ pub const OK: RespStatus = RespStatus(0);
+ /// IoErr.
+ pub const IO_ERR: RespStatus = RespStatus(1);
+ /// Unsupported yet.
+ pub const UNSUPPORTED: RespStatus = RespStatus(2);
+ /// Not ready.
+ pub const NOT_READY: RespStatus = RespStatus(3);
+}
+
+impl Default for BlkResp {
+ fn default() -> Self {
+ BlkResp {
+ status: RespStatus::NOT_READY,
+ }
+ }
+}
+
+/// The standard sector size of a VirtIO block device. Data is read and written in multiples of this
+/// size.
+pub const SECTOR_SIZE: usize = 512;
+
+bitflags! {
+ struct BlkFeature: u64 {
+ /// Device supports request barriers. (legacy)
+ const BARRIER = 1 << 0;
+ /// Maximum size of any single segment is in `size_max`.
+ const SIZE_MAX = 1 << 1;
+ /// Maximum number of segments in a request is in `seg_max`.
+ const SEG_MAX = 1 << 2;
+ /// Disk-style geometry specified in geometry.
+ const GEOMETRY = 1 << 4;
+ /// Device is read-only.
+ const RO = 1 << 5;
+ /// Block size of disk is in `blk_size`.
+ const BLK_SIZE = 1 << 6;
+ /// Device supports scsi packet commands. (legacy)
+ const SCSI = 1 << 7;
+ /// Cache flush command support.
+ const FLUSH = 1 << 9;
+ /// Device exports information on optimal I/O alignment.
+ const TOPOLOGY = 1 << 10;
+ /// Device can toggle its cache between writeback and writethrough modes.
+ const CONFIG_WCE = 1 << 11;
+ /// Device can support discard command, maximum discard sectors size in
+ /// `max_discard_sectors` and maximum discard segment number in
+ /// `max_discard_seg`.
+ const DISCARD = 1 << 13;
+ /// Device can support write zeroes command, maximum write zeroes sectors
+ /// size in `max_write_zeroes_sectors` and maximum write zeroes segment
+ /// number in `max_write_zeroes_seg`.
+ const WRITE_ZEROES = 1 << 14;
+
+ // device independent
+ const NOTIFY_ON_EMPTY = 1 << 24; // legacy
+ const ANY_LAYOUT = 1 << 27; // legacy
+ const RING_INDIRECT_DESC = 1 << 28;
+ const RING_EVENT_IDX = 1 << 29;
+ const UNUSED = 1 << 30; // legacy
+ const VERSION_1 = 1 << 32; // detect legacy
+
+ // the following since virtio v1.1
+ const ACCESS_PLATFORM = 1 << 33;
+ const RING_PACKED = 1 << 34;
+ const IN_ORDER = 1 << 35;
+ const ORDER_PLATFORM = 1 << 36;
+ const SR_IOV = 1 << 37;
+ const NOTIFICATION_DATA = 1 << 38;
+ }
+}
diff --git a/src/console.rs b/src/device/console.rs
index 50743ff..0cf2333 100644
--- a/src/console.rs
+++ b/src/device/console.rs
@@ -1,28 +1,69 @@
-use super::*;
+//! Driver for VirtIO console devices.
+
+use crate::hal::{Dma, Hal};
use crate::queue::VirtQueue;
use crate::transport::Transport;
use crate::volatile::{volread, ReadOnly, WriteOnly};
-use bitflags::*;
-use log::*;
+use crate::Result;
+use bitflags::bitflags;
+use core::ptr::NonNull;
+use log::info;
const QUEUE_RECEIVEQ_PORT_0: u16 = 0;
const QUEUE_TRANSMITQ_PORT_0: u16 = 1;
const QUEUE_SIZE: u16 = 2;
-/// Virtio console. Only one single port is allowed since ``alloc'' is disabled.
-/// Emergency and cols/rows unimplemented.
+/// Driver for a VirtIO console device.
+///
+/// Only a single port is allowed since `alloc` is disabled. Emergency write and cols/rows are not
+/// implemented.
+///
+/// # Example
+///
+/// ```
+/// # use virtio_drivers::{Error, Hal, transport::Transport};
+/// use virtio_drivers::device::console::VirtIOConsole;
+/// # fn example<HalImpl: Hal, T: Transport>(transport: T) -> Result<(), Error> {
+/// let mut console = VirtIOConsole::<HalImpl, _>::new(transport)?;
+///
+/// let info = console.info();
+/// println!("VirtIO console {}x{}", info.rows, info.columns);
+///
+/// for &c in b"Hello console!\n" {
+/// console.send(c)?;
+/// }
+///
+/// let c = console.recv(true)?;
+/// println!("Read {:?} from console.", c);
+/// # Ok(())
+/// # }
+/// ```
pub struct VirtIOConsole<'a, H: Hal, T: Transport> {
transport: T,
+ config_space: NonNull<Config>,
receiveq: VirtQueue<H>,
transmitq: VirtQueue<H>,
- queue_buf_dma: DMA<H>,
+ queue_buf_dma: Dma<H>,
queue_buf_rx: &'a mut [u8],
cursor: usize,
pending_len: usize,
+ /// The token of the outstanding receive request, if there is one.
+ receive_token: Option<u16>,
+}
+
+/// Information about a console device, read from its configuration space.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct ConsoleInfo {
+ /// The console height in characters.
+ pub rows: u16,
+ /// The console width in characters.
+ pub columns: u16,
+ /// The maximum number of ports supported by the console device.
+ pub max_ports: u32,
}
impl<H: Hal, T: Transport> VirtIOConsole<'_, H, T> {
- /// Create a new VirtIO-Console driver.
+ /// Creates a new VirtIO console driver.
pub fn new(mut transport: T) -> Result<Self> {
transport.begin_init(|features| {
let features = Features::from_bits_truncate(features);
@@ -31,72 +72,100 @@ impl<H: Hal, T: Transport> VirtIOConsole<'_, H, T> {
(features & supported_features).bits()
});
let config_space = transport.config_space::<Config>()?;
- unsafe {
- let columns = volread!(config_space, cols);
- let rows = volread!(config_space, rows);
- let max_ports = volread!(config_space, max_nr_ports);
- info!(
- "Columns: {} Rows: {} Max ports: {}",
- columns, rows, max_ports,
- );
- }
let receiveq = VirtQueue::new(&mut transport, QUEUE_RECEIVEQ_PORT_0, QUEUE_SIZE)?;
let transmitq = VirtQueue::new(&mut transport, QUEUE_TRANSMITQ_PORT_0, QUEUE_SIZE)?;
- let queue_buf_dma = DMA::new(1)?;
+ let queue_buf_dma = Dma::new(1)?;
let queue_buf_rx = unsafe { &mut queue_buf_dma.as_buf()[0..] };
transport.finish_init();
let mut console = VirtIOConsole {
transport,
+ config_space,
receiveq,
transmitq,
queue_buf_dma,
queue_buf_rx,
cursor: 0,
pending_len: 0,
+ receive_token: None,
};
console.poll_retrieve()?;
Ok(console)
}
+ /// Returns a struct with information about the console device, such as the number of rows and columns.
+ pub fn info(&self) -> ConsoleInfo {
+ // Safe because config_space is a valid pointer to the device configuration space.
+ unsafe {
+ let columns = volread!(self.config_space, cols);
+ let rows = volread!(self.config_space, rows);
+ let max_ports = volread!(self.config_space, max_nr_ports);
+ ConsoleInfo {
+ rows,
+ columns,
+ max_ports,
+ }
+ }
+ }
+
+ /// Makes a request to the device to receive data, if there is not already an outstanding
+ /// receive request or some data already received and not yet returned.
fn poll_retrieve(&mut self) -> Result<()> {
- // Safe because the buffer lasts at least as long as the queue.
- unsafe { self.receiveq.add(&[], &[self.queue_buf_rx])? };
+ if self.receive_token.is_none() && self.cursor == self.pending_len {
+ // Safe because the buffer lasts at least as long as the queue, and there are no other
+ // outstanding requests using the buffer.
+ self.receive_token = Some(unsafe { self.receiveq.add(&[], &[self.queue_buf_rx]) }?);
+ }
Ok(())
}
- /// Acknowledge interrupt.
+ /// Acknowledges a pending interrupt, if any, and completes the outstanding finished read
+ /// request if there is one.
+ ///
+ /// Returns true if new data has been received.
pub fn ack_interrupt(&mut self) -> Result<bool> {
- let ack = self.transport.ack_interrupt();
- if !ack {
+ if !self.transport.ack_interrupt() {
return Ok(false);
}
+
+ self.finish_receive()
+ }
+
+ /// If there is an outstanding receive request and it has finished, completes it.
+ ///
+ /// Returns true if new data has been received.
+ fn finish_receive(&mut self) -> Result<bool> {
let mut flag = false;
- while let Ok((_token, len)) = self.receiveq.pop_used() {
- assert_eq!(flag, false);
- flag = true;
- assert_ne!(len, 0);
- self.cursor = 0;
- self.pending_len = len as usize;
+ if let Some(receive_token) = self.receive_token {
+ if self.receive_token == self.receiveq.peek_used() {
+ let len = self
+ .receiveq
+ .pop_used(receive_token, &[], &[self.queue_buf_rx])?;
+ flag = true;
+ assert_ne!(len, 0);
+ self.cursor = 0;
+ self.pending_len = len as usize;
+ }
}
Ok(flag)
}
- /// Try get char.
+ /// Returns the next available character from the console, if any.
+ ///
+ /// If no data has been received, this will not block but immediately return `Ok(None)`.
pub fn recv(&mut self, pop: bool) -> Result<Option<u8>> {
+ self.finish_receive()?;
if self.cursor == self.pending_len {
return Ok(None);
}
let ch = self.queue_buf_rx[self.cursor];
if pop {
self.cursor += 1;
- if self.cursor == self.pending_len {
- self.poll_retrieve()?;
- }
+ self.poll_retrieve()?;
}
Ok(Some(ch))
}
- /// Put a char onto the device.
+ /// Sends a character to the console.
pub fn send(&mut self, chr: u8) -> Result<()> {
let buf: [u8; 1] = [chr];
// Safe because the buffer is valid until we pop_used below.
@@ -152,7 +221,10 @@ mod tests {
use super::*;
use crate::{
hal::fake::FakeHal,
- transport::fake::{FakeTransport, QueueStatus, State},
+ transport::{
+ fake::{FakeTransport, QueueStatus, State},
+ DeviceStatus, DeviceType,
+ },
};
use alloc::{sync::Arc, vec};
use core::ptr::NonNull;
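With the outstanding receive request now tracked via `receive_token`, `ack_interrupt` completes a finished receive before returning, so an interrupt handler can drain buffered bytes right away. A rough sketch of that pattern follows (illustrative only, not part of this change; the `console_irq` and `handle_byte` names are invented):

```rust
use virtio_drivers::{device::console::VirtIOConsole, transport::Transport, Error, Hal};

/// Hypothetical interrupt handler for the console device.
fn console_irq<H: Hal, T: Transport>(console: &mut VirtIOConsole<H, T>) -> Result<(), Error> {
    // Acknowledge the interrupt; this also completes a finished receive
    // request, making any new bytes visible to `recv`.
    if console.ack_interrupt()? {
        // Pop every byte currently buffered; `recv(true)` re-queues the
        // receive buffer once it has been fully consumed.
        while let Some(byte) = console.recv(true)? {
            handle_byte(byte);
        }
    }
    Ok(())
}

/// Placeholder sink for received bytes.
fn handle_byte(_byte: u8) {}
```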
diff --git a/src/gpu.rs b/src/device/gpu.rs
index 6c17f3a..1fa2132 100644
--- a/src/gpu.rs
+++ b/src/device/gpu.rs
@@ -1,10 +1,12 @@
-use super::*;
+//! Driver for VirtIO GPU devices.
+
+use crate::hal::{Dma, Hal};
use crate::queue::VirtQueue;
use crate::transport::Transport;
use crate::volatile::{volread, ReadOnly, Volatile, WriteOnly};
-use bitflags::*;
-use core::{fmt, hint::spin_loop};
-use log::*;
+use crate::{pages, Error, Result, PAGE_SIZE};
+use bitflags::bitflags;
+use log::info;
/// A virtio based graphics adapter.
///
@@ -17,15 +19,15 @@ pub struct VirtIOGpu<'a, H: Hal, T: Transport> {
transport: T,
rect: Option<Rect>,
/// DMA area of frame buffer.
- frame_buffer_dma: Option<DMA<H>>,
+ frame_buffer_dma: Option<Dma<H>>,
/// DMA area of cursor image buffer.
- cursor_buffer_dma: Option<DMA<H>>,
+ cursor_buffer_dma: Option<Dma<H>>,
/// Queue for sending control commands.
control_queue: VirtQueue<H>,
/// Queue for sending cursor commands.
cursor_queue: VirtQueue<H>,
/// Queue buffer DMA
- queue_buf_dma: DMA<H>,
+ queue_buf_dma: Dma<H>,
/// Send buffer for queue.
queue_buf_send: &'a mut [u8],
/// Recv buffer for queue.
@@ -56,7 +58,7 @@ impl<H: Hal, T: Transport> VirtIOGpu<'_, H, T> {
let control_queue = VirtQueue::new(&mut transport, QUEUE_TRANSMIT, 2)?;
let cursor_queue = VirtQueue::new(&mut transport, QUEUE_CURSOR, 2)?;
- let queue_buf_dma = DMA::new(2)?;
+ let queue_buf_dma = Dma::new(2)?;
let queue_buf_send = unsafe { &mut queue_buf_dma.as_buf()[..PAGE_SIZE] };
let queue_buf_recv = unsafe { &mut queue_buf_dma.as_buf()[PAGE_SIZE..] };
@@ -102,7 +104,7 @@ impl<H: Hal, T: Transport> VirtIOGpu<'_, H, T> {
// alloc continuous pages for the frame buffer
let size = display_info.rect.width * display_info.rect.height * 4;
- let frame_buffer_dma = DMA::new(pages(size as usize))?;
+ let frame_buffer_dma = Dma::new(pages(size as usize))?;
// resource_attach_backing
self.resource_attach_backing(RESOURCE_ID_FB, frame_buffer_dma.paddr() as u64, size)?;
@@ -138,7 +140,7 @@ impl<H: Hal, T: Transport> VirtIOGpu<'_, H, T> {
if cursor_image.len() != size as usize {
return Err(Error::InvalidParam);
}
- let cursor_buffer_dma = DMA::new(pages(size as usize))?;
+ let cursor_buffer_dma = Dma::new(pages(size as usize))?;
let buf = unsafe { cursor_buffer_dma.as_buf() };
buf.copy_from_slice(cursor_image);
@@ -169,16 +171,11 @@ impl<H: Hal, T: Transport> VirtIOGpu<'_, H, T> {
unsafe {
(self.queue_buf_send.as_mut_ptr() as *mut Req).write(req);
}
- let token = unsafe {
- self.control_queue
- .add(&[self.queue_buf_send], &[self.queue_buf_recv])?
- };
- self.transport.notify(QUEUE_TRANSMIT);
- while !self.control_queue.can_pop() {
- spin_loop();
- }
- let (popped_token, _) = self.control_queue.pop_used()?;
- assert_eq!(popped_token, token);
+ self.control_queue.add_notify_wait_pop(
+ &[self.queue_buf_send],
+ &[self.queue_buf_recv],
+ &mut self.transport,
+ )?;
Ok(unsafe { (self.queue_buf_recv.as_ptr() as *const Rsp).read() })
}
@@ -187,13 +184,8 @@ impl<H: Hal, T: Transport> VirtIOGpu<'_, H, T> {
unsafe {
(self.queue_buf_send.as_mut_ptr() as *mut Req).write(req);
}
- let token = unsafe { self.cursor_queue.add(&[self.queue_buf_send], &[])? };
- self.transport.notify(QUEUE_CURSOR);
- while !self.cursor_queue.can_pop() {
- spin_loop();
- }
- let (popped_token, _) = self.cursor_queue.pop_used()?;
- assert_eq!(popped_token, token);
+ self.cursor_queue
+ .add_notify_wait_pop(&[self.queue_buf_send], &[], &mut self.transport)?;
Ok(())
}
@@ -310,15 +302,6 @@ struct Config {
num_scanouts: Volatile<u32>,
}
-impl fmt::Debug for Config {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.debug_struct("Config")
- .field("events_read", &self.events_read)
- .field("num_scanouts", &self.num_scanouts)
- .finish()
- }
-}
-
/// Display configuration has changed.
const EVENT_DISPLAY: u32 = 1 << 0;
diff --git a/src/input.rs b/src/device/input.rs
index 70cef0f..5e4be62 100644
--- a/src/input.rs
+++ b/src/device/input.rs
@@ -1,10 +1,15 @@
-use super::*;
+//! Driver for VirtIO input devices.
+
+use crate::hal::Hal;
+use crate::queue::VirtQueue;
use crate::transport::Transport;
use crate::volatile::{volread, volwrite, ReadOnly, WriteOnly};
+use crate::Result;
use alloc::boxed::Box;
-use bitflags::*;
+use bitflags::bitflags;
use core::ptr::NonNull;
-use log::*;
+use log::info;
+use zerocopy::{AsBytes, FromBytes};
/// Virtual human interface devices such as keyboards, mice and tablets.
///
@@ -37,7 +42,7 @@ impl<H: Hal, T: Transport> VirtIOInput<H, T> {
let status_queue = VirtQueue::new(&mut transport, QUEUE_STATUS, QUEUE_SIZE as u16)?;
for (i, event) in event_buf.as_mut().iter_mut().enumerate() {
// Safe because the buffer lasts as long as the queue.
- let token = unsafe { event_queue.add(&[], &[event.as_buf_mut()])? };
+ let token = unsafe { event_queue.add(&[], &[event.as_bytes_mut()])? };
assert_eq!(token, i as u16);
}
@@ -59,11 +64,14 @@ impl<H: Hal, T: Transport> VirtIOInput<H, T> {
/// Pop the pending event.
pub fn pop_pending_event(&mut self) -> Option<InputEvent> {
- if let Ok((token, _)) = self.event_queue.pop_used() {
+ if let Some(token) = self.event_queue.peek_used() {
let event = &mut self.event_buf[token as usize];
+ self.event_queue
+ .pop_used(token, &[], &[event.as_bytes_mut()])
+ .ok()?;
// requeue
// Safe because buffer lasts as long as the queue.
- if let Ok(new_token) = unsafe { self.event_queue.add(&[], &[event.as_buf_mut()]) } {
+ if let Ok(new_token) = unsafe { self.event_queue.add(&[], &[event.as_bytes_mut()]) } {
// This only works because nothing happen between `pop_used` and `add` that affects
// the list of free descriptors in the queue, so `add` reuses the descriptor which
// was just freed by `pop_used`.
@@ -161,7 +169,7 @@ struct DevIDs {
/// Both queues use the same `virtio_input_event` struct. `type`, `code` and `value`
/// are filled according to the Linux input layer (evdev) interface.
#[repr(C)]
-#[derive(Clone, Copy, Debug, Default)]
+#[derive(AsBytes, Clone, Copy, Debug, Default, FromBytes)]
pub struct InputEvent {
/// Event type.
pub event_type: u16,
@@ -171,8 +179,6 @@ pub struct InputEvent {
pub value: u32,
}
-unsafe impl AsBuf for InputEvent {}
-
bitflags! {
struct Feature: u64 {
// device independent
diff --git a/src/device/mod.rs b/src/device/mod.rs
new file mode 100644
index 0000000..f3e4f66
--- /dev/null
+++ b/src/device/mod.rs
@@ -0,0 +1,8 @@
+//! Drivers for specific VirtIO devices.
+
+pub mod blk;
+pub mod console;
+pub mod gpu;
+#[cfg(feature = "alloc")]
+pub mod input;
+pub mod net;
diff --git a/src/net.rs b/src/device/net.rs
index 82abc60..14bbe4b 100644
--- a/src/net.rs
+++ b/src/device/net.rs
@@ -1,10 +1,14 @@
-use core::mem::{size_of, MaybeUninit};
+//! Driver for VirtIO network devices.
-use super::*;
+use crate::hal::Hal;
+use crate::queue::VirtQueue;
use crate::transport::Transport;
use crate::volatile::{volread, ReadOnly};
-use bitflags::*;
-use log::*;
+use crate::Result;
+use bitflags::bitflags;
+use core::mem::{size_of, MaybeUninit};
+use log::{debug, info};
+use zerocopy::{AsBytes, FromBytes};
/// The virtio network device is a virtual ethernet card.
///
@@ -75,7 +79,7 @@ impl<H: Hal, T: Transport> VirtIONet<H, T> {
/// Receive a packet.
pub fn recv(&mut self, buf: &mut [u8]) -> Result<usize> {
let mut header = MaybeUninit::<Header>::uninit();
- let header_buf = unsafe { (*header.as_mut_ptr()).as_buf_mut() };
+ let header_buf = unsafe { (*header.as_mut_ptr()).as_bytes_mut() };
let len =
self.recv_queue
.add_notify_wait_pop(&[], &[header_buf, buf], &mut self.transport)?;
@@ -87,7 +91,7 @@ impl<H: Hal, T: Transport> VirtIONet<H, T> {
pub fn send(&mut self, buf: &[u8]) -> Result {
let header = unsafe { MaybeUninit::<Header>::zeroed().assume_init() };
self.send_queue
- .add_notify_wait_pop(&[header.as_buf(), buf], &[], &mut self.transport)?;
+ .add_notify_wait_pop(&[header.as_bytes(), buf], &[], &mut self.transport)?;
Ok(())
}
}
@@ -186,7 +190,7 @@ type EthernetAddress = [u8; 6];
// virtio 5.1.6 Device Operation
#[repr(C)]
-#[derive(Debug)]
+#[derive(AsBytes, Debug, FromBytes)]
struct Header {
flags: Flags,
gso_type: GsoType,
@@ -197,9 +201,9 @@ struct Header {
// payload starts from here
}
-unsafe impl AsBuf for Header {}
-
bitflags! {
+ #[repr(transparent)]
+ #[derive(AsBytes, FromBytes)]
struct Flags: u8 {
const NEEDS_CSUM = 1;
const DATA_VALID = 2;
@@ -207,14 +211,16 @@ bitflags! {
}
}
-#[repr(u8)]
-#[derive(Debug, Copy, Clone, Eq, PartialEq)]
-enum GsoType {
- NONE = 0,
- TCPV4 = 1,
- UDP = 3,
- TCPV6 = 4,
- ECN = 0x80,
+#[repr(transparent)]
+#[derive(AsBytes, Debug, Copy, Clone, Eq, FromBytes, PartialEq)]
+struct GsoType(u8);
+
+impl GsoType {
+ const NONE: GsoType = GsoType(0);
+ const TCPV4: GsoType = GsoType(1);
+ const UDP: GsoType = GsoType(3);
+ const TCPV6: GsoType = GsoType(4);
+ const ECN: GsoType = GsoType(0x80);
}
const QUEUE_RECEIVE: u16 = 0;
diff --git a/src/hal.rs b/src/hal.rs
index 344db00..891f87f 100644
--- a/src/hal.rs
+++ b/src/hal.rs
@@ -1,8 +1,8 @@
#[cfg(test)]
pub mod fake;
-use super::*;
-use core::marker::PhantomData;
+use crate::{Error, Result, PAGE_SIZE};
+use core::{marker::PhantomData, ptr::NonNull};
/// A virtual memory address in the address space of the program.
pub type VirtAddr = usize;
@@ -12,19 +12,19 @@ pub type PhysAddr = usize;
/// A region of contiguous physical memory used for DMA.
#[derive(Debug)]
-pub struct DMA<H: Hal> {
+pub struct Dma<H: Hal> {
paddr: usize,
pages: usize,
_phantom: PhantomData<H>,
}
-impl<H: Hal> DMA<H> {
+impl<H: Hal> Dma<H> {
pub fn new(pages: usize) -> Result<Self> {
let paddr = H::dma_alloc(pages);
if paddr == 0 {
return Err(Error::DmaError);
}
- Ok(DMA {
+ Ok(Self {
paddr,
pages,
_phantom: PhantomData::default(),
@@ -41,13 +41,13 @@ impl<H: Hal> DMA<H> {
/// Convert to a buffer
pub unsafe fn as_buf(&self) -> &'static mut [u8] {
- core::slice::from_raw_parts_mut(self.vaddr() as _, PAGE_SIZE * self.pages as usize)
+ core::slice::from_raw_parts_mut(self.vaddr() as _, PAGE_SIZE * self.pages)
}
}
-impl<H: Hal> Drop for DMA<H> {
+impl<H: Hal> Drop for Dma<H> {
fn drop(&mut self) {
- let err = H::dma_dealloc(self.paddr as usize, self.pages as usize);
+ let err = H::dma_dealloc(self.paddr, self.pages);
assert_eq!(err, 0, "failed to deallocate DMA");
}
}
@@ -61,7 +61,22 @@ pub trait Hal {
/// Converts a physical address used for virtio to a virtual address which the program can
/// access.
fn phys_to_virt(paddr: PhysAddr) -> VirtAddr;
- /// Converts a virtual address which the program can access to the corresponding physical
- /// address to use for virtio.
- fn virt_to_phys(vaddr: VirtAddr) -> PhysAddr;
+ /// Shares the given memory range with the device, and returns the physical address that the
+ /// device can use to access it.
+ ///
+ /// This may involve mapping the buffer into an IOMMU, giving the host permission to access the
+ /// memory, or copying it to a special region where it can be accessed.
+ fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr;
+ /// Unshares the given memory range from the device and (if necessary) copies it back to the
+ /// original buffer.
+ fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection);
+}
+
+/// The direction in which a buffer is passed.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum BufferDirection {
+ /// The buffer is written by the driver and read by the device.
+ DriverToDevice,
+ /// The buffer is written by the device and read by the driver.
+ DeviceToDriver,
}
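The key API change in this hunk is that `Hal::virt_to_phys` is replaced by `share`/`unshare`, so a HAL can bounce-buffer or IOMMU-map memory instead of assuming the device can reach every guest address (the test-only `FakeHal` in the next hunk follows the same approach). Below is a rough, illustrative sketch of what a client implementation might look like on an identity-mapped guest with no IOMMU; it is not part of this change, the `IdentityHal` type and its toy bump allocator are invented, and the `dma_alloc`/`dma_dealloc` signatures are assumed to be unchanged from 0.1.0 since this hunk does not show them.

```rust
use core::ptr::{addr_of_mut, NonNull};
use core::sync::atomic::{AtomicUsize, Ordering};
use virtio_drivers::{BufferDirection, Hal, PhysAddr, VirtAddr, PAGE_SIZE};

const POOL_PAGES: usize = 16;

/// Toy DMA pool: statically reserved, page-aligned, zero-initialised pages.
/// A real HAL would use the kernel's physical page allocator instead.
#[repr(align(4096))]
struct DmaPool([u8; POOL_PAGES * PAGE_SIZE]);
static mut DMA_POOL: DmaPool = DmaPool([0; POOL_PAGES * PAGE_SIZE]);
static DMA_NEXT: AtomicUsize = AtomicUsize::new(0);

struct IdentityHal;

impl Hal for IdentityHal {
    fn dma_alloc(pages: usize) -> PhysAddr {
        // Bump-allocate zeroed pages; they are never reused, which is fine for a sketch.
        let offset = DMA_NEXT.fetch_add(pages * PAGE_SIZE, Ordering::SeqCst);
        if offset + pages * PAGE_SIZE > POOL_PAGES * PAGE_SIZE {
            return 0; // The library treats 0 as an allocation failure.
        }
        unsafe { addr_of_mut!(DMA_POOL) as usize + offset }
    }

    fn dma_dealloc(_paddr: PhysAddr, _pages: usize) -> i32 {
        0 // The toy allocator leaks; a real HAL would return the pages here.
    }

    fn phys_to_virt(paddr: PhysAddr) -> VirtAddr {
        paddr // Identity mapping: physical and virtual addresses coincide.
    }

    fn share(buffer: NonNull<[u8]>, _direction: BufferDirection) -> PhysAddr {
        // The device already sees all guest memory, so no bounce buffer or IOMMU
        // mapping is needed; just hand back the identity-mapped address.
        buffer.as_ptr() as *mut u8 as usize
    }

    fn unshare(_paddr: PhysAddr, _buffer: NonNull<[u8]>, _direction: BufferDirection) {
        // Nothing to copy back, because `share` never moved the buffer.
    }
}
```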
diff --git a/src/hal/fake.rs b/src/hal/fake.rs
index 6f81516..655ad43 100644
--- a/src/hal/fake.rs
+++ b/src/hal/fake.rs
@@ -1,8 +1,8 @@
//! Fake HAL implementation for tests.
-use crate::{Hal, PhysAddr, VirtAddr, PAGE_SIZE};
+use crate::{BufferDirection, Hal, PhysAddr, VirtAddr, PAGE_SIZE};
use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
-use core::alloc::Layout;
+use core::{alloc::Layout, ptr::NonNull};
#[derive(Debug)]
pub struct FakeHal;
@@ -35,7 +35,18 @@ impl Hal for FakeHal {
paddr
}
- fn virt_to_phys(vaddr: VirtAddr) -> PhysAddr {
- vaddr
+ fn share(buffer: NonNull<[u8]>, _direction: BufferDirection) -> PhysAddr {
+ let vaddr = buffer.as_ptr() as *mut u8 as usize;
+ // Nothing to do, as the host already has access to all memory.
+ virt_to_phys(vaddr)
}
+
+ fn unshare(_paddr: PhysAddr, _buffer: NonNull<[u8]>, _direction: BufferDirection) {
+ // Nothing to do, as the host already has access to all memory and we didn't copy the buffer
+ // anywhere else.
+ }
+}
+
+fn virt_to_phys(vaddr: VirtAddr) -> PhysAddr {
+ vaddr
}
diff --git a/src/lib.rs b/src/lib.rs
index d9a4ce6..7decb7a 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,4 +1,43 @@
//! VirtIO guest drivers.
+//!
+//! These drivers can be used by bare-metal code (such as a bootloader or OS kernel) running in a VM
+//! to interact with VirtIO devices provided by the VMM (such as QEMU or crosvm).
+//!
+//! # Usage
+//!
+//! You must first implement the [`Hal`] trait, to allocate DMA regions and translate between
+//! physical addresses (as seen by devices) and virtual addresses (as seen by your program). You can
+//! then construct the appropriate transport for the VirtIO device, e.g. for an MMIO device (perhaps
+//! discovered from the device tree):
+//!
+//! ```
+//! use core::ptr::NonNull;
+//! use virtio_drivers::transport::mmio::{MmioTransport, VirtIOHeader};
+//!
+//! # fn example(mmio_device_address: usize) {
+//! let header = NonNull::new(mmio_device_address as *mut VirtIOHeader).unwrap();
+//! let transport = unsafe { MmioTransport::new(header) }.unwrap();
+//! # }
+//! ```
+//!
+//! You can then check what kind of VirtIO device it is and construct the appropriate driver:
+//!
+//! ```
+//! # use virtio_drivers::Hal;
+//! use virtio_drivers::{
+//! device::console::VirtIOConsole,
+//! transport::{mmio::MmioTransport, DeviceType, Transport},
+//! };
+
+//!
+//! # fn example<HalImpl: Hal>(transport: MmioTransport) {
+//! if transport.device_type() == DeviceType::Console {
+//! let mut console = VirtIOConsole::<HalImpl, _>::new(transport).unwrap();
+//! // Send a byte to the console.
+//! console.send(b'H').unwrap();
+//! }
+//! # }
+//! ```
#![cfg_attr(not(test), no_std)]
#![deny(unused_must_use, missing_docs)]
@@ -8,30 +47,13 @@
#[cfg(any(feature = "alloc", test))]
extern crate alloc;
-mod blk;
-mod console;
-mod gpu;
+pub mod device;
mod hal;
-#[cfg(feature = "alloc")]
-mod input;
-mod net;
mod queue;
-mod transport;
+pub mod transport;
mod volatile;
-pub use self::blk::{BlkResp, RespStatus, VirtIOBlk};
-pub use self::console::VirtIOConsole;
-pub use self::gpu::VirtIOGpu;
-pub use self::hal::{Hal, PhysAddr, VirtAddr};
-#[cfg(feature = "alloc")]
-pub use self::input::{InputConfigSelect, InputEvent, VirtIOInput};
-pub use self::net::VirtIONet;
-use self::queue::VirtQueue;
-pub use self::transport::mmio::{MmioError, MmioTransport, MmioVersion, VirtIOHeader};
-pub use self::transport::pci;
-pub use self::transport::{DeviceStatus, DeviceType, Transport};
-use core::mem::size_of;
-use hal::*;
+pub use self::hal::{BufferDirection, Hal, PhysAddr, VirtAddr};
/// The page size in bytes supported by the library (4 KiB).
pub const PAGE_SIZE: usize = 0x1000;
@@ -42,10 +64,12 @@ pub type Result<T = ()> = core::result::Result<T, Error>;
/// The error type of VirtIO drivers.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Error {
- /// The buffer is too small.
- BufferTooSmall,
+ /// There are not enough descriptors available in the virtqueue, try again later.
+ QueueFull,
/// The device is not ready.
NotReady,
+ /// The device used a different descriptor chain to the one we were expecting.
+ WrongToken,
/// The queue is already in use.
AlreadyUsed,
/// Invalid parameter.
@@ -69,13 +93,3 @@ fn align_up(size: usize) -> usize {
fn pages(size: usize) -> usize {
(size + PAGE_SIZE - 1) / PAGE_SIZE
}
-
-/// Convert a struct into a byte buffer.
-unsafe trait AsBuf: Sized {
- fn as_buf(&self) -> &[u8] {
- unsafe { core::slice::from_raw_parts(self as *const _ as _, size_of::<Self>()) }
- }
- fn as_buf_mut(&mut self) -> &mut [u8] {
- unsafe { core::slice::from_raw_parts_mut(self as *mut _ as _, size_of::<Self>()) }
- }
-}
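With the private `AsBuf` helper removed, the request/response structs across the drivers now derive `zerocopy::AsBytes`/`FromBytes` (see the new `src/device/blk.rs` above) and are handed to the virtqueue as byte slices. A minimal sketch of that pattern, assuming zerocopy 0.6.1 as declared in Cargo.toml; `ExampleHeader` is an invented type used purely for illustration:

```rust
use zerocopy::{AsBytes, FromBytes};

// `AsBytes` needs a stable layout with no padding, hence `repr(C)` and a field
// order (two u32s followed by a u64) that leaves no gaps.
#[repr(C)]
#[derive(AsBytes, FromBytes, Debug, Default)]
struct ExampleHeader {
    kind: u32,
    reserved: u32,
    sector: u64,
}

fn main() {
    let mut hdr = ExampleHeader { kind: 1, reserved: 0, sector: 42 };

    // Replaces the old `as_buf`: borrow the struct as a read-only byte slice
    // to place in a device-readable descriptor.
    let bytes: &[u8] = hdr.as_bytes();
    assert_eq!(bytes.len(), core::mem::size_of::<ExampleHeader>());

    // Replaces `as_buf_mut` (requires `FromBytes` as well): a mutable byte view
    // for buffers the device writes into, such as `BlkResp`.
    hdr.as_bytes_mut().fill(0);
    assert_eq!(hdr.kind, 0);
}
```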
diff --git a/src/queue.rs b/src/queue.rs
index 4dc7c01..6dfc309 100644
--- a/src/queue.rs
+++ b/src/queue.rs
@@ -1,21 +1,23 @@
#[cfg(test)]
+use crate::hal::VirtAddr;
+use crate::hal::{BufferDirection, Dma, Hal};
+use crate::transport::Transport;
+use crate::{align_up, Error, Result, PAGE_SIZE};
+use bitflags::bitflags;
+#[cfg(test)]
use core::cmp::min;
use core::hint::spin_loop;
use core::mem::size_of;
use core::ptr::{self, addr_of_mut, NonNull};
use core::sync::atomic::{fence, Ordering};
-use super::*;
-use crate::transport::Transport;
-use bitflags::*;
-
/// The mechanism for bulk data transport on virtio devices.
///
/// Each device can have zero or more virtqueues.
#[derive(Debug)]
pub struct VirtQueue<H: Hal> {
/// DMA guard
- dma: DMA<H>,
+ dma: Dma<H>,
/// Descriptor table
desc: NonNull<[Descriptor]>,
/// Available ring
@@ -30,7 +32,7 @@ pub struct VirtQueue<H: Hal> {
/// This is both the number of descriptors, and the number of slots in the available and used
/// rings.
queue_size: u16,
- /// The number of used queues.
+ /// The number of descriptors currently in use.
num_used: u16,
/// The head desc index of the free list.
free_head: u16,
@@ -49,7 +51,7 @@ impl<H: Hal> VirtQueue<H> {
}
let layout = VirtQueueLayout::new(size);
// Allocate contiguous pages.
- let dma = DMA::new(layout.size / PAGE_SIZE)?;
+ let dma = Dma::new(layout.size / PAGE_SIZE)?;
transport.queue_set(
idx,
@@ -102,7 +104,7 @@ impl<H: Hal> VirtQueue<H> {
return Err(Error::InvalidParam);
}
if inputs.len() + outputs.len() + self.num_used as usize > self.queue_size as usize {
- return Err(Error::BufferTooSmall);
+ return Err(Error::QueueFull);
}
// allocate descriptors from free list
@@ -112,17 +114,9 @@ impl<H: Hal> VirtQueue<H> {
// Safe because self.desc is properly aligned, dereferenceable and initialised, and nothing
// else reads or writes the free descriptors during this block.
unsafe {
- for input in inputs.iter() {
- let mut desc = self.desc_ptr(self.free_head);
- (*desc).set_buf::<H>(NonNull::new(*input as *mut [u8]).unwrap());
- (*desc).flags = DescFlags::NEXT;
- last = self.free_head;
- self.free_head = (*desc).next;
- }
- for output in outputs.iter() {
+ for (buffer, direction) in input_output_iter(inputs, outputs) {
let desc = self.desc_ptr(self.free_head);
- (*desc).set_buf::<H>(NonNull::new(*output).unwrap());
- (*desc).flags = DescFlags::NEXT | DescFlags::WRITE;
+ (*desc).set_buf::<H>(buffer, direction, DescFlags::NEXT);
last = self.free_head;
self.free_head = (*desc).next;
}
@@ -172,13 +166,12 @@ impl<H: Hal> VirtQueue<H> {
// Notify the queue.
transport.notify(self.queue_idx);
+ // Wait until there is at least one element in the used ring.
while !self.can_pop() {
spin_loop();
}
- let (popped_token, length) = self.pop_used()?;
- assert_eq!(popped_token, token);
- Ok(length)
+ self.pop_used(token, inputs, outputs)
}
/// Returns a non-null pointer to the descriptor at the given index.
@@ -197,44 +190,75 @@ impl<H: Hal> VirtQueue<H> {
self.last_used_idx != unsafe { (*self.used.as_ptr()).idx }
}
+ /// Returns the descriptor index (a.k.a. token) of the next used element without popping it, or
+ /// `None` if the used ring is empty.
+ pub fn peek_used(&self) -> Option<u16> {
+ if self.can_pop() {
+ let last_used_slot = self.last_used_idx & (self.queue_size - 1);
+ // Safe because self.used points to a valid, aligned, initialised, dereferenceable,
+ // readable instance of UsedRing.
+ Some(unsafe { (*self.used.as_ptr()).ring[last_used_slot as usize].id as u16 })
+ } else {
+ None
+ }
+ }
+
/// Returns the number of free descriptors.
pub fn available_desc(&self) -> usize {
(self.queue_size - self.num_used) as usize
}
- /// Recycle descriptors in the list specified by head.
+ /// Unshares buffers in the list starting at descriptor index `head` and adds them to the free
+ /// list. Unsharing may involve copying data back to the original buffers, so they must be
+ /// passed in too.
///
/// This will push all linked descriptors at the front of the free list.
- fn recycle_descriptors(&mut self, mut head: u16) {
+ fn recycle_descriptors(&mut self, head: u16, inputs: &[*const [u8]], outputs: &[*mut [u8]]) {
let original_free_head = self.free_head;
self.free_head = head;
- loop {
- let desc = self.desc_ptr(head);
+ let mut next = Some(head);
+
+ for (buffer, direction) in input_output_iter(inputs, outputs) {
+ let desc = self.desc_ptr(next.expect("Descriptor chain was shorter than expected."));
+
// Safe because self.desc is properly aligned, dereferenceable and initialised, and
// nothing else reads or writes the descriptor during this block.
- unsafe {
- let flags = (*desc).flags;
+ let paddr = unsafe {
+ let paddr = (*desc).addr;
+ (*desc).unset_buf();
self.num_used -= 1;
- if flags.contains(DescFlags::NEXT) {
- head = (*desc).next;
- } else {
+ next = (*desc).next();
+ if next.is_none() {
(*desc).next = original_free_head;
- return;
}
- }
+ paddr
+ };
+
+ // Unshare the buffer (and perhaps copy its contents back to the original buffer).
+ H::unshare(paddr as usize, buffer, direction);
+ }
+
+ if next.is_some() {
+ panic!("Descriptor chain was longer than expected.");
}
}
- /// Get a token from device used buffers, return (token, len).
+ /// If the given token is next on the device used queue, pops it and returns the total buffer
+ /// length which was used (written) by the device.
///
/// Ref: linux virtio_ring.c virtqueue_get_buf_ctx
- pub fn pop_used(&mut self) -> Result<(u16, u32)> {
+ pub fn pop_used(
+ &mut self,
+ token: u16,
+ inputs: &[*const [u8]],
+ outputs: &[*mut [u8]],
+ ) -> Result<u32> {
if !self.can_pop() {
return Err(Error::NotReady);
}
- // read barrier
- fence(Ordering::SeqCst);
+ // Read barrier not necessary, as can_pop already has one.
+ // Get the index of the start of the descriptor chain for the next element in the used ring.
let last_used_slot = self.last_used_idx & (self.queue_size - 1);
let index;
let len;
@@ -245,10 +269,15 @@ impl<H: Hal> VirtQueue<H> {
len = (*self.used.as_ptr()).ring[last_used_slot as usize].len;
}
- self.recycle_descriptors(index);
+ if index != token {
+ // The device used a different descriptor chain to the one we were expecting.
+ return Err(Error::WrongToken);
+ }
+
+ self.recycle_descriptors(index, inputs, outputs);
self.last_used_idx = self.last_used_idx.wrapping_add(1);
- Ok((index, len))
+ Ok(len)
}
/// Return size of the queue.
@@ -294,12 +323,42 @@ pub(crate) struct Descriptor {
}
impl Descriptor {
+ /// Sets the buffer address, length and flags, and shares it with the device.
+ ///
/// # Safety
///
/// The caller must ensure that the buffer lives at least as long as the descriptor is active.
- unsafe fn set_buf<H: Hal>(&mut self, buf: NonNull<[u8]>) {
- self.addr = H::virt_to_phys(buf.as_ptr() as *mut u8 as usize) as u64;
+ unsafe fn set_buf<H: Hal>(
+ &mut self,
+ buf: NonNull<[u8]>,
+ direction: BufferDirection,
+ extra_flags: DescFlags,
+ ) {
+ self.addr = H::share(buf, direction) as u64;
self.len = buf.len() as u32;
+ self.flags = extra_flags
+ | match direction {
+ BufferDirection::DeviceToDriver => DescFlags::WRITE,
+ BufferDirection::DriverToDevice => DescFlags::empty(),
+ };
+ }
+
+ /// Sets the buffer address and length to 0.
+ ///
+ /// This must only be called once the device has finished using the descriptor.
+ fn unset_buf(&mut self) {
+ self.addr = 0;
+ self.len = 0;
+ }
+
+ /// Returns the index of the next descriptor in the chain if the `NEXT` flag is set, or `None`
+ /// if it is not (and thus this descriptor is the end of the chain).
+ fn next(&self) -> Option<u16> {
+ if self.flags.contains(DescFlags::NEXT) {
+ Some(self.next)
+ } else {
+ None
+ }
}
}
@@ -383,9 +442,8 @@ pub(crate) fn fake_write_to_queue(
);
remaining_data = &remaining_data[length_to_write..];
- if flags.contains(DescFlags::NEXT) {
- let next = descriptor.next as usize;
- descriptor = &(*descriptors)[next];
+ if let Some(next) = descriptor.next() {
+ descriptor = &(*descriptors)[next as usize];
} else {
assert_eq!(remaining_data.len(), 0);
break;
@@ -402,7 +460,10 @@ pub(crate) fn fake_write_to_queue(
#[cfg(test)]
mod tests {
use super::*;
- use crate::{hal::fake::FakeHal, transport::mmio::MODERN_VERSION};
+ use crate::{
+ hal::fake::FakeHal,
+ transport::mmio::{MmioTransport, VirtIOHeader, MODERN_VERSION},
+ };
use core::ptr::NonNull;
#[test]
@@ -449,14 +510,14 @@ mod tests {
}
#[test]
- fn add_too_big() {
+ fn add_too_many() {
let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
let mut queue = VirtQueue::<FakeHal>::new(&mut transport, 0, 4).unwrap();
assert_eq!(queue.available_desc(), 4);
assert_eq!(
unsafe { queue.add(&[&[], &[], &[]], &[&mut [], &mut []]) }.unwrap_err(),
- Error::BufferTooSmall
+ Error::QueueFull
);
}
@@ -521,3 +582,27 @@ mod tests {
}
}
}
+
+/// Returns an iterator over the buffers of first `inputs` and then `outputs`, paired with the
+/// corresponding `BufferDirection`.
+///
+/// Panics if any of the buffer pointers is null.
+fn input_output_iter<'a>(
+ inputs: &'a [*const [u8]],
+ outputs: &'a [*mut [u8]],
+) -> impl Iterator<Item = (NonNull<[u8]>, BufferDirection)> + 'a {
+ inputs
+ .iter()
+ .map(|input| {
+ (
+ NonNull::new(*input as *mut [u8]).unwrap(),
+ BufferDirection::DriverToDevice,
+ )
+ })
+ .chain(outputs.iter().map(|output| {
+ (
+ NonNull::new(*output).unwrap(),
+ BufferDirection::DeviceToDriver,
+ )
+ }))
+}
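
Taken together, the src/queue.rs changes above rework the request lifecycle: add() still returns the head descriptor index as a token, but pop_used() now takes that token plus the original buffers so that recycle_descriptors() can unshare each one through H::unshare (copying data back where needed), and a mismatched token is reported as Error::WrongToken rather than asserted. The new peek_used() lets a driver see which chain completed without consuming the used-ring entry. Below is a minimal sketch of the resulting call pattern, written as if it sat alongside the code in src/queue.rs; the function name, buffer sizes and explicit queue-index parameter are illustrative only and not part of this change.

// Illustrative sketch, not part of the diff. It mirrors the notify-and-wait
// helper patched above, with an extra peek_used() call to show the new API.
fn one_request_sketch<H: Hal>(
    queue: &mut VirtQueue<H>,
    transport: &mut impl Transport,
    queue_idx: u16,
) -> Result<u32> {
    let request = [0u8; 8]; // driver-to-device buffer (placeholder contents)
    let mut response = [0u8; 8]; // device-to-driver buffer

    // Safe because the buffers outlive the shared descriptors: we do not return
    // until the same chain has been popped (and unshared) again.
    let token = unsafe { queue.add(&[request.as_slice()], &[response.as_mut_slice()]) }?;

    // Tell the device there is work on this queue.
    transport.notify(queue_idx);

    // Busy-wait until the device publishes the chain on the used ring.
    while !queue.can_pop() {
        core::hint::spin_loop();
    }

    // peek_used() reports the head descriptor index (the token) without popping.
    assert_eq!(queue.peek_used(), Some(token));

    // pop_used() checks the token, lets the HAL unshare the buffers while the
    // descriptors are recycled, and returns only the length written by the device.
    queue.pop_used(token, &[request.as_slice()], &[response.as_mut_slice()])
}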
diff --git a/src/transport/fake.rs b/src/transport/fake.rs
index 40105ec..352c705 100644
--- a/src/transport/fake.rs
+++ b/src/transport/fake.rs
@@ -1,7 +1,7 @@
-use super::{DeviceStatus, Transport};
+use super::{DeviceStatus, DeviceType, Transport};
use crate::{
queue::{fake_write_to_queue, Descriptor},
- DeviceType, PhysAddr, Result,
+ PhysAddr, Result,
};
use alloc::{sync::Arc, vec::Vec};
use core::{any::TypeId, ptr::NonNull};
diff --git a/src/transport/mmio.rs b/src/transport/mmio.rs
index 8321ec6..b87ffe1 100644
--- a/src/transport/mmio.rs
+++ b/src/transport/mmio.rs
@@ -1,3 +1,5 @@
+//! MMIO transport for VirtIO.
+
use super::{DeviceStatus, DeviceType, Transport};
use crate::{
align_up,
@@ -437,7 +439,7 @@ impl Transport for MmioTransport {
volwrite!(self.header, queue_num, 0);
volwrite!(self.header, queue_desc_low, 0);
volwrite!(self.header, queue_desc_high, 0);
- volwrite!(self.header, queue_driver_low, 9);
+ volwrite!(self.header, queue_driver_low, 0);
volwrite!(self.header, queue_driver_high, 0);
volwrite!(self.header, queue_device_low, 0);
volwrite!(self.header, queue_device_high, 0);
diff --git a/src/transport/mod.rs b/src/transport/mod.rs
index 1156c09..02c2350 100644
--- a/src/transport/mod.rs
+++ b/src/transport/mod.rs
@@ -1,3 +1,5 @@
+//! VirtIO transports.
+
#[cfg(test)]
pub mod fake;
pub mod mmio;
diff --git a/src/transport/pci.rs b/src/transport/pci.rs
index 58584cb..b996c3c 100644
--- a/src/transport/pci.rs
+++ b/src/transport/pci.rs
@@ -85,7 +85,6 @@ pub struct PciTransport {
device_function: DeviceFunction,
/// The common configuration structure within some BAR.
common_cfg: NonNull<CommonCfg>,
- // TODO: Use a raw slice, once they are supported by our MSRV.
/// The start of the queue notification region within some BAR.
notify_region: NonNull<[WriteOnly<u16>]>,
notify_off_multiplier: u32,
diff --git a/src/transport/pci/bus.rs b/src/transport/pci/bus.rs
index 7bb42a8..dd6f520 100644
--- a/src/transport/pci/bus.rs
+++ b/src/transport/pci/bus.rs
@@ -8,10 +8,6 @@ use core::{
use log::warn;
const INVALID_READ: u32 = 0xffffffff;
-// PCI MMIO configuration region size.
-const AARCH64_PCI_CFG_SIZE: u32 = 0x1000000;
-// PCIe MMIO configuration region size.
-const AARCH64_PCIE_CFG_SIZE: u32 = 0x10000000;
/// The maximum number of devices on a bus.
const MAX_DEVICES: u8 = 32;
@@ -117,6 +113,16 @@ pub enum Cam {
Ecam,
}
+impl Cam {
+ /// Returns the total size in bytes of the memory-mapped region.
+ pub const fn size(self) -> u32 {
+ match self {
+ Self::MmioCam => 0x1000000,
+ Self::Ecam => 0x10000000,
+ }
+ }
+}
+
impl PciRoot {
/// Wraps the PCI root complex with the given MMIO base address.
///
@@ -155,19 +161,13 @@ impl PciRoot {
let bdf = (device_function.bus as u32) << 8
| (device_function.device as u32) << 3
| device_function.function as u32;
- let address;
- match self.cam {
- Cam::MmioCam => {
- address = bdf << 8 | register_offset as u32;
- // Ensure that address is within range.
- assert!(address < AARCH64_PCI_CFG_SIZE);
- }
- Cam::Ecam => {
- address = bdf << 12 | register_offset as u32;
- // Ensure that address is within range.
- assert!(address < AARCH64_PCIE_CFG_SIZE);
- }
- }
+ let address =
+ bdf << match self.cam {
+ Cam::MmioCam => 8,
+ Cam::Ecam => 12,
+ } | register_offset as u32;
+ // Ensure that address is within range.
+ assert!(address < self.cam.size());
// Ensure that address is word-aligned.
assert!(address & 0x3 == 0);
address
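
The rewritten configuration-address calculation above packs bus, device and function into a BDF value, shifts it by 8 bits for the legacy CAM or 12 bits for the ECAM, ORs in the register offset, and then bounds-checks the result against Cam::size() (0x1000000 or 0x10000000 bytes respectively). Below is a small standalone restatement of that arithmetic, as if appended to src/transport/pci/bus.rs; the free function and the example device numbers are illustrative only.

// Illustrative sketch, not part of the diff: the same address arithmetic as the
// hunk above, with bus/device/function passed in directly.
fn config_address_sketch(cam: Cam, bus: u8, device: u8, function: u8, register_offset: u8) -> u32 {
    let bdf = (bus as u32) << 8 | (device as u32) << 3 | function as u32;
    let address = bdf
        << match cam {
            Cam::MmioCam => 8,  // 256 bytes of configuration space per function
            Cam::Ecam => 12,    // 4 KiB of configuration space per function
        }
        | register_offset as u32;
    // Ensure that the address is within the mapped region and word-aligned.
    assert!(address < cam.size());
    assert!(address & 0x3 == 0);
    address
}

// For example, bus 0, device 3, function 0, register offset 0x08:
//   CAM:  (0 << 8 | 3 << 3 | 0) <<  8 | 0x08 = 0x1808
//   ECAM: (0 << 8 | 3 << 3 | 0) << 12 | 0x08 = 0x18008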
diff --git a/src/volatile.rs b/src/volatile.rs
index 01d97e2..b7059d1 100644
--- a/src/volatile.rs
+++ b/src/volatile.rs
@@ -1,5 +1,5 @@
/// An MMIO register which can only be read from.
-#[derive(Debug, Default)]
+#[derive(Default)]
#[repr(transparent)]
pub struct ReadOnly<T: Copy>(T);
@@ -11,12 +11,12 @@ impl<T: Copy> ReadOnly<T> {
}
/// An MMIO register which can only be written to.
-#[derive(Debug, Default)]
+#[derive(Default)]
#[repr(transparent)]
pub struct WriteOnly<T: Copy>(T);
/// An MMIO register which may be both read and written.
-#[derive(Debug, Default)]
+#[derive(Default)]
#[repr(transparent)]
pub struct Volatile<T: Copy>(T);