author     Dylan Reid <dgreid@chromium.org>   2021-03-11 20:48:54 -0800
committer  Dylan Reid <dgreid@chromium.org>   2021-03-11 20:48:54 -0800
commit     c6a3c47153b004bd79d435d7a62d7ac8c96b993a (patch)
tree       01f5cbf6af8930a21ed86d6cc87bbb7de5e5c5be
parent     6ae7e17c9ac65410e20c12d628539c4fc06cf16b (diff)
parent     ee3e8722706c984b3dfe12d3a130e92101b78e8f (diff)
download   vmm_vhost-c6a3c47153b004bd79d435d7a62d7ac8c96b993a.tar.gz

Merge remote-tracking branch 'cros/upstream/master' into HEAD

Change-Id: I81dfdf258d902b434e8a1b8339eb367cb7211153
-rw-r--r--  coverage_config_x86_64.json           2
-rw-r--r--  src/vhost_user/dummy_slave.rs        13
-rw-r--r--  src/vhost_user/master.rs             63
-rw-r--r--  src/vhost_user/message.rs            67
-rw-r--r--  src/vhost_user/mod.rs                27
-rw-r--r--  src/vhost_user/slave_req_handler.rs  65

6 files changed, 233 insertions(+), 4 deletions(-)
diff --git a/coverage_config_x86_64.json b/coverage_config_x86_64.json
index a4ed64f..2b2c164 100644
--- a/coverage_config_x86_64.json
+++ b/coverage_config_x86_64.json
@@ -1 +1 @@
-{"coverage_score": 81.3, "exclude_path": "src/vhost_kern/", "crate_features": "vhost-user-master,vhost-user-slave"} \ No newline at end of file
+{"coverage_score": 81.2, "exclude_path": "src/vhost_kern/", "crate_features": "vhost-user-master,vhost-user-slave"}
diff --git a/src/vhost_user/dummy_slave.rs b/src/vhost_user/dummy_slave.rs
index 9eedcbb..b2b83d2 100644
--- a/src/vhost_user/dummy_slave.rs
+++ b/src/vhost_user/dummy_slave.rs
@@ -8,6 +8,7 @@ use super::*;
pub const MAX_QUEUE_NUM: usize = 2;
pub const MAX_VRING_NUM: usize = 256;
+pub const MAX_MEM_SLOTS: usize = 32;
pub const VIRTIO_FEATURES: u64 = 0x40000003;
#[derive(Default)]
@@ -243,4 +244,16 @@ impl VhostUserSlaveReqHandlerMut for DummySlaveReqHandler {
}
Ok(())
}
+
+ fn get_max_mem_slots(&mut self) -> Result<u64> {
+ Ok(MAX_MEM_SLOTS as u64)
+ }
+
+ fn add_mem_region(&mut self, _region: &VhostUserSingleMemoryRegion, _fd: RawFd) -> Result<()> {
+ Ok(())
+ }
+
+ fn remove_mem_region(&mut self, _region: &VhostUserSingleMemoryRegion) -> Result<()> {
+ Ok(())
+ }
}
diff --git a/src/vhost_user/master.rs b/src/vhost_user/master.rs
index e80bbb8..cc79871 100644
--- a/src/vhost_user/master.rs
+++ b/src/vhost_user/master.rs
@@ -50,6 +50,15 @@ pub trait VhostUserMaster: VhostBackend {
/// Setup slave communication channel.
fn set_slave_request_fd(&mut self, fd: RawFd) -> Result<()>;
+
+ /// Query the maximum amount of memory slots supported by the backend.
+ fn get_max_mem_slots(&mut self) -> Result<u64>;
+
+ /// Add a new guest memory mapping for vhost to use.
+ fn add_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()>;
+
+ /// Remove a guest memory mapping from vhost.
+ fn remove_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()>;
}
fn error_code<T>(err: VhostUserError) -> Result<T> {
@@ -435,6 +444,60 @@ impl VhostUserMaster for Master {
node.send_request_header(MasterReq::SET_SLAVE_REQ_FD, Some(&fds))?;
Ok(())
}
+
+ fn get_max_mem_slots(&mut self) -> Result<u64> {
+ let mut node = self.node();
+ if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() == 0
+ {
+ return error_code(VhostUserError::InvalidOperation);
+ }
+
+ let hdr = node.send_request_header(MasterReq::GET_MAX_MEM_SLOTS, None)?;
+ let val = node.recv_reply::<VhostUserU64>(&hdr)?;
+
+ Ok(val.value)
+ }
+
+ fn add_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()> {
+ let mut node = self.node();
+ if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() == 0
+ {
+ return error_code(VhostUserError::InvalidOperation);
+ }
+ if region.memory_size == 0 || region.mmap_handle < 0 {
+ return error_code(VhostUserError::InvalidParam);
+ }
+
+ let body = VhostUserSingleMemoryRegion::new(
+ region.guest_phys_addr,
+ region.memory_size,
+ region.userspace_addr,
+ region.mmap_offset,
+ );
+ let fds = [region.mmap_handle];
+ let hdr = node.send_request_with_body(MasterReq::ADD_MEM_REG, &body, Some(&fds))?;
+ node.wait_for_ack(&hdr).map_err(|e| e.into())
+ }
+
+ fn remove_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()> {
+ let mut node = self.node();
+ if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() == 0
+ {
+ return error_code(VhostUserError::InvalidOperation);
+ }
+ if region.memory_size == 0 {
+ return error_code(VhostUserError::InvalidParam);
+ }
+
+ let body = VhostUserSingleMemoryRegion::new(
+ region.guest_phys_addr,
+ region.memory_size,
+ region.userspace_addr,
+ region.mmap_offset,
+ );
+ let hdr = node.send_request_with_body(MasterReq::REM_MEM_REG, &body, None)?;
+ node.wait_for_ack(&hdr).map_err(|e| e.into())
+ }
}
impl AsRawFd for Master {
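
For reference, a minimal master-side sketch of the flow these new VhostUserMaster methods enable once VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS has been acked. It mirrors the test added in src/vhost_user/mod.rs below; the function name, its arguments, and the crate-relative import paths are illustrative assumptions, not part of this patch.

    use std::fs::File;
    use std::os::unix::io::AsRawFd;
    // Assumed crate-relative paths, following the test module's imports below.
    use crate::vhost_user::{Master, VhostUserMaster};
    use crate::VhostUserMemoryRegionInfo;

    fn hotplug_region(master: &mut Master, backing_file: &File) {
        // GET_MAX_MEM_SLOTS: the backend reports how many regions it can track.
        let max_slots = master.get_max_mem_slots().unwrap();
        assert!(max_slots > 0);

        let region = VhostUserMemoryRegionInfo {
            guest_phys_addr: 0x10_0000, // example values matching the test below
            memory_size: 0x10_0000,
            userspace_addr: 0,
            mmap_offset: 0,
            mmap_handle: backing_file.as_raw_fd(),
        };

        // ADD_MEM_REG sends the descriptor plus the backing fd as ancillary data.
        master.add_mem_region(&region).unwrap();
        // REM_MEM_REG later drops the same region; no fd is attached.
        master.remove_mem_region(&region).unwrap();
    }
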
diff --git a/src/vhost_user/message.rs b/src/vhost_user/message.rs
index 8600410..ea2df4e 100644
--- a/src/vhost_user/message.rs
+++ b/src/vhost_user/message.rs
@@ -114,10 +114,30 @@ pub enum MasterReq {
POSTCOPY_END = 30,
/// Get a shared buffer from slave.
GET_INFLIGHT_FD = 31,
- /// Send the shared inflight buffer back to slave
+ /// Send the shared inflight buffer back to slave.
SET_INFLIGHT_FD = 32,
+ /// Sets the GPU protocol socket file descriptor.
+ GPU_SET_SOCKET = 33,
+ /// Ask the vhost user backend to disable all rings and reset all internal
+ /// device state to the initial state.
+ RESET_DEVICE = 34,
+ /// Indicate that a buffer was added to the vring instead of signalling it
+ /// using the vring’s kick file descriptor.
+ VRING_KICK = 35,
+ /// Return a u64 payload containing the maximum number of memory slots.
+ GET_MAX_MEM_SLOTS = 36,
+ /// Update the memory tables by adding the region described.
+ ADD_MEM_REG = 37,
+ /// Update the memory tables by removing the region described.
+ REM_MEM_REG = 38,
+ /// Notify the backend with updated device status as defined in the VIRTIO
+ /// specification.
+ SET_STATUS = 39,
+ /// Query the backend for its device status as defined in the VIRTIO
+ /// specification.
+ GET_STATUS = 40,
/// Upper bound of valid commands.
- MAX_CMD = 33,
+ MAX_CMD = 41,
}
impl Into<u32> for MasterReq {
@@ -459,6 +479,49 @@ impl VhostUserMsgValidator for VhostUserMemoryRegion {
/// Payload of the VhostUserMemory message.
pub type VhostUserMemoryPayload = Vec<VhostUserMemoryRegion>;
+/// Single memory region descriptor as payload for ADD_MEM_REG and REM_MEM_REG
+/// requests.
+#[repr(C)]
+#[derive(Default, Clone, Copy)]
+pub struct VhostUserSingleMemoryRegion {
+ /// Padding for correct alignment
+ padding: u64,
+ /// Guest physical address of the memory region.
+ pub guest_phys_addr: u64,
+ /// Size of the memory region.
+ pub memory_size: u64,
+ /// Virtual address in the current process.
+ pub user_addr: u64,
+ /// Offset where region starts in the mapped memory.
+ pub mmap_offset: u64,
+}
+
+impl VhostUserSingleMemoryRegion {
+ /// Create a new instance.
+ pub fn new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self {
+ VhostUserSingleMemoryRegion {
+ padding: 0,
+ guest_phys_addr,
+ memory_size,
+ user_addr,
+ mmap_offset,
+ }
+ }
+}
+
+impl VhostUserMsgValidator for VhostUserSingleMemoryRegion {
+ fn is_valid(&self) -> bool {
+ if self.memory_size == 0
+ || self.guest_phys_addr.checked_add(self.memory_size).is_none()
+ || self.user_addr.checked_add(self.memory_size).is_none()
+ || self.mmap_offset.checked_add(self.memory_size).is_none()
+ {
+ return false;
+ }
+ true
+ }
+}
+
/// Vring state descriptor.
#[repr(packed)]
#[derive(Default)]
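
For reference, a minimal sketch exercising the new VhostUserSingleMemoryRegion payload and its validator. The 40-byte size follows from the five repr(C) u64 fields above (one padding word plus the four descriptor fields); the function name and import path are assumptions, not part of this patch.

    use std::mem::size_of;
    // Assumed import path for the message types defined above.
    use crate::vhost_user::message::{VhostUserMsgValidator, VhostUserSingleMemoryRegion};

    fn check_single_region_payload() {
        // Five u64 fields in repr(C): 40 bytes on the wire for ADD_MEM_REG/REM_MEM_REG.
        assert_eq!(size_of::<VhostUserSingleMemoryRegion>(), 40);

        // A well-formed region passes validation.
        let ok = VhostUserSingleMemoryRegion::new(0x10_0000, 0x10_0000, 0, 0);
        assert!(ok.is_valid());

        // A region whose end would overflow the 64-bit address space is rejected.
        let bad = VhostUserSingleMemoryRegion::new(u64::MAX, 0x1000, 0, 0);
        assert!(!bad.is_valid());
    }
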
diff --git a/src/vhost_user/mod.rs b/src/vhost_user/mod.rs
index 079b78d..c1464e8 100644
--- a/src/vhost_user/mod.rs
+++ b/src/vhost_user/mod.rs
@@ -180,6 +180,7 @@ mod dummy_slave;
#[cfg(all(test, feature = "vhost-user-master", feature = "vhost-user-slave"))]
mod tests {
+ use std::fs::File;
use std::os::unix::io::AsRawFd;
use std::path::Path;
use std::sync::{Arc, Barrier, Mutex};
@@ -190,7 +191,7 @@ mod tests {
use super::*;
use crate::backend::VhostBackend;
use crate::{VhostUserMemoryRegionInfo, VringConfigData};
- use tempfile::{Builder, TempDir};
+ use tempfile::{tempfile, Builder, TempDir};
fn temp_dir() -> TempDir {
Builder::new().prefix("/tmp/vhost_test").tempdir().unwrap()
@@ -336,6 +337,15 @@ mod tests {
slave.handle_request().unwrap();
slave.handle_request().unwrap();
+ // get_max_mem_slots()
+ slave.handle_request().unwrap();
+
+ // add_mem_region()
+ slave.handle_request().unwrap();
+
+ // remove_mem_region()
+ slave.handle_request().unwrap();
+
sbar.wait();
});
@@ -398,6 +408,21 @@ mod tests {
master.set_vring_kick(0, &eventfd).unwrap();
master.set_vring_err(0, &eventfd).unwrap();
+ let max_mem_slots = master.get_max_mem_slots().unwrap();
+ assert_eq!(max_mem_slots, 32);
+
+ let region_file = tempfile().unwrap();
+ let region = VhostUserMemoryRegionInfo {
+ guest_phys_addr: 0x10_0000,
+ memory_size: 0x10_0000,
+ userspace_addr: 0,
+ mmap_offset: 0,
+ mmap_handle: region_file.as_raw_fd(),
+ };
+ master.add_mem_region(&region).unwrap();
+
+ master.remove_mem_region(&region).unwrap();
+
mbar.wait();
}
diff --git a/src/vhost_user/slave_req_handler.rs b/src/vhost_user/slave_req_handler.rs
index 3b44e4c..18459a2 100644
--- a/src/vhost_user/slave_req_handler.rs
+++ b/src/vhost_user/slave_req_handler.rs
@@ -62,6 +62,9 @@ pub trait VhostUserSlaveReqHandler {
fn get_config(&self, offset: u32, size: u32, flags: VhostUserConfigFlags) -> Result<Vec<u8>>;
fn set_config(&self, offset: u32, buf: &[u8], flags: VhostUserConfigFlags) -> Result<()>;
fn set_slave_req_fd(&self, _vu_req: SlaveFsCacheReq) {}
+ fn get_max_mem_slots(&self) -> Result<u64>;
+ fn add_mem_region(&self, region: &VhostUserSingleMemoryRegion, fd: RawFd) -> Result<()>;
+ fn remove_mem_region(&self, region: &VhostUserSingleMemoryRegion) -> Result<()>;
}
/// Services provided to the master by the slave without interior mutability.
@@ -102,6 +105,9 @@ pub trait VhostUserSlaveReqHandlerMut {
) -> Result<Vec<u8>>;
fn set_config(&mut self, offset: u32, buf: &[u8], flags: VhostUserConfigFlags) -> Result<()>;
fn set_slave_req_fd(&mut self, _vu_req: SlaveFsCacheReq) {}
+ fn get_max_mem_slots(&mut self) -> Result<u64>;
+ fn add_mem_region(&mut self, region: &VhostUserSingleMemoryRegion, fd: RawFd) -> Result<()>;
+ fn remove_mem_region(&mut self, region: &VhostUserSingleMemoryRegion) -> Result<()>;
}
impl<T: VhostUserSlaveReqHandlerMut> VhostUserSlaveReqHandler for Mutex<T> {
@@ -190,6 +196,18 @@ impl<T: VhostUserSlaveReqHandlerMut> VhostUserSlaveReqHandler for Mutex<T> {
fn set_slave_req_fd(&self, vu_req: SlaveFsCacheReq) {
self.lock().unwrap().set_slave_req_fd(vu_req)
}
+
+ fn get_max_mem_slots(&self) -> Result<u64> {
+ self.lock().unwrap().get_max_mem_slots()
+ }
+
+ fn add_mem_region(&self, region: &VhostUserSingleMemoryRegion, fd: RawFd) -> Result<()> {
+ self.lock().unwrap().add_mem_region(region, fd)
+ }
+
+ fn remove_mem_region(&self, region: &VhostUserSingleMemoryRegion) -> Result<()> {
+ self.lock().unwrap().remove_mem_region(region)
+ }
}
/// Server to handle service requests from masters from the master communication channel.
@@ -417,6 +435,52 @@ impl<S: VhostUserSlaveReqHandler> SlaveReqHandler<S> {
self.check_request_size(&hdr, size, hdr.get_size() as usize)?;
self.set_slave_req_fd(&hdr, rfds)?;
}
+ MasterReq::GET_MAX_MEM_SLOTS => {
+ if self.acked_protocol_features
+ & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits()
+ == 0
+ {
+ return Err(Error::InvalidOperation);
+ }
+ self.check_request_size(&hdr, size, 0)?;
+ let num = self.backend.get_max_mem_slots()?;
+ let msg = VhostUserU64::new(num);
+ self.send_reply_message(&hdr, &msg)?;
+ }
+ MasterReq::ADD_MEM_REG => {
+ if self.acked_protocol_features
+ & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits()
+ == 0
+ {
+ return Err(Error::InvalidOperation);
+ }
+ let fd = if let Some(fds) = &rfds {
+ if fds.len() != 1 {
+ return Err(Error::InvalidParam);
+ }
+ fds[0]
+ } else {
+ return Err(Error::InvalidParam);
+ };
+
+ let msg =
+ self.extract_request_body::<VhostUserSingleMemoryRegion>(&hdr, size, &buf)?;
+ let res = self.backend.add_mem_region(&msg, fd);
+ self.send_ack_message(&hdr, res)?;
+ }
+ MasterReq::REM_MEM_REG => {
+ if self.acked_protocol_features
+ & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits()
+ == 0
+ {
+ return Err(Error::InvalidOperation);
+ }
+
+ let msg =
+ self.extract_request_body::<VhostUserSingleMemoryRegion>(&hdr, size, &buf)?;
+ let res = self.backend.remove_mem_region(&msg);
+ self.send_ack_message(&hdr, res)?;
+ }
_ => {
return Err(Error::InvalidMessage);
}
@@ -637,6 +701,7 @@ impl<S: VhostUserSlaveReqHandler> SlaveReqHandler<S> {
MasterReq::SET_LOG_FD => Ok(rfds),
MasterReq::SET_SLAVE_REQ_FD => Ok(rfds),
MasterReq::SET_INFLIGHT_FD => Ok(rfds),
+ MasterReq::ADD_MEM_REG => Ok(rfds),
_ => {
if rfds.is_some() {
Endpoint::<MasterReq>::close_rfds(rfds);