author     Liu Jiang <gerry@linux.alibaba.com>    2021-02-19 18:59:54 +0800
committer  Sergio Lopez <slp@sinrega.org>         2021-03-01 12:50:56 +0100
commit     d748d5bdcf70b7d565a0600a57e8cf08976babb4 (patch)
tree       8197ecde892613ac6c7343fc459e19593889f5f1
parent     8c6919bf60bd641398ddd53864fbc74d75548837 (diff)
download   vmm_vhost-d748d5bdcf70b7d565a0600a57e8cf08976babb4.tar.gz
Introduce VhostBackendMut trait
Originally the VhostBackend trait was designed to take a mutable self reference,
which leads to the common usage pattern Arc<RwLock<T: VhostBackend>>. This
pattern may enforce serialization among multiple threads. So rename the original
VhostBackend trait to VhostBackendMut, and introduce a new VhostBackend trait
with interior mutability to improve performance by removing the serialization.

Signed-off-by: Liu Jiang <gerry@linux.alibaba.com>
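
For illustration, a minimal sketch (not part of this commit) of how the split is
meant to be consumed. MyBackend and negotiate() are hypothetical names, and the
remaining VhostBackendMut methods are assumed to be implemented analogously, so
this is an outline rather than compilable code:

    use std::sync::{Arc, RwLock};

    // Hypothetical backend implementing the &mut self trait.
    struct MyBackend { features: u64 }

    impl VhostBackendMut for MyBackend {
        fn get_features(&mut self) -> Result<u64> {
            Ok(self.features)
        }
        fn set_features(&mut self, features: u64) -> Result<()> {
            self.features = features;
            Ok(())
        }
        // ...the other VhostBackendMut methods would be implemented here...
    }

    fn negotiate(backend: &Arc<RwLock<MyBackend>>) -> Result<()> {
        // The blanket `impl<T: VhostBackendMut> VhostBackend for RwLock<T>`
        // added by this commit exposes &self methods on the wrapper, so each
        // call takes the lock internally instead of the caller holding a
        // write guard around the whole object.
        let features = backend.get_features()?;
        backend.set_features(features)
    }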
-rw-r--r--   src/backend.rs            328
-rw-r--r--   src/vhost_kern/mod.rs      28
-rw-r--r--   src/vhost_user/master.rs   32
-rw-r--r--   src/vhost_user/mod.rs       2
4 files changed, 354 insertions, 36 deletions
diff --git a/src/backend.rs b/src/backend.rs
index 2d1a4a2..9dafef7 100644
--- a/src/backend.rs
+++ b/src/backend.rs
@@ -1,4 +1,4 @@
-// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
+// Copyright (C) 2019-2021 Alibaba Cloud. All rights reserved.
// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@@ -9,14 +9,18 @@
//! Common traits and structs for vhost-kern and vhost-user backend drivers.
-use super::Result;
+use std::cell::RefCell;
use std::os::unix::io::RawFd;
+use std::sync::RwLock;
+
use vmm_sys_util::eventfd::EventFd;
+use super::Result;
+
/// Maximum number of memory regions supported.
pub const VHOST_MAX_MEMORY_REGIONS: usize = 255;
-/// Vring/virtque configuration data.
+/// Vring configuration data.
pub struct VringConfigData {
/// Maximum queue size supported by the driver.
pub queue_max_size: u16,
@@ -65,22 +69,109 @@ pub struct VhostUserMemoryRegionInfo {
pub userspace_addr: u64,
/// Optional offset where region starts in the mapped memory.
pub mmap_offset: u64,
- /// Optional file diescriptor for mmap
+ /// Optional file descriptor for mmap.
pub mmap_handle: RawFd,
}
-/// An interface for setting up vhost-based backend drivers.
+/// An interface for setting up vhost-based backend drivers with interior mutability.
///
/// Vhost devices are subset of virtio devices, which improve virtio device's performance by
/// delegating data plane operations to dedicated IO service processes. Vhost devices use the
/// same virtqueue layout as virtio devices to allow vhost devices to be mapped directly to
/// virtio devices.
+///
/// The purpose of vhost is to implement a subset of a virtio device's functionality outside the
/// VMM process. Typically fast paths for IO operations are delegated to the dedicated IO service
/// processes, and slow path for device configuration are still handled by the VMM process. It may
/// also be used to control access permissions of virtio backend devices.
pub trait VhostBackend: std::marker::Sized {
/// Get a bitmask of supported virtio/vhost features.
+ fn get_features(&self) -> Result<u64>;
+
+ /// Inform the vhost subsystem which features to enable.
+ /// This should be a subset of supported features from get_features().
+ ///
+ /// # Arguments
+ /// * `features` - Bitmask of features to set.
+ fn set_features(&self, features: u64) -> Result<()>;
+
+ /// Set the current process as the owner of the vhost backend.
+ /// This must be run before any other vhost commands.
+ fn set_owner(&self) -> Result<()>;
+
+ /// Used to be sent to request disabling all rings.
+ /// This is no longer used.
+ fn reset_owner(&self) -> Result<()>;
+
+ /// Set the guest memory mappings for vhost to use.
+ fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()>;
+
+ /// Set base address for page modification logging.
+ fn set_log_base(&self, base: u64, fd: Option<RawFd>) -> Result<()>;
+
+ /// Specify an eventfd file descriptor to signal on log write.
+ fn set_log_fd(&self, fd: RawFd) -> Result<()>;
+
+ /// Set the number of descriptors in the vring.
+ ///
+ /// # Arguments
+ /// * `queue_index` - Index of the queue to set descriptor count for.
+ /// * `num` - Number of descriptors in the queue.
+ fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()>;
+
+ /// Set the addresses for a given vring.
+ ///
+ /// # Arguments
+ /// * `queue_index` - Index of the queue to set addresses for.
+ /// * `config_data` - Configuration data for a vring.
+ fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()>;
+
+ /// Set the first index to look for available descriptors.
+ ///
+ /// # Arguments
+ /// * `queue_index` - Index of the queue to modify.
+ /// * `base` - Index where available descriptors start.
+ fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()>;
+
+ /// Get the available vring base offset.
+ fn get_vring_base(&self, queue_index: usize) -> Result<u32>;
+
+ /// Set the eventfd to trigger when buffers have been used by the host.
+ ///
+ /// # Arguments
+ /// * `queue_index` - Index of the queue to modify.
+ /// * `fd` - EventFd to trigger.
+ fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()>;
+
+ /// Set the eventfd that will be signaled by the guest when buffers are
+ /// available for the host to process.
+ ///
+ /// # Arguments
+ /// * `queue_index` - Index of the queue to modify.
+ /// * `fd` - EventFd that will be signaled from guest.
+ fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()>;
+
+ /// Set the eventfd that will be signaled by the guest when error happens.
+ ///
+ /// # Arguments
+ /// * `queue_index` - Index of the queue to modify.
+ /// * `fd` - EventFd that will be signaled from guest.
+ fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()>;
+}
+
+/// An interface for setting up vhost-based backend drivers.
+///
+/// Vhost devices are subset of virtio devices, which improve virtio device's performance by
+/// delegating data plane operations to dedicated IO service processes. Vhost devices use the
+/// same virtqueue layout as virtio devices to allow vhost devices to be mapped directly to
+/// virtio devices.
+///
+/// The purpose of vhost is to implement a subset of a virtio device's functionality outside the
+/// VMM process. Typically fast paths for IO operations are delegated to the dedicated IO service
+/// processes, and slow path for device configuration are still handled by the VMM process. It may
+/// also be used to control access permissions of virtio backend devices.
+pub trait VhostBackendMut: std::marker::Sized {
+ /// Get a bitmask of supported virtio/vhost features.
fn get_features(&mut self) -> Result<u64>;
/// Inform the vhost subsystem which features to enable.
@@ -154,10 +245,237 @@ pub trait VhostBackend: std::marker::Sized {
fn set_vring_err(&mut self, queue_index: usize, fd: &EventFd) -> Result<()>;
}
+impl<T: VhostBackendMut> VhostBackend for RwLock<T> {
+ fn get_features(&self) -> Result<u64> {
+ self.write().unwrap().get_features()
+ }
+
+ fn set_features(&self, features: u64) -> Result<()> {
+ self.write().unwrap().set_features(features)
+ }
+
+ fn set_owner(&self) -> Result<()> {
+ self.write().unwrap().set_owner()
+ }
+
+ fn reset_owner(&self) -> Result<()> {
+ self.write().unwrap().reset_owner()
+ }
+
+ fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> {
+ self.write().unwrap().set_mem_table(regions)
+ }
+
+ fn set_log_base(&self, base: u64, fd: Option<RawFd>) -> Result<()> {
+ self.write().unwrap().set_log_base(base, fd)
+ }
+
+ fn set_log_fd(&self, fd: RawFd) -> Result<()> {
+ self.write().unwrap().set_log_fd(fd)
+ }
+
+ fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {
+ self.write().unwrap().set_vring_num(queue_index, num)
+ }
+
+ fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {
+ self.write()
+ .unwrap()
+ .set_vring_addr(queue_index, config_data)
+ }
+
+ fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> {
+ self.write().unwrap().set_vring_base(queue_index, base)
+ }
+
+ fn get_vring_base(&self, queue_index: usize) -> Result<u32> {
+ self.write().unwrap().get_vring_base(queue_index)
+ }
+
+ fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
+ self.write().unwrap().set_vring_call(queue_index, fd)
+ }
+
+ fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
+ self.write().unwrap().set_vring_kick(queue_index, fd)
+ }
+
+ fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
+ self.write().unwrap().set_vring_err(queue_index, fd)
+ }
+}
+
+impl<T: VhostBackendMut> VhostBackend for RefCell<T> {
+ fn get_features(&self) -> Result<u64> {
+ self.borrow_mut().get_features()
+ }
+
+ fn set_features(&self, features: u64) -> Result<()> {
+ self.borrow_mut().set_features(features)
+ }
+
+ fn set_owner(&self) -> Result<()> {
+ self.borrow_mut().set_owner()
+ }
+
+ fn reset_owner(&self) -> Result<()> {
+ self.borrow_mut().reset_owner()
+ }
+
+ fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> {
+ self.borrow_mut().set_mem_table(regions)
+ }
+
+ fn set_log_base(&self, base: u64, fd: Option<RawFd>) -> Result<()> {
+ self.borrow_mut().set_log_base(base, fd)
+ }
+
+ fn set_log_fd(&self, fd: RawFd) -> Result<()> {
+ self.borrow_mut().set_log_fd(fd)
+ }
+
+ fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {
+ self.borrow_mut().set_vring_num(queue_index, num)
+ }
+
+ fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {
+ self.borrow_mut().set_vring_addr(queue_index, config_data)
+ }
+
+ fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> {
+ self.borrow_mut().set_vring_base(queue_index, base)
+ }
+
+ fn get_vring_base(&self, queue_index: usize) -> Result<u32> {
+ self.borrow_mut().get_vring_base(queue_index)
+ }
+
+ fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
+ self.borrow_mut().set_vring_call(queue_index, fd)
+ }
+
+ fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
+ self.borrow_mut().set_vring_kick(queue_index, fd)
+ }
+
+ fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
+ self.borrow_mut().set_vring_err(queue_index, fd)
+ }
+}
#[cfg(test)]
mod tests {
use VringConfigData;
+ struct MockBackend {}
+
+ impl VhostBackendMut for MockBackend {
+ fn get_features(&mut self) -> Result<u64> {
+ Ok(0x1)
+ }
+
+ fn set_features(&mut self, features: u64) -> Result<()> {
+ assert_eq!(features, 0x1);
+ Ok(())
+ }
+
+ fn set_owner(&mut self) -> Result<()> {
+ Ok(())
+ }
+
+ fn reset_owner(&mut self) -> Result<()> {
+ Ok(())
+ }
+
+ fn set_mem_table(&mut self, _regions: &[VhostUserMemoryRegionInfo]) -> Result<()> {
+ Ok(())
+ }
+
+ fn set_log_base(&mut self, base: u64, fd: Option<RawFd>) -> Result<()> {
+ assert_eq!(base, 0x100);
+ assert_eq!(fd, Some(100));
+ Ok(())
+ }
+
+ fn set_log_fd(&mut self, fd: RawFd) -> Result<()> {
+ assert_eq!(fd, 100);
+ Ok(())
+ }
+
+ fn set_vring_num(&mut self, queue_index: usize, num: u16) -> Result<()> {
+ assert_eq!(queue_index, 1);
+ assert_eq!(num, 256);
+ Ok(())
+ }
+
+ fn set_vring_addr(
+ &mut self,
+ queue_index: usize,
+ _config_data: &VringConfigData,
+ ) -> Result<()> {
+ assert_eq!(queue_index, 1);
+ Ok(())
+ }
+
+ fn set_vring_base(&mut self, queue_index: usize, base: u16) -> Result<()> {
+ assert_eq!(queue_index, 1);
+ assert_eq!(base, 2);
+ Ok(())
+ }
+
+ fn get_vring_base(&mut self, queue_index: usize) -> Result<u32> {
+ assert_eq!(queue_index, 1);
+ Ok(2)
+ }
+
+ fn set_vring_call(&mut self, queue_index: usize, _fd: &EventFd) -> Result<()> {
+ assert_eq!(queue_index, 1);
+ Ok(())
+ }
+
+ fn set_vring_kick(&mut self, queue_index: usize, _fd: &EventFd) -> Result<()> {
+ assert_eq!(queue_index, 1);
+ Ok(())
+ }
+
+ fn set_vring_err(&mut self, queue_index: usize, _fd: &EventFd) -> Result<()> {
+ assert_eq!(queue_index, 1);
+ Ok(())
+ }
+ }
+
+ #[test]
+ fn test_vring_backend_mut() {
+ let b = RwLock::new(MockBackend {});
+
+ assert_eq!(b.get_features().unwrap(), 0x1);
+ b.set_features(0x1).unwrap();
+ b.set_owner().unwrap();
+ b.reset_owner().unwrap();
+ b.set_mem_table(&[]).unwrap();
+ b.set_log_base(0x100, Some(100)).unwrap();
+ b.set_log_fd(100).unwrap();
+ b.set_vring_num(1, 256).unwrap();
+
+ let config = VringConfigData {
+ queue_max_size: 0x1000,
+ queue_size: 0x2000,
+ flags: 0x0,
+ desc_table_addr: 0x4000,
+ used_ring_addr: 0x5000,
+ avail_ring_addr: 0x6000,
+ log_addr: None,
+ };
+ b.set_vring_addr(1, &config).unwrap();
+
+ b.set_vring_base(1, 2).unwrap();
+ assert_eq!(b.get_vring_base(1).unwrap(), 2);
+
+ let eventfd = EventFd::new(0).unwrap();
+ b.set_vring_call(1, &eventfd).unwrap();
+ b.set_vring_kick(1, &eventfd).unwrap();
+ b.set_vring_err(1, &eventfd).unwrap();
+ }
+
#[test]
fn test_vring_config_data() {
let mut config = VringConfigData {
diff --git a/src/vhost_kern/mod.rs b/src/vhost_kern/mod.rs
index 350e134..248cbae 100644
--- a/src/vhost_kern/mod.rs
+++ b/src/vhost_kern/mod.rs
@@ -87,20 +87,20 @@ pub trait VhostKernBackend: AsRawFd {
impl<T: VhostKernBackend> VhostBackend for T {
/// Set the current process as the owner of this file descriptor.
/// This must be run before any other vhost ioctls.
- fn set_owner(&mut self) -> Result<()> {
+ fn set_owner(&self) -> Result<()> {
// This ioctl is called on a valid vhost fd and has its return value checked.
let ret = unsafe { ioctl(self, VHOST_SET_OWNER()) };
ioctl_result(ret, ())
}
- fn reset_owner(&mut self) -> Result<()> {
+ fn reset_owner(&self) -> Result<()> {
// This ioctl is called on a valid vhost fd and has its return value checked.
let ret = unsafe { ioctl(self, VHOST_RESET_OWNER()) };
ioctl_result(ret, ())
}
/// Get a bitmask of supported virtio/vhost features.
- fn get_features(&mut self) -> Result<u64> {
+ fn get_features(&self) -> Result<u64> {
let mut avail_features: u64 = 0;
// This ioctl is called on a valid vhost fd and has its return value checked.
let ret = unsafe { ioctl_with_mut_ref(self, VHOST_GET_FEATURES(), &mut avail_features) };
@@ -112,14 +112,14 @@ impl<T: VhostKernBackend> VhostBackend for T {
///
/// # Arguments
/// * `features` - Bitmask of features to set.
- fn set_features(&mut self, features: u64) -> Result<()> {
+ fn set_features(&self, features: u64) -> Result<()> {
// This ioctl is called on a valid vhost fd and has its return value checked.
let ret = unsafe { ioctl_with_ref(self, VHOST_SET_FEATURES(), &features) };
ioctl_result(ret, ())
}
/// Set the guest memory mappings for vhost to use.
- fn set_mem_table(&mut self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> {
+ fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> {
if regions.is_empty() || regions.len() > VHOST_MAX_MEMORY_REGIONS {
return Err(Error::InvalidGuestMemory);
}
@@ -148,7 +148,7 @@ impl<T: VhostKernBackend> VhostBackend for T {
///
/// # Arguments
/// * `base` - Base address for page modification logging.
- fn set_log_base(&mut self, base: u64, fd: Option<RawFd>) -> Result<()> {
+ fn set_log_base(&self, base: u64, fd: Option<RawFd>) -> Result<()> {
if fd.is_some() {
return Err(Error::LogAddress);
}
@@ -159,7 +159,7 @@ impl<T: VhostKernBackend> VhostBackend for T {
}
/// Specify an eventfd file descriptor to signal on log write.
- fn set_log_fd(&mut self, fd: RawFd) -> Result<()> {
+ fn set_log_fd(&self, fd: RawFd) -> Result<()> {
// This ioctl is called on a valid vhost fd and has its return value checked.
let val: i32 = fd;
let ret = unsafe { ioctl_with_ref(self, VHOST_SET_LOG_FD(), &val) };
@@ -171,7 +171,7 @@ impl<T: VhostKernBackend> VhostBackend for T {
/// # Arguments
/// * `queue_index` - Index of the queue to set descriptor count for.
/// * `num` - Number of descriptors in the queue.
- fn set_vring_num(&mut self, queue_index: usize, num: u16) -> Result<()> {
+ fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {
let vring_state = vhost_vring_state {
index: queue_index as u32,
num: u32::from(num),
@@ -187,7 +187,7 @@ impl<T: VhostKernBackend> VhostBackend for T {
/// # Arguments
/// * `queue_index` - Index of the queue to set addresses for.
/// * `config_data` - Vring config data.
- fn set_vring_addr(&mut self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {
+ fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {
if !self.is_valid(config_data) {
return Err(Error::InvalidQueue);
}
@@ -212,7 +212,7 @@ impl<T: VhostKernBackend> VhostBackend for T {
/// # Arguments
/// * `queue_index` - Index of the queue to modify.
/// * `num` - Index where available descriptors start.
- fn set_vring_base(&mut self, queue_index: usize, base: u16) -> Result<()> {
+ fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> {
let vring_state = vhost_vring_state {
index: queue_index as u32,
num: u32::from(base),
@@ -224,7 +224,7 @@ impl<T: VhostKernBackend> VhostBackend for T {
}
/// Get a bitmask of supported virtio/vhost features.
- fn get_vring_base(&mut self, queue_index: usize) -> Result<u32> {
+ fn get_vring_base(&self, queue_index: usize) -> Result<u32> {
let vring_state = vhost_vring_state {
index: queue_index as u32,
num: 0,
@@ -239,7 +239,7 @@ impl<T: VhostKernBackend> VhostBackend for T {
/// # Arguments
/// * `queue_index` - Index of the queue to modify.
/// * `fd` - EventFd to trigger.
- fn set_vring_call(&mut self, queue_index: usize, fd: &EventFd) -> Result<()> {
+ fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
let vring_file = vhost_vring_file {
index: queue_index as u32,
fd: fd.as_raw_fd(),
@@ -256,7 +256,7 @@ impl<T: VhostKernBackend> VhostBackend for T {
/// # Arguments
/// * `queue_index` - Index of the queue to modify.
/// * `fd` - EventFd that will be signaled from guest.
- fn set_vring_kick(&mut self, queue_index: usize, fd: &EventFd) -> Result<()> {
+ fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
let vring_file = vhost_vring_file {
index: queue_index as u32,
fd: fd.as_raw_fd(),
@@ -272,7 +272,7 @@ impl<T: VhostKernBackend> VhostBackend for T {
/// # Arguments
/// * `queue_index` - Index of the queue to modify.
/// * `fd` - EventFd that will be signaled from the backend.
- fn set_vring_err(&mut self, queue_index: usize, fd: &EventFd) -> Result<()> {
+ fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
let vring_file = vhost_vring_file {
index: queue_index as u32,
fd: fd.as_raw_fd(),
diff --git a/src/vhost_user/master.rs b/src/vhost_user/master.rs
index ffed909..2651b84 100644
--- a/src/vhost_user/master.rs
+++ b/src/vhost_user/master.rs
@@ -115,7 +115,7 @@ impl Master {
impl VhostBackend for Master {
/// Get from the underlying vhost implementation the feature bitmask.
- fn get_features(&mut self) -> Result<u64> {
+ fn get_features(&self) -> Result<u64> {
let mut node = self.node.lock().unwrap();
let hdr = node.send_request_header(MasterReq::GET_FEATURES, None)?;
let val = node.recv_reply::<VhostUserU64>(&hdr)?;
@@ -124,7 +124,7 @@ impl VhostBackend for Master {
}
/// Enable features in the underlying vhost implementation using a bitmask.
- fn set_features(&mut self, features: u64) -> Result<()> {
+ fn set_features(&self, features: u64) -> Result<()> {
let mut node = self.node.lock().unwrap();
let val = VhostUserU64::new(features);
let _ = node.send_request_with_body(MasterReq::SET_FEATURES, &val, None)?;
@@ -135,7 +135,7 @@ impl VhostBackend for Master {
}
/// Set the current Master as an owner of the session.
- fn set_owner(&mut self) -> Result<()> {
+ fn set_owner(&self) -> Result<()> {
// We unwrap() the return value to assert that we are not expecting threads to ever fail
// while holding the lock.
let mut node = self.node.lock().unwrap();
@@ -145,7 +145,7 @@ impl VhostBackend for Master {
Ok(())
}
- fn reset_owner(&mut self) -> Result<()> {
+ fn reset_owner(&self) -> Result<()> {
let mut node = self.node.lock().unwrap();
let _ = node.send_request_header(MasterReq::RESET_OWNER, None)?;
// Don't wait for ACK here because the protocol feature negotiation process hasn't been
@@ -155,7 +155,7 @@ impl VhostBackend for Master {
/// Set the memory map regions on the slave so it can translate the vring
/// addresses. In the ancillary data there is an array of file descriptors
- fn set_mem_table(&mut self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> {
+ fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> {
if regions.is_empty() || regions.len() > MAX_ATTACHED_FD_ENTRIES {
return error_code(VhostUserError::InvalidParam);
}
@@ -187,7 +187,7 @@ impl VhostBackend for Master {
// Clippy doesn't seem to know that if let with && is still experimental
#[allow(clippy::unnecessary_unwrap)]
- fn set_log_base(&mut self, base: u64, fd: Option<RawFd>) -> Result<()> {
+ fn set_log_base(&self, base: u64, fd: Option<RawFd>) -> Result<()> {
let mut node = self.node.lock().unwrap();
let val = VhostUserU64::new(base);
@@ -202,7 +202,7 @@ impl VhostBackend for Master {
Ok(())
}
- fn set_log_fd(&mut self, fd: RawFd) -> Result<()> {
+ fn set_log_fd(&self, fd: RawFd) -> Result<()> {
let mut node = self.node.lock().unwrap();
let fds = [fd];
node.send_request_header(MasterReq::SET_LOG_FD, Some(&fds))?;
@@ -210,7 +210,7 @@ impl VhostBackend for Master {
}
/// Set the size of the queue.
- fn set_vring_num(&mut self, queue_index: usize, num: u16) -> Result<()> {
+ fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {
let mut node = self.node.lock().unwrap();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
@@ -222,7 +222,7 @@ impl VhostBackend for Master {
}
/// Sets the addresses of the different aspects of the vring.
- fn set_vring_addr(&mut self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {
+ fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {
let mut node = self.node.lock().unwrap();
if queue_index as u64 >= node.max_queue_num
|| config_data.flags & !(VhostUserVringAddrFlags::all().bits()) != 0
@@ -236,7 +236,7 @@ impl VhostBackend for Master {
}
/// Sets the base offset in the available vring.
- fn set_vring_base(&mut self, queue_index: usize, base: u16) -> Result<()> {
+ fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> {
let mut node = self.node.lock().unwrap();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
@@ -247,7 +247,7 @@ impl VhostBackend for Master {
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
- fn get_vring_base(&mut self, queue_index: usize) -> Result<u32> {
+ fn get_vring_base(&self, queue_index: usize) -> Result<u32> {
let mut node = self.node.lock().unwrap();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
@@ -263,7 +263,7 @@ impl VhostBackend for Master {
/// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
/// is set when there is no file descriptor in the ancillary data. This signals that polling
/// will be used instead of waiting for the call.
- fn set_vring_call(&mut self, queue_index: usize, fd: &EventFd) -> Result<()> {
+ fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
let mut node = self.node.lock().unwrap();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
@@ -276,7 +276,7 @@ impl VhostBackend for Master {
/// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
/// is set when there is no file descriptor in the ancillary data. This signals that polling
/// should be used instead of waiting for a kick.
- fn set_vring_kick(&mut self, queue_index: usize, fd: &EventFd) -> Result<()> {
+ fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
let mut node = self.node.lock().unwrap();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
@@ -288,7 +288,7 @@ impl VhostBackend for Master {
/// Set the event file descriptor to signal when error occurs.
/// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
/// is set when there is no file descriptor in the ancillary data.
- fn set_vring_err(&mut self, queue_index: usize, fd: &EventFd) -> Result<()> {
+ fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
let mut node = self.node.lock().unwrap();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
@@ -654,7 +654,7 @@ mod tests {
let listener = Listener::new(UNIX_SOCKET_MASTER, true).unwrap();
listener.set_nonblocking(true).unwrap();
- let mut master = Master::connect(UNIX_SOCKET_MASTER, 1).unwrap();
+ let master = Master::connect(UNIX_SOCKET_MASTER, 1).unwrap();
let mut slave = Endpoint::<MasterReq>::from_stream(listener.accept().unwrap().unwrap());
// Send two messages continuously
@@ -692,7 +692,7 @@ mod tests {
#[test]
#[ignore]
fn test_features() {
- let (mut master, mut peer) = create_pair(UNIX_SOCKET_MASTER3);
+ let (master, mut peer) = create_pair(UNIX_SOCKET_MASTER3);
master.set_owner().unwrap();
let (hdr, rfds) = peer.recv_header().unwrap();
diff --git a/src/vhost_user/mod.rs b/src/vhost_user/mod.rs
index 48a93ff..4259d0f 100644
--- a/src/vhost_user/mod.rs
+++ b/src/vhost_user/mod.rs
@@ -203,7 +203,7 @@ mod tests {
#[test]
fn test_set_owner() {
let slave_be = Arc::new(Mutex::new(DummySlaveReqHandler::new()));
- let (mut master, mut slave) =
+ let (master, mut slave) =
create_slave("/tmp/vhost_user_lib_unit_test_owner", slave_be.clone());
assert_eq!(slave_be.lock().unwrap().owned, false);