Diffstat (limited to 'src/vhost_user/master.rs')
-rw-r--r--  src/vhost_user/master.rs  257
1 file changed, 234 insertions(+), 23 deletions(-)
diff --git a/src/vhost_user/master.rs b/src/vhost_user/master.rs
index cc754b5..be2892e 100644
--- a/src/vhost_user/master.rs
+++ b/src/vhost_user/master.rs
@@ -6,7 +6,7 @@
use std::mem;
use std::os::unix::io::{AsRawFd, RawFd};
use std::os::unix::net::UnixStream;
-use std::sync::{Arc, Mutex};
+use std::sync::{Arc, Mutex, MutexGuard};
use vmm_sys_util::eventfd::EventFd;
@@ -78,6 +78,10 @@ impl Master {
}
}
+ fn node(&self) -> MutexGuard<MasterInternal> {
+ self.node.lock().unwrap()
+ }
+
/// Create a new instance from a Unix stream socket.
pub fn from_stream(sock: UnixStream, max_queue_num: u64) -> Self {
Self::new(Endpoint::<MasterReq>::from_stream(sock), max_queue_num)
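The new node() accessor above is the whole refactor in miniature: keep the shared state behind a Mutex and centralize the lock-and-unwrap policy in one method so every call site stays short. A minimal, self-contained sketch of the same pattern (the names here are illustrative, not part of this crate):

use std::sync::{Mutex, MutexGuard};

struct Shared {
    counter: u64,
}

struct Owner {
    node: Mutex<Shared>,
}

impl Owner {
    // Panic on a poisoned lock, mirroring the unwrap() policy used by Master.
    fn node(&self) -> MutexGuard<Shared> {
        self.node.lock().unwrap()
    }

    fn bump(&self) -> u64 {
        let mut node = self.node();
        node.counter += 1;
        node.counter
    }
}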
@@ -116,7 +120,7 @@ impl Master {
impl VhostBackend for Master {
/// Get from the underlying vhost implementation the feature bitmask.
fn get_features(&self) -> Result<u64> {
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
let hdr = node.send_request_header(MasterReq::GET_FEATURES, None)?;
let val = node.recv_reply::<VhostUserU64>(&hdr)?;
node.virtio_features = val.value;
@@ -125,7 +129,7 @@ impl VhostBackend for Master {
/// Enable features in the underlying vhost implementation using a bitmask.
fn set_features(&self, features: u64) -> Result<()> {
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
let val = VhostUserU64::new(features);
let _ = node.send_request_with_body(MasterReq::SET_FEATURES, &val, None)?;
// Don't wait for ACK here because the protocol feature negotiation process hasn't been
@@ -138,7 +142,7 @@ impl VhostBackend for Master {
fn set_owner(&self) -> Result<()> {
// We unwrap() the return value to assert that we are not expecting threads to ever fail
// while holding the lock.
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
let _ = node.send_request_header(MasterReq::SET_OWNER, None)?;
// Don't wait for ACK here because the protocol feature negotiation process hasn't been
// completed yet.
@@ -146,7 +150,7 @@ impl VhostBackend for Master {
}
fn reset_owner(&self) -> Result<()> {
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
let _ = node.send_request_header(MasterReq::RESET_OWNER, None)?;
// Don't wait for ACK here because the protocol feature negotiation process hasn't been
// completed yet.
@@ -174,7 +178,7 @@ impl VhostBackend for Master {
ctx.append(&reg, region.mmap_handle);
}
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
let body = VhostUserMemory::new(ctx.regions.len() as u32);
let (_, payload, _) = unsafe { ctx.regions.align_to::<u8>() };
let hdr = node.send_request_with_payload(
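The payload handed to send_request_with_payload() above comes from viewing the region array as raw bytes with slice::align_to::<u8>(). A small sketch of that serialization trick on a stand-in #[repr(C)] struct (illustrative only; the unsafe block is sound because u8 has alignment 1, but the message types must be padding-free or padding bytes would be sent as garbage):

#[repr(C)]
#[derive(Copy, Clone, Default)]
struct WireRegion {
    guest_addr: u64,
    size: u64,
}

fn regions_as_bytes(regions: &[WireRegion]) -> &[u8] {
    // align_to::<u8>() never splits the slice: u8 is 1-byte aligned, so the
    // prefix and suffix are always empty and `bytes` covers the whole input.
    let (prefix, bytes, suffix) = unsafe { regions.align_to::<u8>() };
    debug_assert!(prefix.is_empty() && suffix.is_empty());
    bytes
}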
@@ -189,7 +193,7 @@ impl VhostBackend for Master {
// Clippy doesn't seem to know that if let with && is still experimental
#[allow(clippy::unnecessary_unwrap)]
fn set_log_base(&self, base: u64, fd: Option<RawFd>) -> Result<()> {
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
let val = VhostUserU64::new(base);
if node.acked_protocol_features & VhostUserProtocolFeatures::LOG_SHMFD.bits() != 0
@@ -204,7 +208,7 @@ impl VhostBackend for Master {
}
fn set_log_fd(&self, fd: RawFd) -> Result<()> {
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
let fds = [fd];
node.send_request_header(MasterReq::SET_LOG_FD, Some(&fds))?;
Ok(())
@@ -212,7 +216,7 @@ impl VhostBackend for Master {
/// Set the size of the queue.
fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
}
@@ -224,7 +228,7 @@ impl VhostBackend for Master {
/// Sets the addresses of the different aspects of the vring.
fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
if queue_index as u64 >= node.max_queue_num
|| config_data.flags & !(VhostUserVringAddrFlags::all().bits()) != 0
{
@@ -238,7 +242,7 @@ impl VhostBackend for Master {
/// Sets the base offset in the available vring.
fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> {
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
}
@@ -249,7 +253,7 @@ impl VhostBackend for Master {
}
fn get_vring_base(&self, queue_index: usize) -> Result<u32> {
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
}
@@ -265,7 +269,7 @@ impl VhostBackend for Master {
/// is set when there is no file descriptor in the ancillary data. This signals that polling
/// will be used instead of waiting for the call.
fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
}
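The vring doc comments in this and the next two hunks all describe the same payload layout: bits 0..7 carry the vring index, and bit 8 is the invalid-FD flag, set when no file descriptor rides in the ancillary data so the backend falls back to polling. A sketch of just that encoding (constant and function names are illustrative, not this crate's API):

use std::os::unix::io::RawFd;

const VRING_IDX_MASK: u64 = 0xff; // bits 0..7: vring index
const VRING_NOFD_FLAG: u64 = 0x100; // bit 8: no fd in the ancillary data

fn vring_payload(queue_index: usize, fd: Option<RawFd>) -> u64 {
    let mut val = queue_index as u64 & VRING_IDX_MASK;
    if fd.is_none() {
        // No eventfd attached: tell the backend to poll instead of waiting.
        val |= VRING_NOFD_FLAG;
    }
    val
}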
@@ -278,7 +282,7 @@ impl VhostBackend for Master {
/// is set when there is no file descriptor in the ancillary data. This signals that polling
/// should be used instead of waiting for a kick.
fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
}
@@ -290,7 +294,7 @@ impl VhostBackend for Master {
/// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
/// is set when there is no file descriptor in the ancillary data.
fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
}
@@ -301,7 +305,7 @@ impl VhostBackend for Master {
impl VhostUserMaster for Master {
fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures> {
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
let flag = VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
if node.virtio_features & flag == 0 || node.acked_virtio_features & flag == 0 {
return error_code(VhostUserError::InvalidOperation);
@@ -318,7 +322,7 @@ impl VhostUserMaster for Master {
}
fn set_protocol_features(&mut self, features: VhostUserProtocolFeatures) -> Result<()> {
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
let flag = VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
if node.virtio_features & flag == 0 || node.acked_virtio_features & flag == 0 {
return error_code(VhostUserError::InvalidOperation);
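The guards in get_protocol_features()/set_protocol_features(), together with the "don't wait for ACK" comments earlier, encode a fixed handshake order: negotiate virtio features first and acknowledge PROTOCOL_FEATURES, only then negotiate protocol features. A consumer-side sketch of that order, assuming `master` is already connected and the VhostBackend/VhostUserMaster traits are in scope (error handling elided to keep the shape visible):

fn negotiate(master: &mut Master) -> Result<()> {
    // Step 1: virtio feature negotiation; acking everything offered keeps the
    // PROTOCOL_FEATURES bit set when the backend offers it.
    let features = master.get_features()?;
    master.set_features(features)?;

    // Step 2: protocol feature negotiation, only valid after step 1.
    let proto = master.get_protocol_features()?;
    master.set_protocol_features(proto)?;
    Ok(())
}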
@@ -333,7 +337,7 @@ impl VhostUserMaster for Master {
}
fn get_queue_num(&mut self) -> Result<u64> {
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
if !node.is_feature_mq_available() {
return error_code(VhostUserError::InvalidOperation);
}
@@ -348,7 +352,7 @@ impl VhostUserMaster for Master {
}
fn set_vring_enable(&mut self, queue_index: usize, enable: bool) -> Result<()> {
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
// set_vring_enable() is supported only when PROTOCOL_FEATURES has been enabled.
if node.acked_virtio_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() == 0 {
return error_code(VhostUserError::InvalidOperation);
@@ -374,7 +378,7 @@ impl VhostUserMaster for Master {
return error_code(VhostUserError::InvalidParam);
}
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
// depends on VhostUserProtocolFeatures::CONFIG
if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIG.bits() == 0 {
return error_code(VhostUserError::InvalidOperation);
@@ -409,7 +413,7 @@ impl VhostUserMaster for Master {
return error_code(VhostUserError::InvalidParam);
}
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
// depends on VhostUserProtocolFeatures::CONFIG
if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIG.bits() == 0 {
return error_code(VhostUserError::InvalidOperation);
@@ -420,7 +424,7 @@ impl VhostUserMaster for Master {
}
fn set_slave_request_fd(&mut self, fd: RawFd) -> Result<()> {
- let mut node = self.node.lock().unwrap();
+ let mut node = self.node();
if node.acked_protocol_features & VhostUserProtocolFeatures::SLAVE_REQ.bits() == 0 {
return error_code(VhostUserError::InvalidOperation);
}
@@ -433,7 +437,7 @@ impl VhostUserMaster for Master {
impl AsRawFd for Master {
fn as_raw_fd(&self) -> RawFd {
- let node = self.node.lock().unwrap();
+ let node = self.node();
node.main_sock.as_raw_fd()
}
}
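Exposing the raw fd lets callers plug the master's socket into an external event loop. A minimal sketch with libc::poll (the poller is an arbitrary choice for illustration; any epoll/poll wrapper is driven the same way):

fn wait_readable(master: &Master) -> std::io::Result<()> {
    let mut pfd = libc::pollfd {
        fd: master.as_raw_fd(),
        events: libc::POLLIN,
        revents: 0,
    };
    // Block until the backend side of the socket has something to read.
    if unsafe { libc::poll(&mut pfd, 1, -1) } < 0 {
        return Err(std::io::Error::last_os_error());
    }
    Ok(())
}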
@@ -783,4 +787,211 @@ mod tests {
peer.send_message(&hdr, &msg, None).unwrap();
assert!(master.get_protocol_features().is_err());
}
+
+ #[test]
+ fn test_master_set_config_negative() {
+ let path = temp_path();
+ let (mut master, _peer) = create_pair(&path);
+ let buf = vec![0x0; MAX_MSG_SIZE + 1];
+
+ master
+ .set_config(0x100, VhostUserConfigFlags::WRITABLE, &buf[0..4])
+ .unwrap_err();
+
+ {
+ let mut node = master.node();
+ node.virtio_features = 0xffff_ffff;
+ node.acked_virtio_features = 0xffff_ffff;
+ node.protocol_features = 0xffff_ffff;
+ node.acked_protocol_features = 0xffff_ffff;
+ }
+
+ master
+ .set_config(0x100, VhostUserConfigFlags::WRITABLE, &buf[0..4])
+ .unwrap();
+ master
+ .set_config(0x0, VhostUserConfigFlags::WRITABLE, &buf[0..4])
+ .unwrap_err();
+ master
+ .set_config(0x1000, VhostUserConfigFlags::WRITABLE, &buf[0..4])
+ .unwrap_err();
+ master
+ .set_config(
+ 0x100,
+ unsafe { VhostUserConfigFlags::from_bits_unchecked(0xffff_ffff) },
+ &buf[0..4],
+ )
+ .unwrap_err();
+ master
+ .set_config(0x100, VhostUserConfigFlags::WRITABLE, &buf)
+ .unwrap_err();
+ master
+ .set_config(0x100, VhostUserConfigFlags::WRITABLE, &[])
+ .unwrap_err();
+ }
+
+ fn create_pair2() -> (Master, Endpoint<MasterReq>) {
+ let path = temp_path();
+ let (master, peer) = create_pair(&path);
+
+ {
+ let mut node = master.node();
+ node.virtio_features = 0xffff_ffff;
+ node.acked_virtio_features = 0xffff_ffff;
+ node.protocol_features = 0xffff_ffff;
+ node.acked_protocol_features = 0xffff_ffff;
+ }
+
+ (master, peer)
+ }
+
+ #[test]
+ fn test_master_get_config_negative0() {
+ let (mut master, mut peer) = create_pair2();
+ let buf = vec![0x0; MAX_MSG_SIZE + 1];
+
+ let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
+ peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
+ .unwrap();
+ assert!(master
+ .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
+ .is_ok());
+
+ hdr.set_code(MasterReq::GET_FEATURES);
+ peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
+ .unwrap();
+ assert!(master
+ .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
+ .is_err());
+ hdr.set_code(MasterReq::GET_CONFIG);
+ }
+
+ #[test]
+ fn test_master_get_config_negative1() {
+ let (mut master, mut peer) = create_pair2();
+ let buf = vec![0x0; MAX_MSG_SIZE + 1];
+
+ let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
+ peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
+ .unwrap();
+ assert!(master
+ .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
+ .is_ok());
+
+ hdr.set_reply(false);
+ peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
+ .unwrap();
+ assert!(master
+ .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
+ .is_err());
+ }
+
+ #[test]
+ fn test_master_get_config_negative2() {
+ let (mut master, mut peer) = create_pair2();
+ let buf = vec![0x0; MAX_MSG_SIZE + 1];
+
+ let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
+ peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
+ .unwrap();
+ assert!(master
+ .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
+ .is_ok());
+ }
+
+ #[test]
+ fn test_master_get_config_negative3() {
+ let (mut master, mut peer) = create_pair2();
+ let buf = vec![0x0; MAX_MSG_SIZE + 1];
+
+ let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
+ peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
+ .unwrap();
+ assert!(master
+ .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
+ .is_ok());
+
+ msg.offset = 0;
+ peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
+ .unwrap();
+ assert!(master
+ .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
+ .is_err());
+ }
+
+ #[test]
+ fn test_master_get_config_negative4() {
+ let (mut master, mut peer) = create_pair2();
+ let buf = vec![0x0; MAX_MSG_SIZE + 1];
+
+ let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
+ peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
+ .unwrap();
+ assert!(master
+ .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
+ .is_ok());
+
+ msg.offset = 0x101;
+ peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
+ .unwrap();
+ assert!(master
+ .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
+ .is_err());
+ }
+
+ #[test]
+ fn test_master_get_config_negative5() {
+ let (mut master, mut peer) = create_pair2();
+ let buf = vec![0x0; MAX_MSG_SIZE + 1];
+
+ let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
+ peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
+ .unwrap();
+ assert!(master
+ .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
+ .is_ok());
+
+ msg.offset = (MAX_MSG_SIZE + 1) as u32;
+ peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
+ .unwrap();
+ assert!(master
+ .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
+ .is_err());
+ }
+
+ #[test]
+ fn test_master_get_config_negative6() {
+ let (mut master, mut peer) = create_pair2();
+ let buf = vec![0x0; MAX_MSG_SIZE + 1];
+
+ let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
+ peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
+ .unwrap();
+ assert!(master
+ .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
+ .is_ok());
+
+ msg.size = 6;
+ peer.send_message_with_payload(&hdr, &msg, &buf[0..6], None)
+ .unwrap();
+ assert!(master
+ .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
+ .is_err());
+ }
+
+ #[test]
+ fn test_master_set_mem_table_failure() {
+ let (master, _peer) = create_pair2();
+
+ master.set_mem_table(&[]).unwrap_err();
+ let tables = vec![VhostUserMemoryRegionInfo::default(); MAX_ATTACHED_FD_ENTRIES + 1];
+ master.set_mem_table(&tables).unwrap_err();
+ }
}
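Taken together, the wrapped calls compose into the usual device-setup flow on the master side. A rough end-to-end sketch, assuming a backend is already listening on `socket_path`, that `VhostBackend` and `VhostUserMaster` are imported, and that two queues suffice (all illustrative choices, not requirements of this patch):

use std::os::unix::net::UnixStream;

fn connect_master(socket_path: &str) -> Result<Master> {
    // Error handling for the connect itself is elided for brevity.
    let sock = UnixStream::connect(socket_path).unwrap();
    let mut master = Master::from_stream(sock, 2 /* max_queue_num */);

    master.set_owner()?;

    // Feature negotiation, in the order enforced by the checks in this file.
    let features = master.get_features()?;
    master.set_features(features)?;
    let proto = master.get_protocol_features()?;
    master.set_protocol_features(proto)?;

    Ok(master)
}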