author     Liu Bo <bo.liu@linux.alibaba.com>                 2020-02-12 15:02:01 -0800
committer  Andreea Florescu <andreea.florescu15@gmail.com>   2020-09-04 17:59:53 +0300
commit     3e87bd964e48f7ed1e85894d0cb9a1369798b5b7 (patch)
tree       7893ad0ef91621ff92cf0cd676efebd511210af5
parent     09bd21f75e7b26f38ceef13e61d933bacd80f7f2 (diff)
download   vmm_vhost-3e87bd964e48f7ed1e85894d0cb9a1369798b5b7.tar.gz
vhost-user: add SlaveFsCacheReq to handle map/unmap
This introduces SlaveFsCacheReq, which implements VhostUserMasterReqHandler to handle virtio-fs map/unmap requests and forward them to the master over the slave request channel.

Signed-off-by: Liu Bo <bo.liu@linux.alibaba.com>
-rw-r--r--  src/vhost_user/mod.rs             |  8
-rw-r--r--  src/vhost_user/slave_fs_cache.rs  | 94
2 files changed, 102 insertions(+), 0 deletions(-)
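For context, a rough usage sketch (not part of this patch): a slave that has received the slave request socket from the master can wrap it in SlaveFsCacheReq and forward virtio-fs map/unmap requests through it. The crate paths in the use statements, the Default impl on VhostUserFSSlaveMsg, and the backing file handle are illustrative assumptions, not something this patch defines.

// Hypothetical sketch of a virtiofs backend using SlaveFsCacheReq.
// Module paths and VhostUserFSSlaveMsg::default() are assumptions; adapt
// them to the actual crate layout and to the request being serviced.
use std::fs::File;
use std::io;
use std::os::unix::io::AsRawFd;
use std::os::unix::net::UnixStream;

use vhost::vhost_user::message::VhostUserFSSlaveMsg;
use vhost::vhost_user::{SlaveFsCacheReq, VhostUserMasterReqHandler};

fn map_and_unmap(slave_req_sock: UnixStream, backing: &File) -> io::Result<()> {
    // The socket is the one handed over by the master via
    // VHOST_USER_SET_SLAVE_REQ_FD.
    let mut req = SlaveFsCacheReq::from_stream(slave_req_sock);

    // A real backend would fill in cache offsets, lengths and flags from the
    // FUSE setupmapping request it is servicing; default() is a stand-in.
    let msg = VhostUserFSSlaveMsg::default();

    // Ask the master to map the file into the DAX window, then unmap it.
    req.fs_slave_map(&msg, backing.as_raw_fd())?;
    req.fs_slave_unmap(&msg)?;
    Ok(())
}
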
diff --git a/src/vhost_user/mod.rs b/src/vhost_user/mod.rs
index a452232..b1f724e 100644
--- a/src/vhost_user/mod.rs
+++ b/src/vhost_user/mod.rs
@@ -41,6 +41,9 @@ pub use self::slave::SlaveListener;
mod slave_req_handler;
#[cfg(feature = "vhost-user-slave")]
pub use self::slave_req_handler::{SlaveReqHandler, VhostUserSlaveReqHandler};
+#[cfg(feature = "vhost-user-slave")]
+mod slave_fs_cache;
+pub use self::slave_fs_cache::SlaveFsCacheReq;
pub mod sock_ctrl_msg;
@@ -69,6 +72,8 @@ pub enum Error {
     SocketRetry(std::io::Error),
     /// Failure from the slave side.
     SlaveInternalError,
+    /// Failure from the master side.
+    MasterInternalError,
     /// Virtio/protocol features mismatch.
     FeatureMismatch,
     /// Error from request handler
@@ -89,6 +94,7 @@ impl std::fmt::Display for Error {
             Error::SocketBroken(e) => write!(f, "socket is broken: {}", e),
             Error::SocketRetry(e) => write!(f, "temporary socket error: {}", e),
             Error::SlaveInternalError => write!(f, "slave internal error"),
+            Error::MasterInternalError => write!(f, "master internal error"),
             Error::FeatureMismatch => write!(f, "virtio/protocol features mismatch"),
             Error::ReqHandlerError(e) => write!(f, "handler failed to handle request: {}", e),
         }
@@ -105,6 +111,8 @@ impl Error {
             Error::SocketBroken(_) => true,
             // Slave internal error, hope it recovers on reconnect.
             Error::SlaveInternalError => true,
+            // Master internal error, hope it recovers on reconnect.
+            Error::MasterInternalError => true,
             // Should just retry the IO operation instead of rebuilding the underlying connection.
             Error::SocketRetry(_) => false,
             Error::InvalidParam | Error::InvalidOperation => false,
diff --git a/src/vhost_user/slave_fs_cache.rs b/src/vhost_user/slave_fs_cache.rs
new file mode 100644
index 0000000..a4a1e5e
--- /dev/null
+++ b/src/vhost_user/slave_fs_cache.rs
@@ -0,0 +1,94 @@
+// Copyright (C) 2020 Alibaba Cloud Computing. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+use super::connection::Endpoint;
+use super::message::*;
+use super::{Error, HandlerResult, Result, VhostUserMasterReqHandler};
+use std::io;
+use std::mem;
+use std::os::unix::io::RawFd;
+use std::os::unix::net::UnixStream;
+use std::sync::{Arc, Mutex};
+
+struct SlaveFsCacheReqInternal {
+    sock: Endpoint<SlaveReq>,
+}
+
+/// A vhost-user slave endpoint which sends fs cache requests to the master.
+#[derive(Clone)]
+pub struct SlaveFsCacheReq {
+    // underlying Unix domain socket for communication
+    node: Arc<Mutex<SlaveFsCacheReqInternal>>,
+
+    // whether the endpoint has encountered any failure
+    error: Option<i32>,
+}
+
+impl SlaveFsCacheReq {
+    fn new(ep: Endpoint<SlaveReq>) -> Self {
+        SlaveFsCacheReq {
+            node: Arc::new(Mutex::new(SlaveFsCacheReqInternal { sock: ep })),
+            error: None,
+        }
+    }
+
+    /// Create a new instance.
+    pub fn from_stream(sock: UnixStream) -> Self {
+        Self::new(Endpoint::<SlaveReq>::from_stream(sock))
+    }
+
+    fn send_message(
+        &mut self,
+        flags: SlaveReq,
+        fs: &VhostUserFSSlaveMsg,
+        fds: Option<&[RawFd]>,
+    ) -> Result<()> {
+        self.check_state()?;
+
+        let len = mem::size_of::<VhostUserFSSlaveMsg>();
+        let mut hdr = VhostUserMsgHeader::new(flags, 0, len as u32);
+        hdr.set_need_reply(true);
+        self.node.lock().unwrap().sock.send_message(&hdr, fs, fds)?;
+
+        self.wait_for_ack(&hdr)
+    }
+
+    fn wait_for_ack(&mut self, hdr: &VhostUserMsgHeader<SlaveReq>) -> Result<()> {
+        self.check_state()?;
+        let (reply, body, rfds) = self.node.lock().unwrap().sock.recv_body::<VhostUserU64>()?;
+        if !reply.is_reply_for(hdr) || rfds.is_some() || !body.is_valid() {
+            Endpoint::<SlaveReq>::close_rfds(rfds);
+            return Err(Error::InvalidMessage);
+        }
+        if body.value != 0 {
+            return Err(Error::MasterInternalError);
+        }
+        Ok(())
+    }
+
+    fn check_state(&self) -> Result<()> {
+        match self.error {
+            Some(e) => Err(Error::SocketBroken(std::io::Error::from_raw_os_error(e))),
+            None => Ok(()),
+        }
+    }
+
+    /// Mark endpoint as failed with specified error code.
+    pub fn set_failed(&mut self, error: i32) {
+        self.error = Some(error);
+    }
+}
+
+impl VhostUserMasterReqHandler for SlaveFsCacheReq {
+    /// Forward virtio-fs map file requests to the master.
+    fn fs_slave_map(&mut self, fs: &VhostUserFSSlaveMsg, fd: RawFd) -> HandlerResult<()> {
+        self.send_message(SlaveReq::FS_MAP, fs, Some(&[fd]))
+            .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("{}", e)))
+    }
+
+    /// Forward virtio-fs unmap file requests to the master.
+    fn fs_slave_unmap(&mut self, fs: &VhostUserFSSlaveMsg) -> HandlerResult<()> {
+        self.send_message(SlaveReq::FS_UNMAP, fs, None)
+            .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("{}", e)))
+    }
+}