diff --git a/coverage_config_x86_64.json b/coverage_config_x86_64.json index 7bd20e29..5b299062 100644 --- a/coverage_config_x86_64.json +++ b/coverage_config_x86_64.json @@ -1,5 +1,5 @@ { - "coverage_score": 73.04, + "coverage_score": 73.79, "exclude_path": "vhost/src/vhost_kern/", "crate_features": "vhost/vhost-user-frontend,vhost/vhost-user-backend,vhost-user-backend/postcopy" } diff --git a/vhost/CHANGELOG.md b/vhost/CHANGELOG.md index 3baf2447..241191a5 100644 --- a/vhost/CHANGELOG.md +++ b/vhost/CHANGELOG.md @@ -2,6 +2,7 @@ ## [Unreleased] ### Added +- [[#251]](https://github.com/rust-vmm/vhost/pull/251) Add `SHMEM_MAP` and `SHMEM_UNMAP` support - [[#241]](https://github.com/rust-vmm/vhost/pull/241) Add shared objects support - [[#239]](https://github.com/rust-vmm/vhost/pull/239) Add support for `VHOST_USER_GPU_SET_SOCKET` diff --git a/vhost/src/vhost_user/backend_req.rs b/vhost/src/vhost_user/backend_req.rs index f95f8123..56cbac89 100644 --- a/vhost/src/vhost_user/backend_req.rs +++ b/vhost/src/vhost_user/backend_req.rs @@ -183,6 +183,16 @@ impl VhostUserFrontendReqHandler for Backend { Some(&[fd.as_raw_fd()]), ) } + + /// Forward vhost-user memory map file request to the frontend. + fn shmem_map(&self, req: &VhostUserMMap, fd: &dyn AsRawFd) -> HandlerResult<u64> { + self.send_message(BackendReq::SHMEM_MAP, req, Some(&[fd.as_raw_fd()])) + } + + /// Forward vhost-user memory unmap file request to the frontend.
+ fn shmem_unmap(&self, req: &VhostUserMMap) -> HandlerResult<u64> { + self.send_message(BackendReq::SHMEM_UNMAP, req, None) + } } #[cfg(test)] @@ -191,10 +201,16 @@ mod tests { use super::*; + fn frontend_backend_pair() -> (Endpoint<VhostUserMsgHeader<BackendReq>>, Backend) { + let (p1, p2) = UnixStream::pair().unwrap(); + let backend = Backend::from_stream(p1); + let frontend = Endpoint::<VhostUserMsgHeader<BackendReq>>::from_stream(p2); + (frontend, backend) + } + #[test] fn test_backend_req_set_failed() { - let (p1, _p2) = UnixStream::pair().unwrap(); - let backend = Backend::from_stream(p1); + let (_, backend) = frontend_backend_pair(); assert!(backend.node().error.is_none()); backend.set_failed(libc::EAGAIN); @@ -203,8 +219,7 @@ mod tests { #[test] fn test_backend_req_send_failure() { - let (p1, _) = UnixStream::pair().unwrap(); - let backend = Backend::from_stream(p1); + let (_, backend) = frontend_backend_pair(); backend.set_failed(libc::ECONNRESET); backend @@ -218,9 +233,7 @@ mod tests { #[test] fn test_backend_req_recv_negative() { - let (p1, p2) = UnixStream::pair().unwrap(); - let backend = Backend::from_stream(p1); - let mut frontend = Endpoint::<VhostUserMsgHeader<BackendReq>>::from_stream(p2); + let (mut frontend, backend) = frontend_backend_pair(); let len = mem::size_of::<VhostUserU64>(); let mut hdr = VhostUserMsgHeader::new( @@ -266,4 +279,50 @@ mod tests { .shared_object_add(&VhostUserSharedMsg::default()) .unwrap(); } + + #[test] + fn test_shmem_map() { + let (mut frontend, backend) = frontend_backend_pair(); + + let (_, some_fd_to_send) = UnixStream::pair().unwrap(); + let map_request = VhostUserMMap { + shmid: 0, + padding: Default::default(), + fd_offset: 0, + shm_offset: 1028, + len: 4096, + flags: (VhostUserMMapFlags::MAP_R | VhostUserMMapFlags::MAP_W).bits(), + }; + + backend.shmem_map(&map_request, &some_fd_to_send).unwrap(); + + let (hdr, request, fd) = frontend.recv_body::<VhostUserMMap>().unwrap(); + assert_eq!(hdr.get_code().unwrap(), BackendReq::SHMEM_MAP); + assert!(fd.is_some()); + assert_eq!({ request.shm_offset }, { map_request.shm_offset });
+ assert_eq!({ request.len }, { map_request.len }); + assert_eq!({ request.flags }, { map_request.flags }); + } + + #[test] + fn test_shmem_unmap() { + let (mut frontend, backend) = frontend_backend_pair(); + + let unmap_request = VhostUserMMap { + shmid: 0, + padding: Default::default(), + fd_offset: 0, + shm_offset: 1028, + len: 4096, + flags: 0, + }; + + backend.shmem_unmap(&unmap_request).unwrap(); + + let (hdr, request, fd) = frontend.recv_body::<VhostUserMMap>().unwrap(); + assert_eq!(hdr.get_code().unwrap(), BackendReq::SHMEM_UNMAP); + assert!(fd.is_none()); + assert_eq!({ request.shm_offset }, { unmap_request.shm_offset }); + assert_eq!({ request.len }, { unmap_request.len }); + } } diff --git a/vhost/src/vhost_user/frontend_req_handler.rs b/vhost/src/vhost_user/frontend_req_handler.rs index 20a81b67..e80692f0 100644 --- a/vhost/src/vhost_user/frontend_req_handler.rs +++ b/vhost/src/vhost_user/frontend_req_handler.rs @@ -52,6 +52,16 @@ pub trait VhostUserFrontendReqHandler { Err(std::io::Error::from_raw_os_error(libc::ENOSYS)) } + /// Handle shared memory region mapping requests. + fn shmem_map(&self, _req: &VhostUserMMap, _fd: &dyn AsRawFd) -> HandlerResult<u64> { + Err(std::io::Error::from_raw_os_error(libc::ENOSYS)) + } + + /// Handle shared memory region unmapping requests. + fn shmem_unmap(&self, _req: &VhostUserMMap) -> HandlerResult<u64> { + Err(std::io::Error::from_raw_os_error(libc::ENOSYS)) + } + // fn handle_iotlb_msg(&mut self, iotlb: VhostUserIotlb); // fn handle_vring_host_notifier(&mut self, area: VhostUserVringArea, fd: &dyn AsRawFd); } @@ -84,6 +94,16 @@ pub trait VhostUserFrontendReqHandlerMut { Err(std::io::Error::from_raw_os_error(libc::ENOSYS)) } + /// Handle shared memory region mapping requests. + fn shmem_map(&mut self, _req: &VhostUserMMap, _fd: &dyn AsRawFd) -> HandlerResult<u64> { + Err(std::io::Error::from_raw_os_error(libc::ENOSYS)) + } + + /// Handle shared memory region unmapping requests.
+ fn shmem_unmap(&mut self, _req: &VhostUserMMap) -> HandlerResult<u64> { + Err(std::io::Error::from_raw_os_error(libc::ENOSYS)) + } + // fn handle_iotlb_msg(&mut self, iotlb: VhostUserIotlb); // fn handle_vring_host_notifier(&mut self, area: VhostUserVringArea, fd: RawFd); } @@ -111,6 +131,14 @@ impl<S: VhostUserFrontendReqHandlerMut> VhostUserFrontendReqHandler for Mutex<S> { ) -> HandlerResult<u64> { self.lock().unwrap().shared_object_lookup(uuid, fd) } + + fn shmem_map(&self, req: &VhostUserMMap, fd: &dyn AsRawFd) -> HandlerResult<u64> { + self.lock().unwrap().shmem_map(req, fd) + } + + fn shmem_unmap(&self, req: &VhostUserMMap) -> HandlerResult<u64> { + self.lock().unwrap().shmem_unmap(req) + } } /// Server to handle service requests from backends from the backend communication channel. @@ -241,6 +269,18 @@ impl<S: VhostUserFrontendReqHandler> FrontendReqHandler<S> { .shared_object_lookup(&msg, &files.unwrap()[0]) .map_err(Error::ReqHandlerError) } + Ok(BackendReq::SHMEM_MAP) => { + let msg = self.extract_msg_body::<VhostUserMMap>(&hdr, size, &buf)?; + self.backend + .shmem_map(&msg, &files.unwrap()[0]) + .map_err(Error::ReqHandlerError) + } + Ok(BackendReq::SHMEM_UNMAP) => { + let msg = self.extract_msg_body::<VhostUserMMap>(&hdr, size, &buf)?; + self.backend + .shmem_unmap(&msg) + .map_err(Error::ReqHandlerError) + } _ => Err(Error::InvalidMessage), }; @@ -278,7 +318,7 @@ impl<S: VhostUserFrontendReqHandler> FrontendReqHandler<S> { files: &Option<Vec<File>>, ) -> Result<()> { match hdr.get_code() { - Ok(BackendReq::SHARED_OBJECT_LOOKUP) => { + Ok(BackendReq::SHARED_OBJECT_LOOKUP | BackendReq::SHMEM_MAP) => { // Expect a single file is passed.
match files { Some(files) if files.len() == 1 => Ok(()), @@ -356,6 +396,7 @@ mod tests { use super::*; use std::collections::HashSet; + use std::io::ErrorKind; use uuid::Uuid; @@ -366,12 +407,14 @@ mod tests { struct MockFrontendReqHandler { shared_objects: HashSet<Uuid>, + shmem_mappings: HashSet<(u64, u64)>, } impl MockFrontendReqHandler { fn new() -> Self { Self { shared_objects: HashSet::new(), + shmem_mappings: HashSet::new(), } } } @@ -395,6 +438,82 @@ mod tests { } Ok(1) } + + fn shmem_map(&mut self, req: &VhostUserMMap, _fd: &dyn AsRawFd) -> HandlerResult<u64> { + assert_eq!({ req.shmid }, 0); + Ok(!self.shmem_mappings.insert((req.shm_offset, req.len)) as u64) + } + + fn shmem_unmap(&mut self, req: &VhostUserMMap) -> HandlerResult<u64> { + assert_eq!({ req.shmid }, 0); + Ok(!self.shmem_mappings.remove(&(req.shm_offset, req.len)) as u64) + } + } + + #[test] + fn test_default_frontend_impl() { + struct Frontend; + impl VhostUserFrontendReqHandler for Frontend {} + + let f = Frontend; + assert_eq!( + f.shared_object_add(&Default::default()).unwrap_err().kind(), + ErrorKind::Unsupported + ); + assert_eq!( + f.shared_object_remove(&Default::default()) + .unwrap_err() + .kind(), + ErrorKind::Unsupported + ); + assert_eq!( + f.shared_object_lookup(&Default::default(), &0) + .unwrap_err() + .kind(), + ErrorKind::Unsupported + ); + + assert_eq!( + f.shmem_map(&Default::default(), &0).unwrap_err().kind(), + ErrorKind::Unsupported + ); + assert_eq!( + f.shmem_unmap(&Default::default()).unwrap_err().kind(), + ErrorKind::Unsupported + ); + } + + #[test] + fn test_default_frontend_impl_mut() { + struct FrontendMut; + impl VhostUserFrontendReqHandlerMut for FrontendMut {} + + let mut f = FrontendMut; + assert_eq!( + f.shared_object_add(&Default::default()).unwrap_err().kind(), + ErrorKind::Unsupported + ); + assert_eq!( + f.shared_object_remove(&Default::default()) + .unwrap_err() + .kind(), + ErrorKind::Unsupported + ); + assert_eq!( + f.shared_object_lookup(&Default::default(), &0)
+ .unwrap_err() + .kind(), + ErrorKind::Unsupported + ); + + assert_eq!( + f.shmem_map(&Default::default(), &0).unwrap_err().kind(), + ErrorKind::Unsupported + ); + assert_eq!( + f.shmem_unmap(&Default::default()).unwrap_err().kind(), + ErrorKind::Unsupported + ); } #[test] @@ -436,6 +555,13 @@ mod tests { assert_eq!(handler.handle_request().unwrap(), 1); assert_eq!(handler.handle_request().unwrap(), 0); assert_eq!(handler.handle_request().unwrap(), 1); + + // Testing shmem map/unmap messages. + assert_eq!(handler.handle_request().unwrap(), 0); + assert_eq!(handler.handle_request().unwrap(), 1); + assert_eq!(handler.handle_request().unwrap(), 0); + assert_eq!(handler.handle_request().unwrap(), 0); + assert_eq!(handler.handle_request().unwrap(), 0); }); backend.set_shared_object_flag(true); @@ -456,6 +582,24 @@ mod tests { .is_ok()); assert!(backend.shared_object_remove(&shobj_msg).is_ok()); assert!(backend.shared_object_remove(&shobj_msg).is_ok()); + + let (_, some_fd_to_map) = UnixStream::pair().unwrap(); + let map_request1 = VhostUserMMap { + shm_offset: 0, + len: 4096, + ..Default::default() + }; + let map_request2 = VhostUserMMap { + shm_offset: 4096, + len: 8192, + ..Default::default() + }; + backend.shmem_map(&map_request1, &some_fd_to_map).unwrap(); + backend.shmem_unmap(&map_request2).unwrap(); + backend.shmem_map(&map_request2, &some_fd_to_map).unwrap(); + backend.shmem_unmap(&map_request2).unwrap(); + backend.shmem_unmap(&map_request1).unwrap(); + // Ensure that the handler thread did not panic. assert!(frontend_handler.join().is_ok()); } @@ -485,6 +629,13 @@ mod tests { assert_eq!(handler.handle_request().unwrap(), 1); assert_eq!(handler.handle_request().unwrap(), 0); assert_eq!(handler.handle_request().unwrap(), 1); + + // Testing shmem map/unmap messages.
+ assert_eq!(handler.handle_request().unwrap(), 0); + assert_eq!(handler.handle_request().unwrap(), 1); + assert_eq!(handler.handle_request().unwrap(), 0); + assert_eq!(handler.handle_request().unwrap(), 0); + assert_eq!(handler.handle_request().unwrap(), 0); }); backend.set_reply_ack_flag(true); @@ -506,6 +657,24 @@ mod tests { .is_err()); assert!(backend.shared_object_remove(&shobj_msg).is_ok()); assert!(backend.shared_object_remove(&shobj_msg).is_err()); + + let (_, some_fd_to_map) = UnixStream::pair().unwrap(); + let map_request1 = VhostUserMMap { + shm_offset: 0, + len: 4096, + ..Default::default() + }; + let map_request2 = VhostUserMMap { + shm_offset: 4096, + len: 8192, + ..Default::default() + }; + backend.shmem_map(&map_request1, &some_fd_to_map).unwrap(); + backend.shmem_unmap(&map_request2).unwrap_err(); + backend.shmem_map(&map_request2, &some_fd_to_map).unwrap(); + backend.shmem_unmap(&map_request2).unwrap(); + backend.shmem_unmap(&map_request1).unwrap(); + // Ensure that the handler thread did not panic. assert!(frontend_handler.join().is_ok()); } diff --git a/vhost/src/vhost_user/message.rs b/vhost/src/vhost_user/message.rs index 68333afb..881b114b 100644 --- a/vhost/src/vhost_user/message.rs +++ b/vhost/src/vhost_user/message.rs @@ -194,6 +194,10 @@ enum_value! { SHARED_OBJECT_REMOVE = 7, /// Lookup for a virtio shared object. SHARED_OBJECT_LOOKUP = 8, + /// Map memory into guest address space + SHMEM_MAP = 9, + /// Unmap memory from guest address space + SHMEM_UNMAP = 10, } } @@ -990,6 +994,48 @@ impl VhostUserMsgValidator for VhostUserTransferDeviceState { } } +// Bit mask for flags in VhostUserMMap struct +bitflags! { + #[derive(Copy, Clone, Debug, Eq, PartialEq, Default)] + /// Flags specifying access permissions of memory mapping of a file + pub struct VhostUserMMapFlags: u64 { + /// Empty permission. + const EMPTY = 0x0; + /// Read permission. + const MAP_R = 0x1; + /// Write permission. 
+ const MAP_W = 0x2; + } +} + +/// Backend request to mmap a file-backed buffer into guest memory +#[repr(C, packed)] +#[derive(Debug, Copy, Clone, Default)] +pub struct VhostUserMMap { + /// Shared memory region ID. + pub shmid: u8, + /// Struct padding. + pub padding: [u8; 7], + /// File offset. + pub fd_offset: u64, + /// Offset into the shared memory region. + pub shm_offset: u64, + /// Size of region to map. + pub len: u64, + /// Flags for the mmap operation + pub flags: u64, +} + +// SAFETY: Safe because all fields of VhostUserMMap are POD. +unsafe impl ByteValued for VhostUserMMap {} + +impl VhostUserMsgValidator for VhostUserMMap { + fn is_valid(&self) -> bool { + (self.flags & !VhostUserMMapFlags::all().bits()) == 0 + && self.fd_offset.checked_add(self.len).is_some() + && self.shm_offset.checked_add(self.len).is_some() + } +} /// Inflight I/O descriptor state for split virtqueues #[repr(C, packed)] #[derive(Clone, Copy, Default)]