diff --git a/CHANGELOG.MD b/CHANGELOG.MD new file mode 100644 index 00000000..9f2b8594 --- /dev/null +++ b/CHANGELOG.MD @@ -0,0 +1,7 @@ +# Changelog + +## [Unreleased] + +### Removed + +- Removed the `kvm_ioctls::CpuId::from_entries` method. \ No newline at end of file diff --git a/src/ioctls/common/kvm_vec.rs b/src/ioctls/common/kvm_vec.rs new file mode 100644 index 00000000..cddd8791 --- /dev/null +++ b/src/ioctls/common/kvm_vec.rs @@ -0,0 +1,665 @@ +// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +// +// Portions Copyright 2017 The Chromium OS Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the THIRD-PARTY file. + +use kvm_bindings::__IncompleteArrayField; +use std::mem; +use std::mem::size_of; + +/// Errors associated with the KvmVec struct. +#[derive(Debug, Clone)] +pub enum Error { + /// The max size has been exceeded + SizeLimitExceeded, +} + +/// Trait for accessing some properties of certain KVM structures that resemble an array. +/// +/// The kvm API has many structs that resemble the following `MockKvmArray` structure: +/// +/// # Example +/// +/// ``` +/// extern crate kvm_bindings; +/// use kvm_bindings::*; +/// +/// use kvm_ioctls::{KvmArray, KvmVec}; +/// +/// const MAX_LEN: usize = 100; +/// +/// #[repr(C)] +/// #[derive(Default)] +/// struct MockKvmArray { +/// pub len: __u32, +/// pub padding: __u32, +/// pub entries: __IncompleteArrayField<__u32>, +/// } +/// +/// impl KvmArray for MockKvmArray { +/// type Entry = u32; +/// +/// fn len(&self) -> usize { +/// self.len as usize +/// } +/// +/// fn set_len(&mut self, len: usize) { +/// self.len = len as u32 +/// } +/// +/// fn max_len() -> usize { +/// MAX_LEN +/// } +/// +/// fn entries(&self) -> &__IncompleteArrayField { +/// &self.entries +/// } +/// +/// fn entries_mut(&mut self) -> &mut __IncompleteArrayField { +/// &mut self.entries +/// } +/// } +/// ``` +#[allow(clippy::len_without_is_empty)] +pub trait KvmArray { + /// The type of the __IncompleteArrayField entries + type Entry: PartialEq + Copy; + + /// Get the array length + /// + fn len(&self) -> usize; + + /// Get the array length as mut + /// + fn set_len(&mut self, len: usize); + + /// Get max array length + /// + fn max_len() -> usize; + + /// Get the array entries + /// + fn entries(&self) -> &__IncompleteArrayField; + + /// Get the array entries as mut + /// + fn entries_mut(&mut self) -> &mut __IncompleteArrayField; +} + +/// An adapter that helps in treating a KvmArray similarly to an actual `Vec`. +/// +pub struct KvmVec { + // this variable holds the KvmArray structure. We use a `Vec` To make the allocation + // large enough while still being aligned for `T`. Only the first element of `Vec` will + // actually be used as a `T`. The remaining memory in the `Vec` is for `entries`, which + // must be contiguous. Since the entries are of type `KvmArray::Entry` + // we must be careful to convert the desired capacity of the `KvmVec` + // from `KvmArray::Entry` to `T` when reserving or releasing memory. 
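+    // As a worked example of that conversion: with the `MockKvmArray` used in
+    // the unit tests below (`size_of::<MockKvmArray>()` is 8 bytes and each
+    // entry is a 4-byte `u32`), holding 10 entries requires
+    // (8 + 10 * 4 + 8 - 1) / 8 = 6 elements of `T` in `mem_allocator`,
+    // which is the (10, 6) pair checked in `KVM_VEC_LEN_TO_MEM_ALLOCATOR_LEN`.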
+ mem_allocator: Vec, + // the number of elements of type `KvmArray::Entry` currently in the vec + len: usize, + // the capacity of the `KvmVec` measured in elements of type `KvmArray::Entry` + capacity: usize, +} + +impl KvmVec { + /// Get the capacity required by mem_allocator in order to hold + /// the provided number of `KvmArray::Entry` + /// + fn kvm_vec_len_to_mem_allocator_len(kvm_vec_len: usize) -> usize { + let kvm_vec_size_in_bytes = size_of::() + kvm_vec_len * size_of::(); + (kvm_vec_size_in_bytes + size_of::() - 1) / size_of::() + } + + /// Get the number of elements of type `KvmArray::Entry` that fit + /// in a mem_allocator of provided len + /// + fn mem_allocator_len_to_kvm_vec_len(mem_allocator_len: usize) -> usize { + if mem_allocator_len == 0 { + return 0; + } + + let array_size_in_bytes = (mem_allocator_len - 1) * size_of::(); + array_size_in_bytes / size_of::() + } + + /// Constructs a new KvmVec that contains `num_elements` empty elements + /// of type `KvmArray::Entry` + /// + /// # Arguments + /// + /// * `num_elements` - The number of empty elements of type `KvmArray::Entry` in the initial `KvmVec` + /// + /// # Example + /// + /// ``` + /// extern crate kvm_bindings; + /// use kvm_bindings::*; + /// + /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + /// use kvm_ioctls::{KvmArray, KvmVec, CpuId}; + /// + /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + /// fn example() { + /// let cpuid = CpuId::new(3); + /// assert_eq!(cpuid.as_entries_slice().len(), 3); + /// for entry in cpuid.as_entries_slice().iter() { + /// assert_eq!(*entry, kvm_cpuid_entry2::default()) + /// } + /// } + /// ``` + /// + pub fn new(num_elements: usize) -> KvmVec { + let required_mem_allocator_capacity = + KvmVec::::kvm_vec_len_to_mem_allocator_len(num_elements); + + let mut mem_allocator = Vec::with_capacity(required_mem_allocator_capacity); + for _ in 0..required_mem_allocator_capacity { + mem_allocator.push(T::default()) + } + mem_allocator[0].set_len(num_elements); + + KvmVec { + mem_allocator, + len: num_elements, + capacity: num_elements, + } + } + + /// Creates a new `KvmVec` structure based on a supplied vector of `KvmArray::Entry`. + /// + /// # Arguments + /// + /// * `entries` - The vector of `KvmArray::Entry` entries. + /// + /// # Example + /// + /// ``` + /// extern crate kvm_bindings; + /// use kvm_bindings::*; + /// + /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + /// use kvm_ioctls::{KvmArray, KvmVec, CpuId}; + /// + /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + /// fn example() { + /// // Create a vec to hold one cpuid entry. + /// let mut cpuid_entries = Vec::new(); + /// let new_entry = kvm_cpuid_entry2 { + /// function: 0x4, + /// index: 0, + /// flags: 1, + /// eax: 0b1100000, + /// ebx: 0, + /// ecx: 0, + /// edx: 0, + /// padding: [0, 0, 0], + /// }; + /// cpuid_entries.push(new_entry); + /// let cpuid = CpuId::from_entries(&cpuid_entries); + /// } + /// ``` + /// + pub fn from_entries(entries: &[T::Entry]) -> KvmVec { + let mut kvm_vec = KvmVec::::new(entries.len()); + + { + let kvm_vec_entries = kvm_vec.as_mut_kvm_struct().entries_mut(); + // this is safe because the provided length is correct + let kvm_vec_entries_slice = unsafe { kvm_vec_entries.as_mut_slice(entries.len()) }; + kvm_vec_entries_slice.copy_from_slice(entries); + } + + kvm_vec + } + + /// Get a reference to the actual KVM structure instance. 
+ /// + pub fn as_kvm_struct(&self) -> &T { + &self.mem_allocator[0] + } + + /// Get a mut reference to the actual KVM structure instance. + /// + pub fn as_mut_kvm_struct(&mut self) -> &mut T { + &mut self.mem_allocator[0] + } + + /// Get a pointer to the KVM struct so it can be passed to the kernel. + /// + pub fn as_ptr(&self) -> *const T { + self.as_kvm_struct() + } + + /// Get a mutable pointer to the KVM struct so it can be passed to the kernel. + /// + pub fn as_mut_ptr(&mut self) -> *mut T { + self.as_mut_kvm_struct() + } + + /// Get a mut `Vec` that contains all the elements. + /// It is important to call `mem::forget` after using this vector. + /// Otherwise rust will destroy it. + /// + fn as_vec(&mut self) -> Vec { + unsafe { + let entries_ptr = self.as_mut_kvm_struct().entries_mut().as_mut_ptr(); + // This is safe since self.len and self.capacity should be correct + Vec::from_raw_parts(entries_ptr, self.len, self.capacity as usize) + } + } + + /// Get the mutable elements slice so they can be modified before passing to the VCPU. + /// + /// # Example + /// ``` + /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + /// use kvm_ioctls::{CpuId, Kvm, MAX_KVM_CPUID_ENTRIES}; + /// + /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + /// fn example() { + /// let kvm = Kvm::new().unwrap(); + /// let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap(); + /// let cpuid_entries = cpuid.as_entries_slice(); + /// } + /// ``` + pub fn as_entries_slice(&self) -> &[T::Entry] { + let len = self.as_kvm_struct().len(); + unsafe { self.as_kvm_struct().entries().as_slice(len as usize) } + } + + /// Get the mutable elements slice so they can be modified before passing to the VCPU. + /// + /// # Example + /// ``` + /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + /// use kvm_ioctls::{CpuId, Kvm, MAX_KVM_CPUID_ENTRIES}; + /// + /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + /// fn example() { + /// let kvm = Kvm::new().unwrap(); + /// let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap(); + /// let cpuid_entries = cpuid.as_mut_entries_slice(); + /// } + /// ``` + /// + pub fn as_mut_entries_slice(&mut self) -> &mut [T::Entry] { + let len = self.as_kvm_struct().len(); + unsafe { + self.as_mut_kvm_struct() + .entries_mut() + .as_mut_slice(len as usize) + } + } + + /// Reserves capacity for at least `additional` more `KvmArray::Entry` elements. + /// If the capacity is already reserved, this method doesn't do anything + /// + fn reserve(&mut self, additional: usize) { + let desired_capacity = self.len + additional; + if desired_capacity <= self.capacity { + return; + } + + let current_mem_allocator_len = self.mem_allocator.len(); + let required_mem_allocator_len = + KvmVec::::kvm_vec_len_to_mem_allocator_len(desired_capacity); + let additional_mem_allocator_len = required_mem_allocator_len - current_mem_allocator_len; + + self.mem_allocator.reserve(additional_mem_allocator_len); + self.capacity = + KvmVec::::mem_allocator_len_to_kvm_vec_len(self.mem_allocator.capacity()); + } + + /// Updates the length of `self` to the specified value. + /// Also updates the length of the `T::Entry` structure and of `self.mem_allocator` accordingly. + /// + fn update_len(&mut self, len: usize) { + self.len = len; + self.as_mut_kvm_struct().set_len(len); + + /// We need to set the len of the mem_allocator to be the number of T elements needed + /// to fit an array of `len` elements of type `T::Entry`. 
This way, when we call + /// `self.mem_allocator.shrink_to_fit()` only the unnecessary memory will be released. + let required_mem_allocator_len = KvmVec::::kvm_vec_len_to_mem_allocator_len(len); + unsafe { + self.mem_allocator.set_len(required_mem_allocator_len); + } + } + + /// Appends an element to the end of the collection and updates `len`. + /// + /// # Arguments + /// + /// * `entry` - The element that will be appended to the end of the collection. + /// + /// # Error: When len is already equal to max possible len it returns Error::SizeLimitExceeded + /// + /// # Example + /// ``` + /// extern crate kvm_bindings; + /// use kvm_bindings::*; + /// + /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + /// use kvm_ioctls::{KvmArray, KvmVec, CpuId}; + /// + /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + /// fn example() { + /// let mut cpuid = CpuId::new(3); + /// cpuid.push(kvm_cpuid_entry2 { + /// function: 1, + /// index: 0, + /// flags: 0, + /// eax: 0, + /// ebx: 0, + /// ecx: 0, + /// edx: 0, + /// padding: [0, 0, 0] + /// }); + /// assert_eq!(cpuid.as_entries_slice()[3].function, 1) + /// } + /// ``` + /// + pub fn push(&mut self, entry: T::Entry) -> Result<(), Error> { + let desired_len = self.len + 1; + if desired_len > T::max_len() { + return Err(Error::SizeLimitExceeded); + } + + self.reserve(1); + + let mut entries = self.as_vec(); + entries.push(entry); + self.update_len(desired_len); + + mem::forget(entries); + + Ok(()) + } + + /// Retains only the elements specified by the predicate. + /// + /// # Arguments + /// + /// * `f` - The function used to evaluate whether an entry will be kept or not. + /// When `f` returns `true` the entry is kept. + /// + /// # Example + /// ``` + /// extern crate kvm_bindings; + /// use kvm_bindings::*; + /// + /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + /// use kvm_ioctls::{KvmArray, KvmVec, CpuId}; + /// + /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + /// fn example() { + /// let mut cpuid = CpuId::new(3); + /// cpuid.retain(|entry| { + /// entry.function != 0 + /// }); + /// assert_eq!(cpuid.as_entries_slice().len(), 0); + /// } + /// ``` + /// + pub fn retain
<P>
(&mut self, f: P) + where + P: FnMut(&T::Entry) -> bool, + { + let mut entries = self.as_vec(); + entries.retain(f); + + self.update_len(entries.len()); + self.mem_allocator.shrink_to_fit(); + self.capacity = + KvmVec::::mem_allocator_len_to_kvm_vec_len(self.mem_allocator.capacity()); + + mem::forget(entries); + } +} + +impl PartialEq for KvmVec { + fn eq(&self, other: &KvmVec) -> bool { + self.len == other.len && self.as_entries_slice() == other.as_entries_slice() + } +} + +impl Clone for KvmVec { + fn clone(&self) -> Self { + let mut clone = KvmVec::::new(self.len); + + let num_bytes = self.mem_allocator.len() * size_of::(); + let src_byte_slice = + unsafe { std::slice::from_raw_parts(self.as_ptr() as *const u8, num_bytes) }; + let dst_byte_slice = + unsafe { std::slice::from_raw_parts_mut(clone.as_mut_ptr() as *mut u8, num_bytes) }; + dst_byte_slice.copy_from_slice(src_byte_slice); + + clone + } +} + +#[cfg(test)] +mod tests { + use super::*; + use kvm_bindings::*; + + const MAX_LEN: usize = 100; + + #[repr(C)] + #[derive(Default)] + struct MockKvmArray { + pub len: __u32, + pub padding: __u32, + pub entries: __IncompleteArrayField<__u32>, + } + + impl KvmArray for MockKvmArray { + type Entry = u32; + + fn len(&self) -> usize { + self.len as usize + } + + fn set_len(&mut self, len: usize) { + self.len = len as u32 + } + + fn max_len() -> usize { + MAX_LEN + } + + fn entries(&self) -> &__IncompleteArrayField { + &self.entries + } + + fn entries_mut(&mut self) -> &mut __IncompleteArrayField { + &mut self.entries + } + } + + type MockKvmVec = KvmVec; + + const ENTRIES_OFFSET: usize = 2; + + const KVM_VEC_LEN_TO_MEM_ALLOCATOR_LEN: &'static [(usize, usize)] = &[ + (0, 1), + (1, 2), + (2, 2), + (3, 3), + (4, 3), + (5, 4), + (10, 6), + (50, 26), + (100, 51), + ]; + + const MEM_ALLOCATOR_LEN_TO_KVM_VEC_LEN: &'static [(usize, usize)] = &[ + (0, 0), + (1, 0), + (2, 2), + (3, 4), + (4, 6), + (5, 8), + (10, 18), + (50, 98), + (100, 198), + ]; + + #[test] + fn test_kvm_vec_len_to_mem_allocator_len() { + for pair in KVM_VEC_LEN_TO_MEM_ALLOCATOR_LEN { + let kvm_vec_len = pair.0; + let mem_allocator_len = pair.1; + assert_eq!( + mem_allocator_len, + MockKvmVec::kvm_vec_len_to_mem_allocator_len(kvm_vec_len) + ); + } + } + + #[test] + fn test_mem_allocator_len_to_kvm_vec_len() { + for pair in MEM_ALLOCATOR_LEN_TO_KVM_VEC_LEN { + let mem_allocator_len = pair.0; + let kvm_vec_len = pair.1; + assert_eq!( + kvm_vec_len, + MockKvmVec::mem_allocator_len_to_kvm_vec_len(mem_allocator_len) + ); + } + } + + #[test] + fn test_new() { + let num_entries = 10; + + let kvm_vec = MockKvmVec::new(num_entries); + assert_eq!(num_entries, kvm_vec.capacity); + + let u32_slice = unsafe { + std::slice::from_raw_parts(kvm_vec.as_ptr() as *const u32, num_entries + ENTRIES_OFFSET) + }; + assert_eq!(num_entries, u32_slice[0] as usize); + for entry in u32_slice[1..].iter() { + assert_eq!(*entry, 0); + } + } + + #[test] + fn test_from_entries() { + let num_entries: usize = 10; + + let mut entries = Vec::new(); + for i in 0..num_entries { + entries.push(i as u32); + } + + let kvm_vec = MockKvmVec::from_entries(entries.as_slice()); + let u32_slice = unsafe { + std::slice::from_raw_parts(kvm_vec.as_ptr() as *const u32, num_entries + ENTRIES_OFFSET) + }; + assert_eq!(num_entries, u32_slice[0] as usize); + for i in 0..num_entries { + assert_eq!(kvm_vec.as_entries_slice()[i], entries[i]); + } + } + + #[test] + fn test_entries_slice() { + let num_entries = 10; + let mut kvm_vec = MockKvmVec::new(num_entries); + + let expected_slice = &[0, 
1, 2, 3, 4, 5, 6, 7, 8, 9]; + + { + let mut mut_entries_slice = kvm_vec.as_mut_entries_slice(); + mut_entries_slice.copy_from_slice(expected_slice); + } + + let u32_slice = unsafe { + std::slice::from_raw_parts(kvm_vec.as_ptr() as *const u32, num_entries + ENTRIES_OFFSET) + }; + assert_eq!(expected_slice, &u32_slice[ENTRIES_OFFSET..]); + assert_eq!(expected_slice, kvm_vec.as_entries_slice()); + } + + #[test] + fn test_reserve() { + let mut kvm_vec = MockKvmVec::new(0); + + // test that the right capacity is reserved + for pair in KVM_VEC_LEN_TO_MEM_ALLOCATOR_LEN { + let num_elements = pair.0; + let required_mem_allocator_len = pair.1; + + let kvm_vec_capacity = kvm_vec.capacity; + kvm_vec.reserve(num_elements); + + assert!(kvm_vec.mem_allocator.capacity() >= required_mem_allocator_len); + assert_eq!(0, kvm_vec.len); + assert!(kvm_vec.capacity >= num_elements); + } + + // test that when the capacity is already reserved, the method doesn't do anything + let current_capacity = kvm_vec.capacity; + kvm_vec.reserve(current_capacity - 1); + assert_eq!(current_capacity, kvm_vec.capacity); + } + + #[test] + fn test_push() { + let mut kvm_vec = MockKvmVec::new(0); + + for i in 0..MAX_LEN { + assert!(kvm_vec.push(i as u32).is_ok()); + assert_eq!(kvm_vec.as_entries_slice()[i], i as u32); + } + + assert!(kvm_vec.push(0).is_err()); + } + + #[test] + fn test_retain() { + let mut kvm_vec = MockKvmVec::new(0); + + for i in 0..MAX_LEN { + assert!(kvm_vec.push(i as u32).is_ok()); + } + + kvm_vec.retain(|entry| entry % 2 == 0); + + for entry in kvm_vec.as_entries_slice().iter() { + assert_eq!(0, entry % 2); + } + } + + #[test] + fn test_partial_eq() { + let mut kvm_vec_1 = MockKvmVec::new(0); + let mut kvm_vec_2 = MockKvmVec::new(0); + let mut kvm_vec_3 = MockKvmVec::new(0); + + for i in 0..MAX_LEN { + assert!(kvm_vec_1.push(i as u32).is_ok()); + assert!(kvm_vec_2.push(i as u32).is_ok()); + assert!(kvm_vec_3.push(0).is_ok()); + } + + assert!(kvm_vec_1 == kvm_vec_2); + assert!(kvm_vec_1 != kvm_vec_3); + } + + #[test] + fn test_clone() { + let mut kvm_vec = MockKvmVec::new(0); + + for i in 0..MAX_LEN { + assert!(kvm_vec.push(i as u32).is_ok()); + } + + assert!(kvm_vec == kvm_vec.clone()); + } +} diff --git a/src/ioctls/common/mod.rs b/src/ioctls/common/mod.rs new file mode 100644 index 00000000..9e89be8c --- /dev/null +++ b/src/ioctls/common/mod.rs @@ -0,0 +1,8 @@ +// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +// +// Portions Copyright 2017 The Chromium OS Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the THIRD-PARTY file. + +pub mod kvm_vec; diff --git a/src/ioctls/mod.rs b/src/ioctls/mod.rs index d5a1cf3e..bb1df1f8 100644 --- a/src/ioctls/mod.rs +++ b/src/ioctls/mod.rs @@ -10,11 +10,15 @@ use std::mem::size_of; use std::os::unix::io::AsRawFd; use std::ptr::null_mut; use std::result; +use MAX_KVM_CPUID_ENTRIES; +use MAX_KVM_MSR_ENTRIES; use kvm_bindings::kvm_run; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -use kvm_bindings::{kvm_cpuid2, kvm_cpuid_entry2}; +use kvm_bindings::{__IncompleteArrayField, kvm_cpuid2, kvm_cpuid_entry2, kvm_msr_list}; +/// Helper for dealing with KVM api structures +pub mod common; /// Wrappers over KVM device ioctls. pub mod device; /// Wrappers over KVM system ioctls. @@ -24,196 +28,130 @@ pub mod vcpu; /// Wrappers over KVM Virtual Machine ioctls. 
pub mod vm; +use self::common::kvm_vec::{KvmArray, KvmVec}; + /// A specialized `Result` type for KVM ioctls. /// /// This typedef is generally used to avoid writing out io::Error directly and /// is otherwise a direct mapping to Result. pub type Result = result::Result; -// Returns a `Vec` with a size in bytes at least as large as `size_in_bytes`. -fn vec_with_size_in_bytes(size_in_bytes: usize) -> Vec { - let rounded_size = (size_in_bytes + size_of::() - 1) / size_of::(); - let mut v = Vec::with_capacity(rounded_size); - for _ in 0..rounded_size { - v.push(T::default()) - } - v -} - -// The kvm API has many structs that resemble the following `Foo` structure: -// -// ``` -// #[repr(C)] -// struct Foo { -// some_data: u32 -// entries: __IncompleteArrayField<__u32>, -// } -// ``` -// -// In order to allocate such a structure, `size_of::()` would be too small because it would not -// include any space for `entries`. To make the allocation large enough while still being aligned -// for `Foo`, a `Vec` is created. Only the first element of `Vec` would actually be used -// as a `Foo`. The remaining memory in the `Vec` is for `entries`, which must be contiguous -// with `Foo`. This function is used to make the `Vec` with enough space for `count` entries. -fn vec_with_array_field(count: usize) -> Vec { - let element_space = count * size_of::(); - let vec_size_bytes = size_of::() + element_space; - vec_with_size_in_bytes(vec_size_bytes) -} - -/// Wrapper over the `kvm_cpuid2` structure. -/// -/// The structure has a zero length array at the end, hidden behind bounds check. #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -pub struct CpuId { - // Wrapper over `kvm_cpuid2` from which we only use the first element. - kvm_cpuid: Vec, - // Number of `kvm_cpuid_entry2` structs at the end of kvm_cpuid2. - allocated_len: usize, -} +impl KvmArray for kvm_cpuid2 { + type Entry = kvm_cpuid_entry2; -#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -impl Clone for CpuId { - fn clone(&self) -> Self { - let mut kvm_cpuid = Vec::with_capacity(self.kvm_cpuid.len()); - for _ in 0..self.kvm_cpuid.len() { - kvm_cpuid.push(kvm_cpuid2::default()); - } - - let num_bytes = self.kvm_cpuid.len() * size_of::(); + fn len(&self) -> usize { + self.nent as usize + } - let src_byte_slice = - unsafe { std::slice::from_raw_parts(self.kvm_cpuid.as_ptr() as *const u8, num_bytes) }; + fn set_len(&mut self, len: usize) { + self.nent = len as u32; + } - let dst_byte_slice = - unsafe { std::slice::from_raw_parts_mut(kvm_cpuid.as_mut_ptr() as *mut u8, num_bytes) }; + fn max_len() -> usize { + MAX_KVM_CPUID_ENTRIES + } - dst_byte_slice.copy_from_slice(src_byte_slice); + fn entries(&self) -> &__IncompleteArrayField { + &self.entries + } - CpuId { - kvm_cpuid, - allocated_len: self.allocated_len, - } + fn entries_mut(&mut self) -> &mut __IncompleteArrayField { + &mut self.entries } } -#[cfg(test)] +/// Wrapper for `kvm_cpuid2`. +/// +/// The `kvm_cpuid2` structure has a zero sized array. For details check the +/// [KVM API](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt) +/// documentation on `kvm_cpuid2`. To provide safe access to +/// the array elements, this type is implemented using +/// [KvmVec](struct.KvmVec.html). 
+/// +/// # Example +/// ```rust +/// extern crate kvm_bindings; +/// use kvm_bindings::kvm_cpuid_entry2; +/// +/// use kvm_ioctls::{CpuId, Kvm, MAX_KVM_CPUID_ENTRIES}; +/// let kvm = Kvm::new().unwrap(); +/// // get the supported cpuid from KVM +/// let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap(); +/// // remove extended cache topology leafs +/// cpuid.retain(|entry| { +/// return entry.function != 0x8000_001d; +/// }); +/// // add largest extended fn entry +/// cpuid.push(kvm_cpuid_entry2 { +/// function: 0x8000_0000, +/// index: 0, +/// flags: 0, +/// eax: 0x8000_001f, +/// ebx: 0, +/// ecx: 0, +/// edx: 0, +/// padding: [0, 0, 0]} +/// ); +/// // edit features info leaf +/// for entry in cpuid.as_mut_entries_slice().iter_mut() { +/// match entry.function { +/// 0x1 => { +/// entry.eax = 0; +/// } +/// _ => { } +/// } +/// } +/// ``` #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -impl PartialEq for CpuId { - fn eq(&self, other: &CpuId) -> bool { - let entries: &[kvm_cpuid_entry2] = - unsafe { self.kvm_cpuid[0].entries.as_slice(self.allocated_len) }; - let other_entries: &[kvm_cpuid_entry2] = - unsafe { self.kvm_cpuid[0].entries.as_slice(other.allocated_len) }; - self.allocated_len == other.allocated_len && entries == other_entries - } -} +pub type CpuId = KvmVec; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -impl CpuId { - /// Creates a new `CpuId` structure that contains at most `array_len` KVM CPUID entries. - /// - /// # Arguments - /// - /// * `array_len` - Maximum number of CPUID entries. - /// - /// # Example - /// - /// ``` - /// use kvm_ioctls::CpuId; - /// let cpu_id = CpuId::new(32); - /// ``` - pub fn new(array_len: usize) -> CpuId { - let mut kvm_cpuid = vec_with_array_field::(array_len); - kvm_cpuid[0].nent = array_len as u32; +impl KvmArray for kvm_msr_list { + type Entry = u32; - CpuId { - kvm_cpuid, - allocated_len: array_len, - } + fn len(&self) -> usize { + self.nmsrs as usize } - /// Creates a new `CpuId` structure based on a supplied vector of `kvm_cpuid_entry2`. - /// - /// # Arguments - /// - /// * `entries` - The vector of `kvm_cpuid_entry2` entries. - /// - /// # Example - /// - /// ```rust - /// # extern crate kvm_ioctls; - /// extern crate kvm_bindings; - /// - /// use kvm_bindings::kvm_cpuid_entry2; - /// use kvm_ioctls::CpuId; - /// // Create a Cpuid to hold one entry. - /// let mut cpuid = CpuId::new(1); - /// let mut entries = cpuid.mut_entries_slice().to_vec(); - /// let new_entry = kvm_cpuid_entry2 { - /// function: 0x4, - /// index: 0, - /// flags: 1, - /// eax: 0b1100000, - /// ebx: 0, - /// ecx: 0, - /// edx: 0, - /// padding: [0, 0, 0], - /// }; - /// entries.insert(0, new_entry); - /// cpuid = CpuId::from_entries(&entries); - /// ``` - /// - pub fn from_entries(entries: &[kvm_cpuid_entry2]) -> CpuId { - let mut kvm_cpuid = vec_with_array_field::(entries.len()); - kvm_cpuid[0].nent = entries.len() as u32; - - unsafe { - kvm_cpuid[0] - .entries - .as_mut_slice(entries.len()) - .copy_from_slice(entries); - } - - CpuId { - kvm_cpuid, - allocated_len: entries.len(), - } + fn set_len(&mut self, len: usize) { + self.nmsrs = len as u32; } - /// Returns the mutable entries slice so they can be modified before passing to the VCPU. 
- /// - /// # Example - /// ```rust - /// use kvm_ioctls::{CpuId, Kvm, MAX_KVM_CPUID_ENTRIES}; - /// let kvm = Kvm::new().unwrap(); - /// let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap(); - /// let cpuid_entries = cpuid.mut_entries_slice(); - /// ``` - /// - pub fn mut_entries_slice(&mut self) -> &mut [kvm_cpuid_entry2] { - // Mapping the unsized array to a slice is unsafe because the length isn't known. Using - // the length we originally allocated with eliminates the possibility of overflow. - if self.kvm_cpuid[0].nent as usize > self.allocated_len { - self.kvm_cpuid[0].nent = self.allocated_len as u32; - } - let nent = self.kvm_cpuid[0].nent as usize; - unsafe { self.kvm_cpuid[0].entries.as_mut_slice(nent) } + fn max_len() -> usize { + MAX_KVM_MSR_ENTRIES as usize } - /// Get a pointer so it can be passed to the kernel. Using this pointer is unsafe. - /// - pub fn as_ptr(&self) -> *const kvm_cpuid2 { - &self.kvm_cpuid[0] + fn entries(&self) -> &__IncompleteArrayField { + &self.indices } - /// Get a mutable pointer so it can be passed to the kernel. Using this pointer is unsafe. - /// - pub fn as_mut_ptr(&mut self) -> *mut kvm_cpuid2 { - &mut self.kvm_cpuid[0] + fn entries_mut(&mut self) -> &mut __IncompleteArrayField { + &mut self.indices } } +/// Wrapper for `kvm_msr_list`. +/// +/// The `kvm_msr_list` structure has a zero sized array. For details check the +/// [KVM API](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt) +/// documentation on `kvm_msr_list`. To provide safe access to +/// the array elements, this type is implemented using +/// [KvmVec](struct.KvmVec.html). +/// +/// # Example +/// ```rust +/// use kvm_ioctls::{Kvm}; +/// +/// let kvm = Kvm::new().unwrap(); +/// // get the msr index list from KVM +/// let mut msr_index_list = kvm.get_msr_index_list().unwrap(); +/// // get indexes as u32 slice +/// let indexes = msr_index_list.as_entries_slice(); +/// ``` +#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] +pub type MsrList = KvmVec; + /// Safe wrapper over the `kvm_run` struct. 
/// /// The wrapper is needed for sending the pointer to `kvm_run` between @@ -269,36 +207,3 @@ impl KvmRunWrapper { } } } - -#[cfg(test)] -#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -mod tests { - use super::*; - - #[test] - fn test_cpuid_from_entries() { - let num_entries = 4; - let mut cpuid = CpuId::new(num_entries); - - // add entry - let mut entries = cpuid.mut_entries_slice().to_vec(); - let new_entry = kvm_cpuid_entry2 { - function: 0x4, - index: 0, - flags: 1, - eax: 0b1100000, - ebx: 0, - ecx: 0, - edx: 0, - padding: [0, 0, 0], - }; - entries.insert(0, new_entry); - cpuid = CpuId::from_entries(&entries); - - // check that the cpuid contains the new entry - assert_eq!(cpuid.allocated_len, num_entries + 1); - assert_eq!(cpuid.kvm_cpuid[0].nent, (num_entries + 1) as u32); - assert_eq!(cpuid.mut_entries_slice().len(), num_entries + 1); - assert_eq!(cpuid.mut_entries_slice()[0], new_entry); - } -} diff --git a/src/ioctls/system.rs b/src/ioctls/system.rs index 2f668fba..9dba16a4 100644 --- a/src/ioctls/system.rs +++ b/src/ioctls/system.rs @@ -14,13 +14,13 @@ use std::os::raw::{c_char, c_ulong}; use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; use cap::Cap; -use ioctls::vec_with_array_field; use ioctls::vm::{new_vmfd, VmFd}; -#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -use ioctls::CpuId; use ioctls::Result; +#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] +use ioctls::{CpuId, MsrList}; use kvm_ioctls::*; use sys_ioctl::*; +use MAX_KVM_MSR_ENTRIES; /// Wrapper over KVM system ioctls. pub struct Kvm { @@ -266,7 +266,7 @@ impl Kvm { /// /// let kvm = Kvm::new().unwrap(); /// let mut cpuid = kvm.get_emulated_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap(); - /// let cpuid_entries = cpuid.mut_entries_slice(); + /// let cpuid_entries = cpuid.as_mut_entries_slice(); /// assert!(cpuid_entries.len() <= MAX_KVM_CPUID_ENTRIES); /// ``` /// @@ -291,7 +291,7 @@ impl Kvm { /// /// let kvm = Kvm::new().unwrap(); /// let mut cpuid = kvm.get_emulated_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap(); - /// let cpuid_entries = cpuid.mut_entries_slice(); + /// let cpuid_entries = cpuid.as_mut_entries_slice(); /// assert!(cpuid_entries.len() <= MAX_KVM_CPUID_ENTRIES); /// ``` /// @@ -307,40 +307,26 @@ impl Kvm { /// # Example /// /// ``` - /// use kvm_ioctls::{Kvm, MAX_KVM_CPUID_ENTRIES}; + /// use kvm_ioctls::{Kvm}; /// /// let kvm = Kvm::new().unwrap(); /// let msr_index_list = kvm.get_msr_index_list().unwrap(); /// ``` #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] - pub fn get_msr_index_list(&self) -> Result> { - const MAX_KVM_MSR_ENTRIES: usize = 256; - - let mut msr_list = vec_with_array_field::(MAX_KVM_MSR_ENTRIES); - msr_list[0].nmsrs = MAX_KVM_MSR_ENTRIES as u32; + pub fn get_msr_index_list(&self) -> Result { + let mut msr_list = MsrList::new(MAX_KVM_MSR_ENTRIES); let ret = unsafe { // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the memory // allocated for the struct. The limit is read from nmsrs, which is set to the allocated // size (MAX_KVM_MSR_ENTRIES) above. - ioctl_with_mut_ref(self, KVM_GET_MSR_INDEX_LIST(), &mut msr_list[0]) + ioctl_with_mut_ptr(self, KVM_GET_MSR_INDEX_LIST(), msr_list.as_mut_ptr()) }; if ret < 0 { return Err(io::Error::last_os_error()); } - let mut nmsrs = msr_list[0].nmsrs; - - // Mapping the unsized array to a slice is unsafe because the length isn't known. Using - // the length we originally allocated with eliminates the possibility of overflow. 
- let indices: &[u32] = unsafe { - if nmsrs > MAX_KVM_MSR_ENTRIES as u32 { - nmsrs = MAX_KVM_MSR_ENTRIES as u32; - } - msr_list[0].indices.as_slice(nmsrs as usize) - }; - - Ok(indices.to_vec()) + Ok(msr_list) } /// Creates a VM fd using the KVM fd. @@ -425,7 +411,7 @@ mod tests { fn test_get_supported_cpuid() { let kvm = Kvm::new().unwrap(); let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap(); - let cpuid_entries = cpuid.mut_entries_slice(); + let cpuid_entries = cpuid.as_mut_entries_slice(); assert!(cpuid_entries.len() > 0); assert!(cpuid_entries.len() <= MAX_KVM_CPUID_ENTRIES); } @@ -435,7 +421,7 @@ mod tests { fn test_get_emulated_cpuid() { let kvm = Kvm::new().unwrap(); let mut cpuid = kvm.get_emulated_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap(); - let cpuid_entries = cpuid.mut_entries_slice(); + let cpuid_entries = cpuid.as_mut_entries_slice(); assert!(cpuid_entries.len() > 0); assert!(cpuid_entries.len() <= MAX_KVM_CPUID_ENTRIES); } @@ -456,7 +442,7 @@ mod tests { fn get_msr_index_list() { let kvm = Kvm::new().unwrap(); let msr_list = kvm.get_msr_index_list().unwrap(); - assert!(msr_list.len() >= 2); + assert!(msr_list.as_entries_slice().len() >= 2); } fn get_raw_errno(result: super::Result) -> i32 { diff --git a/src/ioctls/vcpu.rs b/src/ioctls/vcpu.rs index 129d2a94..24a0fa9e 100644 --- a/src/ioctls/vcpu.rs +++ b/src/ioctls/vcpu.rs @@ -325,7 +325,7 @@ impl VcpuFd { /// // Update the CPUID entries to disable the EPB feature. /// const ECX_EPB_SHIFT: u32 = 3; /// { - /// let entries = kvm_cpuid.mut_entries_slice(); + /// let entries = kvm_cpuid.as_mut_entries_slice(); /// for entry in entries.iter_mut() { /// match entry.function { /// 6 => entry.ecx &= !(1 << ECX_EPB_SHIFT), @@ -797,7 +797,7 @@ mod tests { if kvm.check_extension(Cap::ExtCpuid) { let vm = kvm.create_vm().unwrap(); let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap(); - assert!(cpuid.mut_entries_slice().len() <= MAX_KVM_CPUID_ENTRIES); + assert!(cpuid.as_mut_entries_slice().len() <= MAX_KVM_CPUID_ENTRIES); let nr_vcpus = kvm.get_nr_vcpus(); for cpu_id in 0..nr_vcpus { let vcpu = vm.create_vcpu(cpu_id as u8).unwrap(); diff --git a/src/lib.rs b/src/lib.rs index 99574e1d..3966db1b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -189,12 +189,13 @@ mod cap; mod ioctls; pub use cap::Cap; +pub use ioctls::common::kvm_vec::{KvmArray, KvmVec}; pub use ioctls::device::DeviceFd; pub use ioctls::system::Kvm; pub use ioctls::vcpu::{VcpuExit, VcpuFd}; pub use ioctls::vm::{IoEventAddress, NoDatamatch, VmFd}; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -pub use ioctls::CpuId; +pub use ioctls::{CpuId, MsrList}; // The following example is used to verify that our public // structures are exported properly. /// # Example @@ -211,3 +212,7 @@ pub use ioctls::{KvmRunWrapper, Result}; /// It can be used for calls to [get_supported_cpuid](struct.Kvm.html#method.get_supported_cpuid) and /// [get_emulated_cpuid](struct.Kvm.html#method.get_emulated_cpuid). pub const MAX_KVM_CPUID_ENTRIES: usize = 80; + +/// Maximum number of MSR entries that can be returned by a call to KVM ioctls. +/// +pub const MAX_KVM_MSR_ENTRIES: usize = 256; diff --git a/tests/coverage b/tests/coverage index 9d1fca31..073c0e67 100644 --- a/tests/coverage +++ b/tests/coverage @@ -1 +1 @@ -91.3 \ No newline at end of file +91.6 \ No newline at end of file