Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add support for VIRTIO_NET_F_MRG_RXBUF #848

Merged
merged 2 commits into from
Aug 17, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
181 changes: 68 additions & 113 deletions src/drivers/net/virtio_net.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ use core::cmp::Ordering;
use core::mem;
use core::result::Result;

use align_address::Align;
use pci_types::InterruptLine;
use smoltcp::phy::{Checksum, ChecksumCapabilities};
use smoltcp::wire::{ETHERNET_HEADER_LEN, IPV4_HEADER_LEN, IPV6_HEADER_LEN};
Expand Down Expand Up @@ -191,92 +192,36 @@ impl RxQueues {
// Safe virtqueue
let rc_vq = Rc::new(vq);
let vq = &rc_vq;
let num_buff: u16 = vq.size().into();

if dev_cfg
let rx_size = if dev_cfg
.features
.is_feature(Features::VIRTIO_NET_F_GUEST_TSO4)
| dev_cfg
.features
.is_feature(Features::VIRTIO_NET_F_GUEST_TSO6)
| dev_cfg
.features
.is_feature(Features::VIRTIO_NET_F_GUEST_UFO)
.is_feature(Features::VIRTIO_NET_F_MRG_RXBUF)
{
// Receive Buffers must be at least 65562 bytes large with these features set.
// See Virtio specification v1.1 - 5.1.6.3.1

// Currently we choose indirect descriptors if possible in order to allow
// as many packages as possible inside the queue.
let buff_def = [
Bytes::new(mem::size_of::<VirtioNetHdr>()).unwrap(),
Bytes::new(65550).unwrap(),
];

let spec = if dev_cfg
.features
.is_feature(Features::VIRTIO_F_RING_INDIRECT_DESC)
{
BuffSpec::Indirect(&buff_def)
} else {
BuffSpec::Single(Bytes::new(mem::size_of::<VirtioNetHdr>() + 65550).unwrap())
};

let num_buff: u16 = vq.size().into();

for _ in 0..num_buff {
let buff_tkn = match vq.prep_buffer(Rc::clone(vq), None, Some(spec.clone())) {
Ok(tkn) => tkn,
Err(_vq_err) => {
error!("Setup of network queue failed, which should not happen!");
panic!("setup of network queue failed!");
}
};

// BufferTokens are directly provided to the queue
// TransferTokens are directly dispatched
// Transfers will be awaited at the queue
buff_tkn
.provide()
.dispatch_await(Rc::clone(&self.poll_queue), false);
}
(1514 + mem::size_of::<VirtioNetHdr>())
.align_up(core::mem::size_of::<crossbeam_utils::CachePadded<u8>>())
} else {
// If above features not set, buffers must be at least 1526 bytes large.
// See Virtio specification v1.1 - 5.1.6.3.1
//
let buff_def = [
Bytes::new(mem::size_of::<VirtioNetHdr>()).unwrap(),
Bytes::new(dev_cfg.raw.get_mtu() as usize).unwrap(),
];
let spec = if dev_cfg
.features
.is_feature(Features::VIRTIO_F_RING_INDIRECT_DESC)
{
BuffSpec::Indirect(&buff_def)
} else {
BuffSpec::Single(
Bytes::new(mem::size_of::<VirtioNetHdr>() + dev_cfg.raw.get_mtu() as usize)
.unwrap(),
)
};

let num_buff: u16 = vq.size().into();
dev_cfg.raw.get_mtu() as usize + mem::size_of::<VirtioNetHdr>()
};

for _ in 0..num_buff {
let buff_tkn = match vq.prep_buffer(Rc::clone(vq), None, Some(spec.clone())) {
Ok(tkn) => tkn,
Err(_vq_err) => {
error!("Setup of network queue failed, which should not happen!");
panic!("setup of network queue failed!");
}
};
// See Virtio specification v1.1 - 5.1.6.3.1
//
let spec = BuffSpec::Single(Bytes::new(rx_size).unwrap());
for _ in 0..num_buff {
let buff_tkn = match vq.prep_buffer(Rc::clone(vq), None, Some(spec.clone())) {
Ok(tkn) => tkn,
Err(_vq_err) => {
error!("Setup of network queue failed, which should not happen!");
panic!("setup of network queue failed!");
}
};

// BufferTokens are directly provided to the queue
// TransferTokens are directly dispatched
// Transfers will be awaited at the queue
buff_tkn
.provide()
.dispatch_await(Rc::clone(&self.poll_queue), false);
}
// BufferTokens are directly provided to the queue
// TransferTokens are directly dispatched
// Transfers will be awaited at the queue
buff_tkn
.provide()
.dispatch_await(Rc::clone(&self.poll_queue), false);
}

// Safe virtqueue
Expand Down Expand Up @@ -631,42 +576,50 @@ impl NetworkDriver for VirtioNetDriver {
let (_, recv_data_opt) = transfer.as_slices().unwrap();
let mut recv_data = recv_data_opt.unwrap();

// If the given length is zero, we currently fail.
if recv_data.len() == 2 {
let recv_payload = recv_data.pop().unwrap();
/*let header = recv_data.pop().unwrap();
let header = unsafe {
const HEADER_SIZE: usize = mem::size_of::<VirtioNetHdr>();
core::mem::transmute::<[u8; HEADER_SIZE], VirtioNetHdr>(
header[..HEADER_SIZE].try_into().unwrap(),
)
};
trace!("Receive data with header {:?}", header);*/

let vec_data = recv_payload.to_vec();
transfer
.reuse()
.unwrap()
.provide()
.dispatch_await(Rc::clone(&self.recv_vqs.poll_queue), false);
// If the given length isn't 1, we currently fail.
if recv_data.len() == 1 {
let mut vec_data: Vec<u8> = Vec::with_capacity(self.mtu.into());
let num_buffers = {
let packet = recv_data.pop().unwrap();
let header = unsafe {
const HEADER_SIZE: usize = mem::size_of::<VirtioNetHdr>();
core::mem::transmute::<[u8; HEADER_SIZE], VirtioNetHdr>(
packet[..HEADER_SIZE].try_into().unwrap(),
)
};
trace!("Header: {:?}", header);
let num_buffers = header.num_buffers;

vec_data.extend_from_slice(&packet[mem::size_of::<VirtioNetHdr>()..]);
transfer
.reuse()
.unwrap()
.provide()
.dispatch_await(Rc::clone(&self.recv_vqs.poll_queue), false);

Some((RxToken::new(vec_data), TxToken::new()))
} else if recv_data.len() == 1 {
let packet = recv_data.pop().unwrap();
/*let header = unsafe {
const HEADER_SIZE: usize = mem::size_of::<VirtioNetHdr>();
core::mem::transmute::<[u8; HEADER_SIZE], VirtioNetHdr>(
packet[..HEADER_SIZE].try_into().unwrap(),
)
num_buffers
};
trace!("Receive data with header {:?}", header);*/

let vec_data = packet[mem::size_of::<VirtioNetHdr>()..].to_vec();
transfer
.reuse()
.unwrap()
.provide()
.dispatch_await(Rc::clone(&self.recv_vqs.poll_queue), false);
for _ in 1..num_buffers {
let transfer =
match RxQueues::post_processing(self.recv_vqs.get_next().unwrap()) {
Ok(trf) => trf,
Err(vnet_err) => {
warn!("Post processing failed. Err: {:?}", vnet_err);
return None;
}
};

let (_, recv_data_opt) = transfer.as_slices().unwrap();
let mut recv_data = recv_data_opt.unwrap();
let packet = recv_data.pop().unwrap();
vec_data.extend_from_slice(packet);
transfer
.reuse()
.unwrap()
.provide()
.dispatch_await(Rc::clone(&self.recv_vqs.poll_queue), false);
}

Some((RxToken::new(vec_data), TxToken::new()))
} else {
Expand Down Expand Up @@ -833,6 +786,8 @@ impl VirtioNetDriver {
feats.push(Features::VIRTIO_NET_F_GUEST_CSUM);
// Host should avoid the creation of checksums
feats.push(Features::VIRTIO_NET_F_CSUM);
// Driver can merge receive buffers
feats.push(Features::VIRTIO_NET_F_MRG_RXBUF);

// Currently the driver does NOT support the features below.
// In order to provide functionality for these, the driver
Expand Down
13 changes: 7 additions & 6 deletions src/drivers/virtio/virtqueue/split.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@ use alloc::rc::Rc;
use alloc::vec::Vec;
use core::cell::RefCell;
use core::ptr;
use core::sync::atomic::{fence, Ordering};

use align_address::Align;

Expand All @@ -21,6 +20,7 @@ use super::{
AsSliceU8, BuffSpec, Buffer, BufferToken, Bytes, DescrFlags, MemDescr, MemPool, Pinned,
Transfer, TransferState, TransferToken, Virtq, VqIndex, VqSize,
};
use crate::arch::memory_barrier;
use crate::arch::mm::paging::{BasePageSize, PageSize};
use crate::arch::mm::{paging, VirtAddr};

Expand Down Expand Up @@ -57,7 +57,7 @@ struct AvailRing {

struct UsedRing {
flags: &'static mut u16,
index: &'static mut u16,
index: *mut u16,
ring: &'static mut [UsedElem],
event: &'static mut u16,
}
Expand Down Expand Up @@ -193,16 +193,16 @@ impl DescrRing {
self.avail_ring.ring[*self.avail_ring.index as usize % self.avail_ring.ring.len()] =
index as u16;

fence(Ordering::SeqCst);
memory_barrier();
*self.avail_ring.index = self.avail_ring.index.wrapping_add(1);

(pin, 0, 0)
}

fn poll(&mut self) {
while self.read_idx != *self.used_ring.index {
while self.read_idx != unsafe { ptr::read_volatile(self.used_ring.index) } {
let cur_ring_index = self.read_idx as usize % self.used_ring.ring.len();
let used_elem = self.used_ring.ring[cur_ring_index];
let used_elem = unsafe { ptr::read_volatile(&self.used_ring.ring[cur_ring_index]) };

let tkn = unsafe { &mut *(self.ref_ring[used_elem.id as usize]) };

Expand All @@ -225,6 +225,7 @@ impl DescrRing {
}
None => tkn.state = TransferState::Finished,
}
memory_barrier();
self.read_idx = self.read_idx.wrapping_add(1);
}
}
Expand Down Expand Up @@ -426,7 +427,7 @@ impl SplitVq {
let used_ring = unsafe {
UsedRing {
flags: &mut *(used_raw as *mut u16),
index: &mut *(used_raw.offset(2) as *mut u16),
index: used_raw.offset(2) as *mut u16,
ring: core::slice::from_raw_parts_mut(
(used_raw.offset(4) as *const _) as *mut UsedElem,
size as usize,
Expand Down
10 changes: 8 additions & 2 deletions src/fd/socket/tcp.rs
Original file line number Diff line number Diff line change
Expand Up @@ -248,15 +248,21 @@ impl<T> Socket<T> {
let slice = unsafe { core::slice::from_raw_parts_mut(buf, len) };

if self.nonblocking.load(Ordering::Acquire) {
block_on(self.async_read(slice), Some(Duration::ZERO)).unwrap_or_else(|x| {
poll_on(self.async_read(slice), Some(Duration::ZERO)).unwrap_or_else(|x| {
if x == -ETIME {
(-EAGAIN).try_into().unwrap()
} else {
x.try_into().unwrap()
}
})
} else {
block_on(self.async_read(slice), None).unwrap_or_else(|x| x.try_into().unwrap())
poll_on(self.async_read(slice), Some(Duration::from_secs(2))).unwrap_or_else(|x| {
if x == -ETIME {
block_on(self.async_read(slice), None).unwrap_or_else(|y| y.try_into().unwrap())
} else {
x.try_into().unwrap()
}
})
}
}

Expand Down