This repository has been archived by the owner on Jan 10, 2025. It is now read-only.

Add support for dynamic stack frames
This change implements dynamic stack frames when config.dynamic_stack_frames is
set to true.

When dynamic_stack_frames=true, r11 is exposed as the stack pointer. The stack is
fully descending, so `sub r11, N` grows it and `add r11, N` shrinks it. Those are
the only operations permitted on r11; the interpreter and the JIT special-case them
to resize the stack. No other operations on the stack register are allowed.
alessandrod committed Mar 25, 2022
1 parent 330baea commit 8a7152f
Showing 9 changed files with 654 additions and 152 deletions.
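
The commit message notes that the interpreter and JIT special-case `add`/`sub` on r11 to resize the stack. As a rough illustration only (not part of this commit), that dispatch could look like the sketch below; the `Insn` struct, the opcode values and the standalone `resize_stack` here are simplified stand-ins rather than the crate's real definitions.

// Sketch: special-casing writes to r11 when dynamic stack frames are enabled.
// All names below are simplified stand-ins for illustration.
const STACK_PTR_REG: u8 = 11;
const ADD64_IMM: u8 = 0x07; // 64-bit `add dst, imm`
const SUB64_IMM: u8 = 0x17; // 64-bit `sub dst, imm`

struct Insn { opc: u8, dst: u8, imm: i64 }

struct CallFrames { stack_ptr: u64 }

impl CallFrames {
    fn resize_stack(&mut self, amount: i64) {
        // Fully descending stack: negative amounts grow it, positive shrink it.
        // Wrapping is tolerated; invalid addresses surface later when mapped.
        self.stack_ptr = self.stack_ptr.overflowing_add(amount as u64).0;
    }
}

fn step(frames: &mut CallFrames, insn: &Insn) {
    match insn.opc {
        // `sub r11, N` grows the stack by N bytes.
        SUB64_IMM if insn.dst == STACK_PTR_REG => frames.resize_stack(-insn.imm),
        // `add r11, N` shrinks the stack by N bytes.
        ADD64_IMM if insn.dst == STACK_PTR_REG => frames.resize_stack(insn.imm),
        // Everything else goes through the normal ALU/verifier paths.
        _ => {}
    }
}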
210 changes: 156 additions & 54 deletions src/call_frames.rs
@@ -12,7 +12,7 @@ use crate::{
/// One call frame
#[derive(Clone, Debug)]
struct CallFrame {
vm_addr: u64,
frame_ptr: u64,
saved_reg: [u64; 4],
return_ptr: usize,
}
@@ -24,38 +24,46 @@ struct CallFrame {
pub struct CallFrames<'a> {
config: &'a Config,
stack: AlignedMemory,
stack_ptr: u64,
frame_index: usize,
frame_index_max: usize,
frames: Vec<CallFrame>,
}
impl<'a> CallFrames<'a> {
/// New call frame, depth indicates maximum call depth
pub fn new(config: &'a Config) -> Self {
let mut stack =
AlignedMemory::new(config.max_call_depth * config.stack_frame_size, HOST_ALIGN);
stack
.resize(config.max_call_depth * config.stack_frame_size, 0)
.unwrap();
let stack_len = config.stack_size();
let mut stack = AlignedMemory::new(stack_len, HOST_ALIGN);
stack.resize(stack_len, 0).unwrap();

let mut frames = CallFrames {
config,
stack,
stack_ptr: 0,
frame_index: 0,
frame_index_max: 0,
frames: vec![
CallFrame {
vm_addr: 0,
frame_ptr: 0,
saved_reg: [0u64; SCRATCH_REGS],
return_ptr: 0
return_ptr: 0,
};
config.max_call_depth
],
};
// Separate each stack frame's virtual address so that stack over/under-run is caught explicitly
let gap_factor = if config.enable_stack_frame_gaps { 2 } else { 1 };
for i in 0..config.max_call_depth {
frames.frames[i].vm_addr =
MM_STACK_START + (i * gap_factor * config.stack_frame_size) as u64;

let frame = &mut frames.frames[0];
if config.dynamic_stack_frames {
// the stack is fully descending, frames start as empty and change
// size as resize_stack() is invoked anytime r11 is modified
frame.frame_ptr = MM_STACK_START + stack_len as u64;
frames.stack_ptr = frame.frame_ptr;
} else {
// within a frame the stack grows down, but frames are ascending
frame.frame_ptr = MM_STACK_START + config.stack_frame_size as u64;
frames.stack_ptr = MM_STACK_START;
}

frames
}

@@ -64,7 +72,7 @@ impl<'a> CallFrames<'a> {
MemoryRegion::new_from_slice(
self.stack.as_slice(),
MM_STACK_START,
if self.config.enable_stack_frame_gaps {
if !self.config.dynamic_stack_frames && self.config.enable_stack_frame_gaps {
self.config.stack_frame_size as u64
} else {
0
@@ -75,12 +83,20 @@

/// Get the vm address of the beginning of each stack frame
pub fn get_frame_pointers(&self) -> Vec<u64> {
self.frames.iter().map(|frame| frame.vm_addr).collect()
self.frames[..=self.frame_index]
.iter()
.map(|frame| frame.frame_ptr)
.collect()
}

/// Get the address of a frame's top of stack
pub fn get_stack_top(&self) -> u64 {
self.frames[self.frame_index].vm_addr + self.config.stack_frame_size as u64
/// Get the frame pointer for the current frame
pub fn get_frame_ptr(&self) -> u64 {
self.frames[self.frame_index].frame_ptr
}

/// Get the stack pointer
pub fn get_stack_ptr(&self) -> u64 {
self.stack_ptr
}

/// Get current call frame index, 0 is the root frame
@@ -105,11 +121,31 @@
self.frames.len(),
));
}

self.frames[self.frame_index].saved_reg[..].copy_from_slice(saved_reg);
let frame_ptr = self.frames[self.frame_index].frame_ptr;
self.frames[self.frame_index].return_ptr = return_ptr;

self.frame_index += 1;

let frame = &mut self.frames[self.frame_index];

if self.config.dynamic_stack_frames {
frame.frame_ptr = self.stack_ptr;
} else {
frame.frame_ptr = frame_ptr
+ self.config.stack_frame_size as u64
* if self.config.enable_stack_frame_gaps {
2
} else {
1
};
self.stack_ptr = frame.frame_ptr - self.config.stack_frame_size as u64;
}

self.frame_index_max = self.frame_index_max.max(self.frame_index);
Ok(self.get_stack_top())

Ok(self.get_frame_ptr())
}

/// Pop a frame
@@ -122,10 +158,23 @@
self.frame_index -= 1;
Ok((
self.frames[self.frame_index].saved_reg,
self.get_stack_top(),
self.get_frame_ptr(),
self.frames[self.frame_index].return_ptr,
))
}

/// Resize the stack
pub fn resize_stack(&mut self, amount: i64) {
debug_assert!(self.config.dynamic_stack_frames);

// Let the stack overflow. For legitimate programs, this is a nearly
// impossible condition to hit since programs are metered and we already
// enforce a maximum call depth. For programs that intentionally mess
// around with the stack pointer, MemoryRegion::map will return
// InvalidVirtualAddress(stack_ptr) once an invalid stack address is
// accessed.
self.stack_ptr = self.stack_ptr.overflowing_add(amount as u64).0;
}
}

#[cfg(test)]
@@ -135,44 +184,97 @@ mod tests {

#[test]
fn test_frames() {
for (enable_stack_frame_gaps, dynamic_stack_frames) in
[(false, false), (true, false), (false, true)]
{
let config = Config {
max_call_depth: 10,
stack_frame_size: 8,
enable_stack_frame_gaps,
dynamic_stack_frames,
..Config::default()
};
let mut frames = CallFrames::new(&config);
let mut frame_ptrs: Vec<u64> = Vec::new();

for i in 0..config.max_call_depth - 1 {
let registers = vec![i as u64; 4];

assert_eq!(frames.get_frame_index(), i);
frame_ptrs.push(frames.get_frame_pointers()[i]);

let expected_frame_size = if dynamic_stack_frames {
let frame_size = i as i64 * 8;
frames.resize_stack(-frame_size);

frame_size as u64
} else {
config.stack_frame_size as u64 * if enable_stack_frame_gaps { 2 } else { 1 }
};

// push the next frame, get the new frame pointers and check
// that push returns the newly added frame pointer
let top = frames.push::<UserError>(&registers[0..4], i).unwrap();
let new_ptrs = frames.get_frame_pointers();
assert_eq!(top, new_ptrs[i + 1]);

// check that the size of the new frame is what we expect. We
// need to abs() here since dynamic frames grow down but static
// frames grow up.
let frame_size = (frame_ptrs[i] as i64 - new_ptrs[i + 1] as i64).abs() as u64;
assert_eq!(frame_size, expected_frame_size);
}

let i = config.max_call_depth - 1;
let registers = vec![i as u64; 4];
assert_eq!(frames.get_frame_index(), i);
frame_ptrs.push(frames.get_frame_pointers()[i]);

assert!(frames
.push::<UserError>(&registers, config.max_call_depth - 1)
.is_err());

for i in (0..config.max_call_depth - 1).rev() {
let (saved_reg, frame_ptr, return_ptr) = frames.pop::<UserError>().unwrap();
assert_eq!(saved_reg, [i as u64, i as u64, i as u64, i as u64]);
assert_eq!(frame_ptrs[i], frame_ptr);
assert_eq!(i, return_ptr);
}

assert!(frames.pop::<UserError>().is_err());
}
}

#[test]
fn test_stack_ptr_overflow() {
let config = Config {
max_call_depth: 10,
stack_frame_size: 8,
enable_stack_frame_gaps: true,
enable_stack_frame_gaps: false,
dynamic_stack_frames: true,
..Config::default()
};
let mut frames = CallFrames::new(&config);
let mut ptrs: Vec<u64> = Vec::new();
for i in 0..config.max_call_depth - 1 {
let registers = vec![i as u64; config.stack_frame_size];
assert_eq!(frames.get_frame_index(), i);
ptrs.push(frames.get_frame_pointers()[i]);

let top = frames.push::<UserError>(&registers[0..4], i).unwrap();
let new_ptrs = frames.get_frame_pointers();
assert_eq!(top, new_ptrs[i + 1] + config.stack_frame_size as u64);
assert_ne!(top, ptrs[i] + config.stack_frame_size as u64 - 1);
assert!(
!(ptrs[i] <= new_ptrs[i + 1]
&& new_ptrs[i + 1] < ptrs[i] + config.stack_frame_size as u64)
);
}
let i = config.max_call_depth - 1;
let registers = vec![i as u64; config.stack_frame_size];
assert_eq!(frames.get_frame_index(), i);
ptrs.push(frames.get_frame_pointers()[i]);

assert!(frames
.push::<UserError>(&registers, config.max_call_depth - 1)
.is_err());

for i in (0..config.max_call_depth - 1).rev() {
let (saved_reg, stack_ptr, return_ptr) = frames.pop::<UserError>().unwrap();
assert_eq!(saved_reg, [i as u64, i as u64, i as u64, i as u64]);
assert_eq!(ptrs[i] + config.stack_frame_size as u64, stack_ptr);
assert_eq!(i, return_ptr);
}
frames.resize_stack(-(MM_STACK_START as i64 + config.stack_size() as i64));
assert_eq!(frames.get_stack_ptr(), 0);

// test that we overflow the stack without panicking
frames.resize_stack(-2);
assert_eq!(frames.get_stack_ptr(), u64::MAX - 1);
}

#[test]
fn test_stack_ptr_underflow() {
let config = Config {
enable_stack_frame_gaps: false,
dynamic_stack_frames: true,
..Config::default()
};
let mut frames = CallFrames::new(&config);
frames.resize_stack(-(MM_STACK_START as i64 + config.stack_size() as i64));
assert_eq!(frames.get_stack_ptr(), 0);

assert!(frames.pop::<UserError>().is_err());
// test that we underflow the stack without panicking
frames.resize_stack(u64::MAX as i64);
frames.resize_stack(2);
assert_eq!(frames.get_stack_ptr(), 1);
}
}
8 changes: 6 additions & 2 deletions src/ebpf.rs
@@ -22,12 +22,16 @@ use byteorder::{ByteOrder, LittleEndian};
use hash32::{Hash, Hasher, Murmur3Hasher};
use std::fmt;

/// SBF version flag
pub const EF_SBF_V2: u32 = 0x20;
/// Maximum number of instructions in an eBPF program.
pub const PROG_MAX_INSNS: usize = 65_536;
/// Size of an eBPF instructions, in bytes.
pub const INSN_SIZE: usize = 8;
/// Stack register
pub const STACK_REG: usize = 10;
/// Frame pointer register
pub const FRAME_PTR_REG: usize = 10;
/// Stack pointer register
pub const STACK_PTR_REG: usize = 11;
/// First scratch register
pub const FIRST_SCRATCH_REG: usize = 6;
/// Number of scratch registers
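
For context on the new constants above: FRAME_PTR_REG (r10) keeps its role as the frame pointer, while STACK_PTR_REG (r11) is the register that dynamic stack frames expose as the stack pointer. As a hedged sketch (not the crate's actual verifier), a check rejecting any other use of r11 might look like this; `Insn`, `VerifierError` and the opcode values are hypothetical stand-ins, only the register constants mirror this commit.

// Sketch: only `add r11, imm` / `sub r11, imm` may touch the stack pointer.
// The register constants mirror this commit; everything else is illustrative.
pub const FRAME_PTR_REG: usize = 10;
pub const STACK_PTR_REG: usize = 11;

const ADD64_IMM: u8 = 0x07;
const SUB64_IMM: u8 = 0x17;

struct Insn { opc: u8, dst: usize, src: usize }

#[derive(Debug)]
struct VerifierError(String);

fn check_stack_ptr_usage(insn: &Insn, dynamic_stack_frames: bool) -> Result<(), VerifierError> {
    if !dynamic_stack_frames {
        // Without dynamic frames r11 is not exposed to programs at all.
        if insn.dst == STACK_PTR_REG || insn.src == STACK_PTR_REG {
            return Err(VerifierError("r11 is not a usable register".into()));
        }
    } else if insn.src == STACK_PTR_REG
        || (insn.dst == STACK_PTR_REG && insn.opc != ADD64_IMM && insn.opc != SUB64_IMM)
    {
        // With dynamic frames, r11 may only be written by immediate add/sub
        // and may never be read as a source operand.
        return Err(VerifierError("invalid use of the stack pointer r11".into()));
    }
    Ok(())
}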
