use crate::machine::Mapping;
use crate::shims::unix::fs::EvalContextExt as _;
use crate::*;
use rustc_target::abi::{Align, Size};

const PAGE_SIZE: u64 = 4096;

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    fn mmap(
        &mut self,
        addr: &OpTy<'tcx, Provenance>,
        length: &OpTy<'tcx, Provenance>,
        prot: &OpTy<'tcx, Provenance>,
        flags: &OpTy<'tcx, Provenance>,
        fd: &OpTy<'tcx, Provenance>,
        offset: &OpTy<'tcx, Provenance>,
    ) -> InterpResult<'tcx, Pointer<Option<Provenance>>> {
        let this = self.eval_context_mut();

        let addr = this.read_pointer(addr)?;
        let length = this.read_scalar(length)?.to_machine_usize(this)?;
        let prot = this.read_scalar(prot)?.to_i32()?;
        let flags = this.read_scalar(flags)?.to_i32()?;
        let fd = this.read_scalar(fd)?.to_i32()?;
        let offset = this.read_scalar(offset)?.to_machine_usize(this)?;

        let print_args = || {
            eprintln!(
                "mmap(addr: {addr}, length: {length}, prot: {prot:x}, flags: {flags:#x}, fd: {fd}, offset: {offset})"
            );
        };

        let prot_read = this.eval_libc_i32("PROT_READ")?;
        let prot_write = this.eval_libc_i32("PROT_WRITE")?;
        let map_private = this.eval_libc_i32("MAP_PRIVATE")?;
        let map_anonymous = this.eval_libc_i32("MAP_ANONYMOUS")?;

        // Only one of MAP_PRIVATE, MAP_SHARED, or MAP_SHARED_VALIDATE may be passed;
        // Miri only supports MAP_PRIVATE, so reject anything else.
        if flags & map_private == 0 {
            throw_unsup_format!("Miri does not support MAP_SHARED or MAP_SHARED_VALIDATE");
        }

        if flags & this.eval_libc_i32("MAP_STACK")? > 0 {
            throw_unsup_format!("Miri does not support MAP_STACK");
        }

        if prot & this.eval_libc_i32("PROT_EXEC")? > 0 {
            print_args();
            throw_unsup_format!("Miri does not support mapping executable pages");
        }

        if offset != 0 {
            print_args();
            throw_unsup_format!("Miri does not support non-zero offsets to mmap (yet)");
        }

        if !this.ptr_is_null(addr)? {
            print_args();
            throw_unsup_format!("Miri does not support non-null pointers to mmap");
        }

        if length == 0 {
            print_args();
            this.set_last_error(Scalar::from_i32(this.eval_libc_i32("EINVAL")?))?;
            return Ok(Pointer::null());
        }

        let align = Align::from_bytes(PAGE_SIZE).unwrap();
        let map_length = ((length + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
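        // For example, with the 4096-byte page size assumed above, a requested length of
        // 5000 bytes rounds up to a map_length of 8192 (two pages), while a request of
        // exactly 4096 bytes stays at 4096.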

        if (flags == (map_private | map_anonymous)) || (flags == map_private && fd != -1) {
            // mmap as a memory allocator
            let ptr = this.allocate_ptr(
                Size::from_bytes(map_length),
                align,
                MiriMemoryKind::Mmap.into(),
            )?;
            // We just allocated this, the access is definitely in-bounds and fits into our address space.
            // mmap guarantees new mappings are zero-init.
            this.write_bytes_ptr(
                ptr.into(),
                std::iter::repeat(0u8).take(usize::try_from(map_length).unwrap()),
            )
            .unwrap();
            let (prov, offset) = ptr.into_parts();
            let ptr = Pointer::new(Some(prov), offset);

            this.machine.mappings.push(Mapping {
                ptr,
                alloc_id: match prov {
                    Provenance::Concrete { alloc_id, .. } => alloc_id,
                    Provenance::Wildcard =>
                        unreachable!("allocate_ptr should not return a Wildcard pointer"),
                },
                len: map_length,
                can_read: prot & prot_read > 0,
                can_write: prot & prot_write > 0,
            });

            // If we were passed an fd, populate the first `length` bytes from that file.
            if fd != -1 {
                let mut read_ptr = ptr;
                let mut bytes_remaining = length;
                loop {
                    let bytes_read: u64 =
                        this.read(fd, read_ptr, bytes_remaining)?.try_into().unwrap();
                    if bytes_read == 0 {
                        break;
                    }
                    bytes_remaining -= bytes_read;
                    read_ptr = read_ptr.offset(Size::from_bytes(bytes_read), &this.tcx)?;
                    if bytes_remaining == 0 {
                        break;
                    }
                }
                // Reaching EOF before `length` bytes have been read is not an error.
            }

            Ok(ptr)
        } else {
            throw_unsup_format!(
                "mmap is not supported with arguments: (addr: {addr}, length: {length}, prot: {prot:x}, flags: {flags:#x}, fd: {fd}, offset: {offset})"
            );
        }
    }
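
    // Illustrative sketch (not part of this change): an anonymous private mapping made by a
    // program running under Miri is served by the allocator path above, while passing a real
    // fd with plain MAP_PRIVATE takes the file-backed path that populates the mapping.
    //
    //     let ptr = unsafe {
    //         libc::mmap(
    //             std::ptr::null_mut(),
    //             4096,
    //             libc::PROT_READ | libc::PROT_WRITE,
    //             libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
    //             -1,
    //             0,
    //         )
    //     };
    //     assert!(!ptr.is_null());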

    fn mremap(
        &mut self,
        old_address: &OpTy<'tcx, Provenance>,
        old_size: &OpTy<'tcx, Provenance>,
        new_size: &OpTy<'tcx, Provenance>,
        flags: &OpTy<'tcx, Provenance>,
    ) -> InterpResult<'tcx, Pointer<Option<Provenance>>> {
        let this = self.eval_context_mut();

        let old_address = this.read_pointer(old_address)?;
        let _old_size = this.read_scalar(old_size)?.to_machine_usize(this)?;
        let new_size = this.read_scalar(new_size)?.to_machine_usize(this)?;
        let flags = this.read_scalar(flags)?.to_i32()?;

        if flags & this.eval_libc_i32("MREMAP_FIXED")? > 0 {
            throw_unsup_format!("Miri does not support mremap with MREMAP_FIXED");
        }

        if flags & this.eval_libc_i32("MREMAP_DONTUNMAP")? > 0 {
            throw_unsup_format!("Miri does not support mremap with MREMAP_DONTUNMAP");
        }

        if flags & this.eval_libc_i32("MREMAP_MAYMOVE")? == 0 {
            // We only support MREMAP_MAYMOVE, so not passing the flag is just a failure
            this.set_last_error(Scalar::from_i32(this.eval_libc_i32("EINVAL")?))?;
            return Ok(Pointer::null());
        }

        let map_idx = this.machine.mappings.iter_mut().position(|map| map.ptr == old_address);

        if let Some(i) = map_idx {
            let pointer = this.realloc(old_address, new_size, MiriMemoryKind::Mmap)?;
            let map = &mut this.machine.mappings[i];
            map.ptr = pointer;
            map.len = new_size;
            map.alloc_id = match pointer.into_parts().0.unwrap() {
                Provenance::Concrete { alloc_id, .. } => alloc_id,
                Provenance::Wildcard =>
                    unreachable!("realloc should not return a Wildcard pointer"),
            };
            Ok(pointer)
        } else {
            // This isn't a previous mapping
            this.set_last_error(Scalar::from_i32(this.eval_libc_i32("EINVAL")?))?;
            Ok(Pointer::null())
        }
    }
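
    // Illustrative sketch (not part of this change): a target program growing a mapping in
    // place or with a move is handled by the realloc path above.
    //
    //     let bigger = unsafe { libc::mremap(ptr, 4096, 8192, libc::MREMAP_MAYMOVE) };
    //     assert_ne!(bigger, libc::MAP_FAILED);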

    fn munmap(
        &mut self,
        addr: &OpTy<'tcx, Provenance>,
        length: &OpTy<'tcx, Provenance>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let addr = this.read_pointer(addr)?;
        let length = this.read_scalar(length)?.to_machine_usize(this)?;

        // The address addr must be a multiple of the page size (but length need not be).
        if addr.addr().bytes() % PAGE_SIZE != 0 {
            this.set_last_error(Scalar::from_i32(this.eval_libc_i32("EINVAL")?))?;
            return Ok(-1);
        }

        // All pages containing a part of the indicated range are unmapped.
        // TODO: That means we can actually alter multiple mappings with munmap :/
        let length = ((length + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;

        let map_idx = this.machine.mappings.iter_mut().position(|map| {
            let start = map.ptr.addr();
            let end = map.ptr.addr() + Size::from_bytes(map.len);
            addr.addr() >= start && addr.addr() < end
        });

        if let Some(i) = map_idx {
            let map = &this.machine.mappings[i];
            if map.ptr.addr() == addr.addr() && map.len == length {
                this.machine.mappings.remove(i);
                this.free(addr, MiriMemoryKind::Mmap)?;
            } else {
                throw_unsup_format!("Miri does not support partial munmap");
            }
        }
        // It is not an error if the indicated range does not contain any mapped pages.
        Ok(0)
    }
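
    // Illustrative sketch (not part of this change): unmapping exactly the region that was
    // previously mapped takes the removal path above; partial unmapping is rejected.
    //
    //     let ret = unsafe { libc::munmap(ptr, 4096) };
    //     assert_eq!(ret, 0);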
}