diff --git a/doc/insref/src/export.rs b/doc/insref/src/export.rs index ad7321fa27..41fde69f1a 100644 --- a/doc/insref/src/export.rs +++ b/doc/insref/src/export.rs @@ -9,7 +9,7 @@ fn main() { let opmap = match args.next().expect("Architecture name").as_str() { "x64" => dynasm::dynasm_extract!(x64), "aarch64" => dynasm::dynasm_extract!(aarch64), - x => panic!("Unknown opmap format '{}'", x) + x => panic!("Unknown opmap format '{}'", x), }; let stdout = io::stdout(); diff --git a/doc/insref/src/main.rs b/doc/insref/src/main.rs index 7654197ad4..384bdc936d 100644 --- a/doc/insref/src/main.rs +++ b/doc/insref/src/main.rs @@ -7,7 +7,7 @@ fn main() { let opmap = match args.next().expect("Architecture name").as_str() { "x64" => dynasm::dynasm_opmap!(x64), "aarch64" => dynasm::dynasm_opmap!(aarch64), - x => panic!("Unknown opmap format '{}'", x) + x => panic!("Unknown opmap format '{}'", x), }; let stdout = io::stdout(); diff --git a/plugin/src/arch/aarch64/aarch64data.rs b/plugin/src/arch/aarch64/aarch64data.rs index 9ca4cf8db0..86f1e07294 100644 --- a/plugin/src/arch/aarch64/aarch64data.rs +++ b/plugin/src/arch/aarch64/aarch64data.rs @@ -1,8 +1,8 @@ -use crate::common::Size; use super::ast::Modifier; +use crate::common::Size; use lazy_static::lazy_static; -use std::collections::{HashMap, hash_map}; +use std::collections::{hash_map, HashMap}; #[derive(Debug, Clone, Copy, PartialEq)] pub enum Matcher { @@ -83,11 +83,11 @@ pub enum Matcher { #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Command { // commands that advance the argument pointer - R(u8), // encode a register, or reference base, into a 5-bit bitfield. + R(u8), // encode a register, or reference base, into a 5-bit bitfield. REven(u8), // same as R, but requires that the register is even. RNoZr(u8), // same as R, but does not allow register 31. - R4(u8), // encode a register in the range 0-15 into a 4-bit bitfield - RNext, // encode that this register should be the previous register, plus one + R4(u8), // encode a register in the range 0-15 into a 4-bit bitfield + RNext, // encode that this register should be the previous register, plus one // unsigned immediate encodings Ubits(u8, u8), // encodes an unsigned immediate starting at bit .0, .1 bits long @@ -100,14 +100,14 @@ pub enum Command { Ufields(&'static [u8]), // an immediate, encoded bitwise with the highest bit going into field 0, up to the lowest going into the last bitfield. // signed immediate encodings - Sbits(u8, u8), // encodes a signed immediate starting at bit .0, .1 bits long + Sbits(u8, u8), // encodes a signed immediate starting at bit .0, .1 bits long Sscaled(u8, u8, u8), // encodes a signed immediate, starting at bit .0, .1 bits long, shifted .2 bits to the right before encoding // bit slice encodings. These don't advance the current argument. 
Only the slice argument actually encodes anything BUbits(u8), // checks if the pointed value fits in the given amount of bits - BUsum(u8), // checks that the pointed value fits between 1 and (1 << .0) - prev + BUsum(u8), // checks that the pointed value fits between 1 and (1 << .0) - prev BSscaled(u8, u8), - BUrange(u8, u8), // check if the pointed value is between min/max + BUrange(u8, u8), // check if the pointed value is between min/max Uslice(u8, u8, u8), // encodes at .0, .1 bits long, the bitslice starting at .2 from the current arg Sslice(u8, u8, u8), // encodes at .0, .1 bits long, the bitslice starting at .2 from the current arg @@ -118,7 +118,7 @@ pub enum Command { Rwidth(u8), // Extend/Shift fields - Rotates(u8), // 2-bits field encoding [LSL, LSR, ASR, ROR] + Rotates(u8), // 2-bits field encoding [LSL, LSR, ASR, ROR] ExtendsW(u8), // 3-bits field encoding [UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX]. Additionally, LSL is interpreted as UXTW ExtendsX(u8), // 3-bits field encoding [UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX]. Additionally, LSL is interpreted as UXTX @@ -181,7 +181,6 @@ impl Relocation { } } - #[derive(Debug, Clone, Copy)] pub struct Opdata { /// The base template for the encoding. @@ -189,7 +188,7 @@ pub struct Opdata { /// A set of matchers capable of matching the instruction encoding that this instruction represents. pub matchers: &'static [Matcher], /// A sequence of encoder commands that check the matched instruction on validity and whose output gets orred together with the original template at runtime. - pub commands: &'static [Command] + pub commands: &'static [Command], } macro_rules! SingleOp { diff --git a/plugin/src/arch/aarch64/ast.rs b/plugin/src/arch/aarch64/ast.rs index 4488f41044..a3a49cc5a4 100644 --- a/plugin/src/arch/aarch64/ast.rs +++ b/plugin/src/arch/aarch64/ast.rs @@ -1,21 +1,20 @@ -use syn; use proc_macro2::Span; +use syn; -use crate::common::{Size, Jump}; - +use crate::common::{Jump, Size}; /// A complete abstraction of an aarch64 register access. #[derive(Debug, Clone)] pub enum Register { Scalar(RegScalar), - Vector(RegVector) + Vector(RegVector), } -/// A vcalar register. Can be either of the integer or simd families. +/// A vcalar register. Can be either of the integer or simd families. #[derive(Debug, Clone)] pub struct RegScalar { pub kind: RegKind, - pub size: Size + pub size: Size, } /// A vector register. Can only be of the simd family @@ -24,53 +23,100 @@ pub struct RegVector { pub kind: RegKind, pub element_size: Size, pub lanes: Option, - pub element: Option + pub element: Option, } // Register id without indication of its usage. Either a static Regid or a family identifier + expression to choose the register #[derive(Debug, Clone)] pub enum RegKind { Static(RegId), - Dynamic(RegFamily, syn::Expr) + Dynamic(RegFamily, syn::Expr), } // a register identifier. This identifies an architecturally completely separate register. #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] pub enum RegId { // regular registers. 
Either 4 or 8 bytes - X0 = 0x00, X1 = 0x01, X2 = 0x02, X3 = 0x03, - X4 = 0x04, X5 = 0x05, X6 = 0x06, X7 = 0x07, - X8 = 0x08, X9 = 0x09, X10= 0x0A, X11= 0x0B, - X12= 0x0C, X13= 0x0D, X14= 0x0E, X15= 0x0F, - X16= 0x10, X17= 0x11, X18= 0x12, X19= 0x13, - X20= 0x14, X21= 0x15, X22= 0x16, X23= 0x17, - X24= 0x18, X25= 0x19, X26= 0x1A, X27= 0x1B, - X28= 0x1C, X29= 0x1D, X30= 0x1E, + X0 = 0x00, + X1 = 0x01, + X2 = 0x02, + X3 = 0x03, + X4 = 0x04, + X5 = 0x05, + X6 = 0x06, + X7 = 0x07, + X8 = 0x08, + X9 = 0x09, + X10 = 0x0A, + X11 = 0x0B, + X12 = 0x0C, + X13 = 0x0D, + X14 = 0x0E, + X15 = 0x0F, + X16 = 0x10, + X17 = 0x11, + X18 = 0x12, + X19 = 0x13, + X20 = 0x14, + X21 = 0x15, + X22 = 0x16, + X23 = 0x17, + X24 = 0x18, + X25 = 0x19, + X26 = 0x1A, + X27 = 0x1B, + X28 = 0x1C, + X29 = 0x1D, + X30 = 0x1E, // zero register. Either 4 or 8 bytes - XZR= 0x1F, + XZR = 0x1F, // stack pointer. Either 4 or 8 bytes. the encoding overlaps XZR, and we only differentiate // the two of them to provide diagnostics. They count as the same family. SP = 0x3F, // scalar FP / vector SIMD registers. Can be used as 1, 2, 4, 8 or 16-byte size. - V0 = 0x40, V1 = 0x41, V2 = 0x42, V3 = 0x43, - V4 = 0x44, V5 = 0x45, V6 = 0x46, V7 = 0x47, - V8 = 0x48, V9 = 0x49, V10= 0x4A, V11= 0x4B, - V12= 0x4C, V13= 0x4D, V14= 0x4E, V15= 0x4F, - V16= 0x50, V17= 0x51, V18= 0x52, V19= 0x53, - V20= 0x54, V21= 0x55, V22= 0x56, V23= 0x57, - V24= 0x58, V25= 0x59, V26= 0x5A, V27= 0x5B, - V28= 0x5C, V29= 0x5D, V30= 0x5E, V31= 0x5F + V0 = 0x40, + V1 = 0x41, + V2 = 0x42, + V3 = 0x43, + V4 = 0x44, + V5 = 0x45, + V6 = 0x46, + V7 = 0x47, + V8 = 0x48, + V9 = 0x49, + V10 = 0x4A, + V11 = 0x4B, + V12 = 0x4C, + V13 = 0x4D, + V14 = 0x4E, + V15 = 0x4F, + V16 = 0x50, + V17 = 0x51, + V18 = 0x52, + V19 = 0x53, + V20 = 0x54, + V21 = 0x55, + V22 = 0x56, + V23 = 0x57, + V24 = 0x58, + V25 = 0x59, + V26 = 0x5A, + V27 = 0x5B, + V28 = 0x5C, + V29 = 0x5D, + V30 = 0x5E, + V31 = 0x5F, } // register family. INTEGER = Xn/Wn including XZR/WZR. INTEGERSP is just SP or XSP. 
SIMD = Bn/Hn/Sn/Dn/Qn #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] pub enum RegFamily { - INTEGER = 0, + INTEGER = 0, INTEGERSP = 1, - SIMD = 2, + SIMD = 2, } impl RegId { @@ -85,7 +131,7 @@ impl RegId { 0 => RegFamily::INTEGER, 1 => RegFamily::INTEGERSP, 2 => RegFamily::SIMD, - _ => unreachable!() + _ => unreachable!(), } } } @@ -95,7 +141,7 @@ impl RegKind { pub fn code(&self) -> Option { match self { RegKind::Static(code) => Some(code.code()), - RegKind::Dynamic(_, _) => None + RegKind::Dynamic(_, _) => None, } } @@ -108,7 +154,7 @@ impl RegKind { pub fn family(&self) -> RegFamily { match *self { RegKind::Static(code) => code.family(), - RegKind::Dynamic(family, _) => family + RegKind::Dynamic(family, _) => family, } } @@ -116,7 +162,7 @@ impl RegKind { pub fn is_dynamic(&self) -> bool { match self { RegKind::Static(_) => false, - RegKind::Dynamic(_, _) => true + RegKind::Dynamic(_, _) => true, } } @@ -158,7 +204,7 @@ impl RegVector { pub fn full_size(&self) -> Option { if let Some(lanes) = self.lanes { Some(u16::from(lanes) * u16::from(self.element_size.in_bytes())) - } else { + } else { None } } @@ -168,21 +214,21 @@ impl Register { pub fn size(&self) -> Size { match self { Register::Scalar(s) => s.size(), - Register::Vector(v) => v.element_size() + Register::Vector(v) => v.element_size(), } } pub fn kind(&self) -> &RegKind { match self { Register::Scalar(s) => &s.kind, - Register::Vector(v) => &v.kind + Register::Vector(v) => &v.kind, } } pub fn kind_owned(self) -> RegKind { match self { Register::Scalar(s) => s.kind, - Register::Vector(v) => v.kind + Register::Vector(v) => v.kind, } } @@ -196,7 +242,7 @@ impl Register { pub fn assume_vector(&self) -> &RegVector { match self { Register::Scalar(_) => panic!("That wasn't a vector register"), - Register::Vector(v) => v + Register::Vector(v) => v, } } } @@ -243,11 +289,7 @@ impl Modifier { pub fn expr_required(self) -> bool { match self { - Modifier::LSL - | Modifier::LSR - | Modifier::ASR - | Modifier::ROR - | Modifier::MSL => true, + Modifier::LSL | Modifier::LSR | Modifier::ASR | Modifier::ROR | Modifier::MSL => true, Modifier::SXTX | Modifier::SXTW | Modifier::SXTH @@ -263,15 +305,12 @@ impl Modifier { #[derive(Debug, Clone)] pub struct ModifyExpr { pub op: Modifier, - pub expr: Option + pub expr: Option, } impl ModifyExpr { pub fn new(op: Modifier, expr: Option) -> ModifyExpr { - ModifyExpr { - op, - expr - } + ModifyExpr { op, expr } } } @@ -281,17 +320,9 @@ impl ModifyExpr { #[derive(Debug)] pub enum RefItem { - Direct { - span: Span, - reg: Register - }, - Immediate { - value: syn::Expr - }, - Modifier { - span: Span, - modifier: ModifyExpr - } + Direct { span: Span, reg: Register }, + Immediate { value: syn::Expr }, + Modifier { span: Span, modifier: ModifyExpr }, } // basic parse results, before we start doing any kind of checking @@ -301,61 +332,61 @@ pub enum RawArg { Reference { span: Span, items: Vec, - bang: bool + bang: bool, }, // A register list, defined as first - last DashList { span: Span, first: Register, last: Register, - element: Option + element: Option, }, // A register list, defined as item, item, item, item - CommaList{ + CommaList { span: Span, items: Vec, - element: Option + element: Option, }, AmountList { span: Span, first: Register, amount: syn::Expr, - element: Option + element: Option, }, // direct register reference Direct { span: Span, - reg: Register + reg: Register, }, // jump target. 
Also used by PC-rel loads etc JumpTarget { - jump: Jump + jump: Jump, }, // just an arbitrary expression Immediate { prefixed: bool, - value: syn::Expr + value: syn::Expr, }, // a modifier Modifier { span: Span, - modifier: ModifyExpr + modifier: ModifyExpr, }, // a dot Dot { - span: Span + span: Span, }, // an ident, not intended to be parsed as an expression Lit { - ident: syn::Ident - } + ident: syn::Ident, + }, } // Contains the actual instruction mnemnonic. #[derive(Debug)] pub struct Instruction { pub span: Span, - pub ident: syn::Ident + pub ident: syn::Ident, } #[derive(Debug)] @@ -372,20 +403,20 @@ pub enum CleanArg { Reference { span: Span, base: Register, - kind: RefKind + kind: RefKind, }, RegList { span: Span, first: Register, amount: u8, - element: Option + element: Option, }, Direct { span: Span, - reg: Register + reg: Register, }, JumpTarget { - jump: Jump + jump: Jump, }, Immediate { prefixed: bool, @@ -393,35 +424,23 @@ pub enum CleanArg { }, Modifier { span: Span, - modifier: ModifyExpr + modifier: ModifyExpr, }, Dot { - span: Span + span: Span, }, Lit { - ident: syn::Ident - } + ident: syn::Ident, + }, } // flat arg list after matching, for encoding #[derive(Debug)] pub enum FlatArg { - Direct { - span: Span, - reg: RegKind - }, - Immediate { - value: syn::Expr, - }, - Modifier { - span: Span, - modifier: Modifier, - }, - JumpTarget { - jump: Jump - }, - Lit { - ident: syn::Ident - }, - Default + Direct { span: Span, reg: RegKind }, + Immediate { value: syn::Expr }, + Modifier { span: Span, modifier: Modifier }, + JumpTarget { jump: Jump }, + Lit { ident: syn::Ident }, + Default, } diff --git a/plugin/src/arch/aarch64/compiler.rs b/plugin/src/arch/aarch64/compiler.rs index 401c223a79..2811d92d86 100644 --- a/plugin/src/arch/aarch64/compiler.rs +++ b/plugin/src/arch/aarch64/compiler.rs @@ -1,18 +1,21 @@ +use super::aarch64data::{Command, Relocation, SpecialComm, COND_MAP, SPECIAL_IDENT_MAP}; +use super::ast::{FlatArg, Modifier, RegId, RegKind}; +use super::encoding_helpers; use super::matching::MatchData; -use super::aarch64data::{Command, COND_MAP, SPECIAL_IDENT_MAP, SpecialComm, Relocation}; use super::Context; -use super::ast::{FlatArg, RegKind, RegId, Modifier}; -use super::encoding_helpers; -use crate::common::{Stmt, Size, delimited, bitmask}; -use crate::parse_helpers::{as_ident, as_number, as_float, as_signed_number}; +use crate::common::{bitmask, delimited, Size, Stmt}; +use crate::parse_helpers::{as_float, as_ident, as_number, as_signed_number}; -use syn::spanned::Spanned; -use quote::{quote, quote_spanned}; use proc_macro2::TokenStream; use proc_macro_error::emit_error; +use quote::{quote, quote_spanned}; +use syn::spanned::Spanned; -pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result<(), Option> { +pub(super) fn compile_instruction( + ctx: &mut Context, + data: MatchData, +) -> Result<(), Option> { let mut cursor = 0usize; // All static bitfields (compile-time constant) will be encoded into this map of (offset, bitfield) @@ -27,88 +30,115 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< // special commands that don't check the current arg Command::A => { cursor += 1; - continue - }, + continue; + } Command::C => { cursor -= 1; - continue - }, + continue; + } Command::Rwidth(offset) => { statics.push((offset, data.simd_full_width.unwrap_or(true) as u32)); - continue - }, + continue; + } - _ => () + _ => (), } - let arg = data.args.get(cursor).expect("Invalid encoding data, tried to process more 
arguments than given"); + let arg = data + .args + .get(cursor) + .expect("Invalid encoding data, tried to process more arguments than given"); match *arg { - FlatArg::Direct { span, reg: RegKind::Static(id) } => match *command { - Command::R(offset) => { - statics.push((offset, u32::from(id.code()))); - }, - Command::REven(offset) => { - if id.code() & 1 != 0 { - emit_error!(span, "Field only supports even registers"); - return Err(None); + FlatArg::Direct { + span, + reg: RegKind::Static(id), + } => { + match *command { + Command::R(offset) => { + statics.push((offset, u32::from(id.code()))); } - statics.push((offset, u32::from(id.code()))); - }, - Command::RNoZr(offset) => { - if id.code() == 31 { - emit_error!(span, "Field does not support register the zr/sp register"); - return Err(None); + Command::REven(offset) => { + if id.code() & 1 != 0 { + emit_error!(span, "Field only supports even registers"); + return Err(None); + } + statics.push((offset, u32::from(id.code()))); } - statics.push((offset, u32::from(id.code()))); - }, - Command::R4(offset) => { - if id.code() >= 16 { - emit_error!(span, "Field only supports register numbers 0-15"); - return Err(None); + Command::RNoZr(offset) => { + if id.code() == 31 { + emit_error!(span, "Field does not support register the zr/sp register"); + return Err(None); + } + statics.push((offset, u32::from(id.code()))); } - statics.push((offset, u32::from(id.code()))); - }, - Command::RNext => { - if let Some(FlatArg::Direct { span: _prevspan, reg: ref prevreg } ) = data.args.get(cursor - 1) { - match prevreg { - RegKind::Static(previd) => if id.code() != ((previd.code() + 1) % 32) { - emit_error!(span, "Invalid register. This register has to be the register after the previous argument."); - return Err(None); - }, - RegKind::Dynamic(_, _) => if id != RegId::XZR { - emit_error!(span, "Please use XZR here to indicate that it should be the register after the previous argument."); - return Err(None); + Command::R4(offset) => { + if id.code() >= 16 { + emit_error!(span, "Field only supports register numbers 0-15"); + return Err(None); + } + statics.push((offset, u32::from(id.code()))); + } + Command::RNext => { + if let Some(FlatArg::Direct { + span: _prevspan, + reg: ref prevreg, + }) = data.args.get(cursor - 1) + { + match prevreg { + RegKind::Static(previd) => { + if id.code() != ((previd.code() + 1) % 32) { + emit_error!(span, "Invalid register. This register has to be the register after the previous argument."); + return Err(None); + } + } + RegKind::Dynamic(_, _) => { + if id != RegId::XZR { + emit_error!(span, "Please use XZR here to indicate that it should be the register after the previous argument."); + return Err(None); + } + } } + } else { + panic!("RNext command without the previous command being a register encoder"); } - } else { - panic!("RNext command without the previous command being a register encoder"); } - }, - _ => panic!("Invalid argument processor") - }, - FlatArg::Direct { span, reg: RegKind::Dynamic(_, ref expr) } => match *command { - Command::R(offset) - | Command::RNoZr(offset) => { - dynamics.push((offset, quote_spanned!{ span=> - #expr & 0x1F - })); - }, + _ => panic!("Invalid argument processor"), + } + } + FlatArg::Direct { + span, + reg: RegKind::Dynamic(_, ref expr), + } => match *command { + Command::R(offset) | Command::RNoZr(offset) => { + dynamics.push(( + offset, + quote_spanned! 
{ span=> + #expr & 0x1F + }, + )); + } Command::REven(offset) => { - dynamics.push((offset, quote_spanned!{ span=> - #expr & 0x1E - })); - }, + dynamics.push(( + offset, + quote_spanned! { span=> + #expr & 0x1E + }, + )); + } Command::R4(offset) => { - dynamics.push((offset, quote_spanned!{ span=> - #expr & 0xF - })); - }, + dynamics.push(( + offset, + quote_spanned! { span=> + #expr & 0xF + }, + )); + } Command::RNext => { emit_error!(span, "This register is constrained to be the register after the previous argument's register. As such, it does not support dynamic registers. Please substitute it with XZR to indicate this"); return Err(None); - }, - _ => panic!("Invalid argument processor") + } + _ => panic!("Invalid argument processor"), }, FlatArg::Modifier { modifier, .. } => match *command { Command::Rotates(offset) => match modifier { @@ -116,7 +146,7 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< Modifier::LSR => statics.push((offset, 0b01)), Modifier::ASR => statics.push((offset, 0b10)), Modifier::ROR => statics.push((offset, 0b11)), - _ => panic!("Unexpected modifier for argument processor") + _ => panic!("Unexpected modifier for argument processor"), }, Command::ExtendsW(offset) => match modifier { Modifier::UXTB => statics.push((offset, 0b000)), @@ -127,8 +157,8 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< Modifier::SXTH => statics.push((offset, 0b101)), Modifier::SXTW => statics.push((offset, 0b110)), Modifier::SXTX => statics.push((offset, 0b111)), - Modifier::LSL => statics.push((offset, 0b010)), - _ => panic!("Unexpected modifier for argument processor") + Modifier::LSL => statics.push((offset, 0b010)), + _ => panic!("Unexpected modifier for argument processor"), }, Command::ExtendsX(offset) => match modifier { Modifier::UXTB => statics.push((offset, 0b000)), @@ -139,24 +169,23 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< Modifier::SXTH => statics.push((offset, 0b101)), Modifier::SXTW => statics.push((offset, 0b110)), Modifier::SXTX => statics.push((offset, 0b111)), - Modifier::LSL => statics.push((offset, 0b011)), - _ => panic!("Unexpected modifier for argument processor") + Modifier::LSL => statics.push((offset, 0b011)), + _ => panic!("Unexpected modifier for argument processor"), }, - _ => panic!("Invalid argument processor") + _ => panic!("Invalid argument processor"), }, FlatArg::Immediate { ref value } => match *command { - // Condition codes, literals Command::Cond(offset) => { let name = as_ident(value).expect("bad command data").to_string(); let bits = *COND_MAP.get(&&*name).expect("bad command data"); statics.push((offset, u32::from(bits))) - }, + } Command::CondInv(offset) => { let name = as_ident(value).expect("bad command data").to_string(); let bits = *COND_MAP.get(&&*name).expect("bad command data"); statics.push((offset, u32::from(bits) ^ 1)) - }, + } Command::LitList(offset, listname) => { let name = as_ident(value).expect("bad command data").to_string(); let list = SPECIAL_IDENT_MAP.get(listname).expect("bad command data"); @@ -166,7 +195,7 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< emit_error!(value, "Unknown literal"); return Err(None); } - }, + } // unsigned integer encodings Command::Ubits(offset, bitlen) => { @@ -174,31 +203,40 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< if let Some(value) = unsigned_rangecheck(value, 0, mask, 0) { statics.push((offset, 
value?)); } else { - dynamics.push((offset, quote_spanned!{ value.span()=> - #value & #mask - })); + dynamics.push(( + offset, + quote_spanned! { value.span()=> + #value & #mask + }, + )); } - }, + } Command::Uscaled(offset, bitlen, shift) => { let mask = bitmask(bitlen); if let Some(value) = unsigned_rangecheck(value, 0, mask, shift) { statics.push((offset, value?)); } else { - dynamics.push((offset, quote_spanned!{ value.span()=> - (#value >> #shift) & #mask - })); + dynamics.push(( + offset, + quote_spanned! { value.span()=> + (#value >> #shift) & #mask + }, + )); } - }, + } Command::Uslice(offset, bitlen, shift) => { let mask = bitmask(bitlen); if let Some(value) = as_number(value) { statics.push((offset, ((value as u32) >> shift) & mask)); } else { - dynamics.push((offset, quote_spanned!{ value.span()=> - (#value >> #shift) & #mask - })); + dynamics.push(( + offset, + quote_spanned! { value.span()=> + (#value >> #shift) & #mask + }, + )); } - }, + } Command::Ulist(offset, options) => { if let Some(number) = as_number(value) { if let Some(i) = options.iter().rposition(|&n| u64::from(n) == number) { @@ -212,7 +250,7 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< [#(#options),*].iter().rposition(|&n| n as u32 == #value).expect("impossible value") as u32 })); } - }, + } Command::Urange(offset, min, max) => { let max = u32::from(max); let min = u32::from(min); @@ -221,43 +259,56 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< } else { let range = max - min; let mask = range.next_power_of_two() - 1; - dynamics.push((offset, quote_spanned!{ value.span()=> - (#value - #min) & #mask - })); + dynamics.push(( + offset, + quote_spanned! { value.span()=> + (#value - #min) & #mask + }, + )); } - }, + } Command::Usub(offset, bitlen, addval) => { let mask = bitmask(bitlen); let addval = u32::from(addval); if let Some(value) = unsigned_rangecheck(value, addval - mask, addval, 0) { statics.push((offset, addval - value?)); } else { - dynamics.push((offset, quote_spanned!{ value.span()=> - (#addval - #value) & #mask - })); + dynamics.push(( + offset, + quote_spanned! { value.span()=> + (#addval - #value) & #mask + }, + )); } - }, + } Command::Unegmod(offset, bitlen) => { let mask = bitmask(bitlen); let addval = 1u32 << bitlen; if let Some(value) = unsigned_rangecheck(value, 0, mask, 0) { statics.push((offset, (addval - value?) & mask)); } else { - dynamics.push((offset, quote_spanned!{ value.span()=> - (#addval - #value) & #mask - })); + dynamics.push(( + offset, + quote_spanned! { value.span()=> + (#addval - #value) & #mask + }, + )); } - }, + } Command::Usumdec(offset, bitlen) => { let mask = bitmask(bitlen); - if let Some(FlatArg::Immediate {value: leftvalue } ) = data.args.get(cursor - 1) { - dynamics.push((offset, quote_spanned!{ value.span()=> - (#leftvalue + #value - 1) & #mask - })); + if let Some(FlatArg::Immediate { value: leftvalue }) = data.args.get(cursor - 1) + { + dynamics.push(( + offset, + quote_spanned! 
{ value.span()=> + (#leftvalue + #value - 1) & #mask + }, + )); } else { panic!("Bad encoding data, previous argument was not an immediate"); } - }, + } Command::Ufields(bitfields) => { let mask = bitmask(bitfields.len() as u8); if let Some(value) = unsigned_rangecheck(value, 0, mask, 0) { @@ -267,12 +318,15 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< } } else { for (i, &field) in bitfields.iter().rev().enumerate() { - dynamics.push((field as u8, quote_spanned!{ value.span()=> - (#value >> #i) & 1 - })); + dynamics.push(( + field as u8, + quote_spanned! { value.span()=> + (#value >> #i) & 1 + }, + )); } } - }, + } // signed integer encoding Command::Sbits(offset, bitlen) => { @@ -281,32 +335,41 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< if let Some(value) = signed_rangecheck(value, half, mask as i32 + half, 0) { statics.push((offset, (value? as u32) & mask)); } else { - dynamics.push((offset, quote_spanned!{ value.span()=> - (#value as u32) & #mask - })); + dynamics.push(( + offset, + quote_spanned! { value.span()=> + (#value as u32) & #mask + }, + )); } - }, + } Command::Sscaled(offset, bitlen, shift) => { let mask = bitmask(bitlen); let half = -1i32 << (bitlen - 1); if let Some(value) = signed_rangecheck(value, half, mask as i32 - half, shift) { statics.push((offset, (value? as u32) & mask)); } else { - dynamics.push((offset, quote_spanned!{ value.span()=> - ((#value >> #shift) as u32) & #mask - })); + dynamics.push(( + offset, + quote_spanned! { value.span()=> + ((#value >> #shift) as u32) & #mask + }, + )); } - }, + } Command::Sslice(offset, bitlen, shift) => { let mask = bitmask(bitlen); if let Some(value) = as_signed_number(value) { statics.push((offset, ((value >> shift) as u32) & mask)); } else { - dynamics.push((offset, quote_spanned!{ value.span()=> - ((#value >> #shift) as u32) & #mask - })); + dynamics.push(( + offset, + quote_spanned! { value.span()=> + ((#value >> #shift) as u32) & #mask + }, + )); } - }, + } // nonconsuming integer checks Command::BUbits(bitlen) => { @@ -314,9 +377,11 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< if let Some(value) = unsigned_rangecheck(value, 0, mask, 0) { value?; } - }, + } Command::BUsum(bitlen) => { - let prev = if let Some(FlatArg::Immediate {value: leftvalue } ) = data.args.get(cursor - 1) { + let prev = if let Some(FlatArg::Immediate { value: leftvalue }) = + data.args.get(cursor - 1) + { leftvalue } else { panic!("Bad encoding data, previous argument was not an immediate"); @@ -328,28 +393,30 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< if let Some(value) = unsigned_rangecheck(value, 1, max, 0) { value?; } - }, + } Command::BSscaled(bitlen, shift) => { let mask = bitmask(bitlen); let half = -1i32 << (bitlen - 1); if let Some(value) = signed_rangecheck(value, half, mask as i32 + half, shift) { value?; } - }, + } Command::BUrange(min, max) => { let min = u32::from(min); let max = u32::from(max); if let Some(value) = unsigned_rangecheck(value, min, max, 0) { value?; } - }, + } // specials. These have some more involved code. - Command::Special(offset, special) => handle_special_immediates(offset, special, value, &mut statics, &mut dynamics)?, + Command::Special(offset, special) => { + handle_special_immediates(offset, special, value, &mut statics, &mut dynamics)? 
+ } // jump targets also accept immediates Command::Offset(relocation) => match relocation { - // b, bl 26 bits, dword aligned + // b, bl 26 bits, dword aligned Relocation::B => { let bits = 26; let mask = bitmask(bits); @@ -357,11 +424,14 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< if let Some(value) = signed_rangecheck(value, half, mask as i32 + half, 2) { statics.push((0, (value? as u32) & mask)); } else { - dynamics.push((0, quote_spanned!{ value.span()=> - ((#value >> 2) as u32) & #mask - })); + dynamics.push(( + 0, + quote_spanned! { value.span()=> + ((#value >> 2) as u32) & #mask + }, + )); } - }, + } // b.cond, cbnz, cbz, ldr, ldrsw, prfm: 19 bits, dword aligned Relocation::BCOND => { let bits = 19; @@ -370,11 +440,14 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< if let Some(value) = signed_rangecheck(value, half, mask as i32 + half, 2) { statics.push((5, (value? as u32) & mask)); } else { - dynamics.push((5, quote_spanned!{ value.span()=> - ((#value >> 2) as u32) & #mask - })); + dynamics.push(( + 5, + quote_spanned! { value.span()=> + ((#value >> 2) as u32) & #mask + }, + )); } - }, + } // adr split 21 bit, byte aligned Relocation::ADR => { let bits = 21; @@ -385,32 +458,45 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< statics.push((5, ((value >> 2) as u32) & 0x7FFFF)); statics.push((29, (value as u32) & 3)); } else { - dynamics.push((5, quote_spanned!{ value.span()=> - ((#value >> 2) as u32) & 0x7FFFF - })); - dynamics.push((29, quote_spanned!{ value.span()=> - (#value as u32) & 3 - })); + dynamics.push(( + 5, + quote_spanned! { value.span()=> + ((#value >> 2) as u32) & 0x7FFFF + }, + )); + dynamics.push(( + 29, + quote_spanned! { value.span()=> + (#value as u32) & 3 + }, + )); } - }, + } // adrp split 21 bit, 4096-byte aligned Relocation::ADRP => { let bits = 21; let mask = bitmask(bits); let half = -1i32 << (bits - 1); - if let Some(value) = signed_rangecheck(value, half, mask as i32 + half, 12) { + if let Some(value) = signed_rangecheck(value, half, mask as i32 + half, 12) + { let value = value?; statics.push((5, ((value >> 2) as u32) & 0x7FFFF)); statics.push((29, (value as u32) & 3)); } else { - dynamics.push((5, quote_spanned!{ value.span()=> - ((#value >> 14) as u32) & 0x7FFFF - })); - dynamics.push((29, quote_spanned!{ value.span()=> - ((#value >> 12) as u32) & 3 - })); + dynamics.push(( + 5, + quote_spanned! { value.span()=> + ((#value >> 14) as u32) & 0x7FFFF + }, + )); + dynamics.push(( + 29, + quote_spanned! { value.span()=> + ((#value >> 12) as u32) & 3 + }, + )); } - }, + } // tbnz, tbz: 14 bits, dword aligned Relocation::TBZ => { let bits = 14; @@ -419,53 +505,55 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< if let Some(value) = signed_rangecheck(value, half, mask as i32 + half, 2) { statics.push((5, (value? as u32) & mask)); } else { - dynamics.push((5, quote_spanned!{ value.span()=> - ((#value >> 2) as u32) & #mask - })); + dynamics.push(( + 5, + quote_spanned! 
{ value.span()=> + ((#value >> 2) as u32) & #mask + }, + )); } - }, + } Relocation::LITERAL8 | Relocation::LITERAL16 | Relocation::LITERAL32 - | Relocation::LITERAL64 => () + | Relocation::LITERAL64 => (), }, - _ => panic!("Invalid argument processor") + _ => panic!("Invalid argument processor"), }, FlatArg::Default => match *command { // Registers default to R31 Command::R(offset) => { statics.push((offset, 0b11111u32)); - }, + } // modifiers to LSL Command::Rotates(offset) => { statics.push((offset, 0b00)); - }, + } Command::ExtendsW(offset) => { statics.push((offset, 0b010)); - }, + } Command::ExtendsX(offset) => { statics.push((offset, 0b011)); - }, + } // normal integer encodings default to 0 (i.e. not doing anything) // however encoders for which 0 is not necessarily a valid value cannot match default - Command::Ubits(_, _) | - Command::Uscaled(_, _, _) | - Command::Uslice(_, _, _) | - Command::Urange(_, _, _) | - Command::Ulist(_, _) | - Command::Ufields(_) | - Command::Sbits(_, _) | - Command::Sscaled(_, _, _) | - Command::Sslice(_, _, _) => (), + Command::Ubits(_, _) + | Command::Uscaled(_, _, _) + | Command::Uslice(_, _, _) + | Command::Urange(_, _, _) + | Command::Ulist(_, _) + | Command::Ufields(_) + | Command::Sbits(_, _) + | Command::Sscaled(_, _, _) + | Command::Sslice(_, _, _) => (), // integer checks don't have anything to check - Command::BUbits(_) | - Command::BSscaled(_, _) => (), + Command::BUbits(_) | Command::BSscaled(_, _) => (), - _ => panic!("Invalid argument processor") + _ => panic!("Invalid argument processor"), }, FlatArg::JumpTarget { ref jump } => match *command { Command::Offset(relocation) => { @@ -473,22 +561,21 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< let stmt = jump.clone().encode(4, 4, &[relocation.to_id()]); relocations.push(stmt); - }, - _ => panic!("Invalid argument processor") + } + _ => panic!("Invalid argument processor"), }, FlatArg::Lit { ref ident } => match *command { - // Condition codes, literals Command::Cond(offset) => { let name = ident.to_string(); let bits = *COND_MAP.get(&&*name).expect("bad command data"); statics.push((offset, u32::from(bits))) - }, + } Command::CondInv(offset) => { let name = ident.to_string(); let bits = *COND_MAP.get(&&*name).expect("bad command data"); statics.push((offset, u32::from(bits) ^ 1)) - }, + } Command::LitList(offset, listname) => { let name = ident.to_string(); let list = SPECIAL_IDENT_MAP.get(listname).expect("bad command data"); @@ -498,20 +585,19 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< emit_error!(ident, "Unknown literal"); return Err(None); } - }, - _ => panic!("Invalid argument processor") - } + } + _ => panic!("Invalid argument processor"), + }, } // figure out how far the cursor has to be advanced. match *command { - Command::Uslice(_, _, _) | - Command::Sslice(_, _, _) => (), - Command::BUbits(_) | - Command::BUsum(_) | - Command::BSscaled(_, _) | - Command::BUrange(_, _) => (), - _ => cursor += 1 + Command::Uslice(_, _, _) | Command::Sslice(_, _, _) => (), + Command::BUbits(_) + | Command::BUsum(_) + | Command::BSscaled(_, _) + | Command::BUrange(_, _) => (), + _ => cursor += 1, } } @@ -528,17 +614,21 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< // generate code to be emitted for dynamics if !dynamics.is_empty() { - let mut res = quote!{ + let mut res = quote! { #bits }; for (offset, expr) in dynamics { - res = quote!{ + res = quote! 
{ #res | ((#expr) << #offset) }; } - ctx.state.stmts.push(Stmt::ExprUnsigned(delimited(res), Size::DWORD)); + ctx.state + .stmts + .push(Stmt::ExprUnsigned(delimited(res), Size::DWORD)); } else { - ctx.state.stmts.push(Stmt::Const(u64::from(bits), Size::DWORD)); + ctx.state + .stmts + .push(Stmt::Const(u64::from(bits), Size::DWORD)); } // generate code to be emitted for relocations @@ -547,154 +637,214 @@ pub(super) fn compile_instruction(ctx: &mut Context, data: MatchData) -> Result< Ok(()) } -fn handle_special_immediates(offset: u8, special: SpecialComm, imm: &syn::Expr, statics: &mut Vec<(u8, u32)>, dynamics: &mut Vec<(u8, TokenStream)>) -> Result<(), Option> { +fn handle_special_immediates( + offset: u8, + special: SpecialComm, + imm: &syn::Expr, + statics: &mut Vec<(u8, u32)>, + dynamics: &mut Vec<(u8, TokenStream)>, +) -> Result<(), Option> { match special { - SpecialComm::INVERTED_WIDE_IMMEDIATE_X => if let Some(number) = as_number(imm) { - if let Some(encoded) = encoding_helpers::encode_wide_immediate_64bit(!number) { - statics.push((offset, encoded as u32)); - return Ok(()); - } - } else { - dynamics.push((offset, quote_spanned!{ imm.span()=> - { - let value: u64 = !#imm; - let offset = value.trailing_zeros() & 0b110000; - ((0xFFFFu64 & (value >> offset)) as u32) | (offset << 12) - } - })); - return Ok(()); - }, - SpecialComm::INVERTED_WIDE_IMMEDIATE_W => if let Some(number) = as_number(imm) { - if number <= u64::from(std::u32::MAX) { - if let Some(encoded) = encoding_helpers::encode_wide_immediate_32bit(!(number as u32)) { + SpecialComm::INVERTED_WIDE_IMMEDIATE_X => { + if let Some(number) = as_number(imm) { + if let Some(encoded) = encoding_helpers::encode_wide_immediate_64bit(!number) { statics.push((offset, encoded as u32)); return Ok(()); } + } else { + dynamics.push(( + offset, + quote_spanned! { imm.span()=> + { + let value: u64 = !#imm; + let offset = value.trailing_zeros() & 0b110000; + ((0xFFFFu64 & (value >> offset)) as u32) | (offset << 12) + } + }, + )); + return Ok(()); } - } else { - dynamics.push((offset, quote_spanned!{ imm.span()=> - { - let value: u64 = !#imm; - let offset = value.trailing_zeros() & 0b10000; - ((0xFFFFu64 & (value >> offset)) as u32) | (offset << 12) + } + SpecialComm::INVERTED_WIDE_IMMEDIATE_W => { + if let Some(number) = as_number(imm) { + if number <= u64::from(std::u32::MAX) { + if let Some(encoded) = + encoding_helpers::encode_wide_immediate_32bit(!(number as u32)) + { + statics.push((offset, encoded as u32)); + return Ok(()); + } } - })); - return Ok(()); - }, - SpecialComm::WIDE_IMMEDIATE_X => if let Some(number) = as_number(imm) { - if let Some(encoded) = encoding_helpers::encode_wide_immediate_64bit(number) { - statics.push((offset, encoded as u32)); + } else { + dynamics.push(( + offset, + quote_spanned! 
{ imm.span()=> + { + let value: u64 = !#imm; + let offset = value.trailing_zeros() & 0b10000; + ((0xFFFFu64 & (value >> offset)) as u32) | (offset << 12) + } + }, + )); return Ok(()); } - } else { - dynamics.push((offset, quote_spanned!{ imm.span()=> - { - let value: u64 = #imm; - let offset = value.trailing_zeros() & 0b110000; - ((0xFFFFu64 & (value >> offset)) as u32) | (offset << 12) - } - })); - return Ok(()); - }, - SpecialComm::WIDE_IMMEDIATE_W => if let Some(number) = as_number(imm) { - if number <= u64::from(std::u32::MAX) { - if let Some(encoded) = encoding_helpers::encode_wide_immediate_32bit(number as u32) { + } + SpecialComm::WIDE_IMMEDIATE_X => { + if let Some(number) = as_number(imm) { + if let Some(encoded) = encoding_helpers::encode_wide_immediate_64bit(number) { statics.push((offset, encoded as u32)); return Ok(()); } + } else { + dynamics.push(( + offset, + quote_spanned! { imm.span()=> + { + let value: u64 = #imm; + let offset = value.trailing_zeros() & 0b110000; + ((0xFFFFu64 & (value >> offset)) as u32) | (offset << 12) + } + }, + )); + return Ok(()); } - } else { - dynamics.push((offset, quote_spanned!{ imm.span()=> - { - let value: u64 = #imm; - let offset = value.trailing_zeros() & 0b10000; - ((0xFFFFu64 & (value >> offset)) as u32) | (offset << 12) + } + SpecialComm::WIDE_IMMEDIATE_W => { + if let Some(number) = as_number(imm) { + if number <= u64::from(std::u32::MAX) { + if let Some(encoded) = + encoding_helpers::encode_wide_immediate_32bit(number as u32) + { + statics.push((offset, encoded as u32)); + return Ok(()); + } } - })); - return Ok(()); - }, - SpecialComm::STRETCHED_IMMEDIATE => if let Some(number) = as_number(imm) { - if let Some(encoded) = encoding_helpers::encode_stretched_immediate(number) { - statics.push((offset, encoded & 0x1F as u32)); - statics.push((offset + 6, encoded & 0xE0 as u32)); + } else { + dynamics.push(( + offset, + quote_spanned! { imm.span()=> + { + let value: u64 = #imm; + let offset = value.trailing_zeros() & 0b10000; + ((0xFFFFu64 & (value >> offset)) as u32) | (offset << 12) + } + }, + )); return Ok(()); } - } else { - dynamics.push((offset, quote_spanned!{ imm.span()=> - { - let value: u64 = #imm; - let mut masked = value & 0x8040201008040201; - masked |= masked >> 32; - masked |= masked >> 16; - masked |= masked >> 8; - let masked = masked as u32; - ((masked & 0xE0) << 6) | (masked & 0x1F) - } - })); - return Ok(()); - }, - SpecialComm::LOGICAL_IMMEDIATE_W => if let Some(number) = as_number(imm) { - if number <= u64::from(std::u32::MAX) { - if let Some(encoded) = encoding_helpers::encode_logical_immediate_32bit(number as u32) { - statics.push((offset, u32::from(encoded))); + } + SpecialComm::STRETCHED_IMMEDIATE => { + if let Some(number) = as_number(imm) { + if let Some(encoded) = encoding_helpers::encode_stretched_immediate(number) { + statics.push((offset, encoded & 0x1F as u32)); + statics.push((offset + 6, encoded & 0xE0 as u32)); return Ok(()); } + } else { + dynamics.push(( + offset, + quote_spanned! 
{ imm.span()=> + { + let value: u64 = #imm; + let mut masked = value & 0x8040201008040201; + masked |= masked >> 32; + masked |= masked >> 16; + masked |= masked >> 8; + let masked = masked as u32; + ((masked & 0xE0) << 6) | (masked & 0x1F) + } + }, + )); + return Ok(()); } - } else { - dynamics.push((offset, quote_spanned!{ imm.span()=> + } + SpecialComm::LOGICAL_IMMEDIATE_W => { + if let Some(number) = as_number(imm) { + if number <= u64::from(std::u32::MAX) { + if let Some(encoded) = + encoding_helpers::encode_logical_immediate_32bit(number as u32) + { + statics.push((offset, u32::from(encoded))); + return Ok(()); + } + } + } else { + dynamics.push((offset, quote_spanned!{ imm.span()=> dynasmrt::aarch64::encode_logical_immediate_32bit(#imm).expect("Impossible logical immediate") as u32 })); - return Ok(()); - }, - SpecialComm::LOGICAL_IMMEDIATE_X => if let Some(number) = as_number(imm) { - if let Some(encoded) = encoding_helpers::encode_logical_immediate_64bit(number) { - statics.push((offset, u32::from(encoded))); return Ok(()); } - } else { - dynamics.push((offset, quote_spanned!{ imm.span()=> + } + SpecialComm::LOGICAL_IMMEDIATE_X => { + if let Some(number) = as_number(imm) { + if let Some(encoded) = encoding_helpers::encode_logical_immediate_64bit(number) { + statics.push((offset, u32::from(encoded))); + return Ok(()); + } + } else { + dynamics.push((offset, quote_spanned!{ imm.span()=> dynasmrt::aarch64::encode_logical_immediate_64bit(#imm).expect("Impossible logical immediate") as u32 })); - return Ok(()); - }, - SpecialComm::FLOAT_IMMEDIATE => if let Some(number) = as_float(imm) { - if let Some(encoded) = encoding_helpers::encode_floating_point_immediate(number as f32) { - statics.push((offset, u32::from(encoded))); return Ok(()); } - } else { - dynamics.push((offset, quote_spanned!{ imm.span()=> + } + SpecialComm::FLOAT_IMMEDIATE => { + if let Some(number) = as_float(imm) { + if let Some(encoded) = + encoding_helpers::encode_floating_point_immediate(number as f32) { - let value: f32 = #imm; - let bits = value.to_bits(); - ((bits >> 24) & 0x80) | ((bits >> 19) & 0x7F) + statics.push((offset, u32::from(encoded))); + return Ok(()); } - })); - return Ok(()); - }, - SpecialComm::SPLIT_FLOAT_IMMEDIATE => if let Some(number) = as_float(imm) { - if let Some(encoded) = encoding_helpers::encode_floating_point_immediate(number as f32) { - statics.push((offset, u32::from(encoded & 0x1F))); - statics.push((offset + 6, u32::from(encoded & 0xE0))); + } else { + dynamics.push(( + offset, + quote_spanned! { imm.span()=> + { + let value: f32 = #imm; + let bits = value.to_bits(); + ((bits >> 24) & 0x80) | ((bits >> 19) & 0x7F) + } + }, + )); return Ok(()); } - } else { - dynamics.push((offset, quote_spanned!{ imm.span()=> + } + SpecialComm::SPLIT_FLOAT_IMMEDIATE => { + if let Some(number) = as_float(imm) { + if let Some(encoded) = + encoding_helpers::encode_floating_point_immediate(number as f32) { - let value: f32 = #imm; - let bits = value.to_bits(); - ((bits >> 18) & 0x80) | ((bits >> 13) & 0x60) | ((bits >> 19) & 0x1F) + statics.push((offset, u32::from(encoded & 0x1F))); + statics.push((offset + 6, u32::from(encoded & 0xE0))); + return Ok(()); } - })); - return Ok(()); - }, + } else { + dynamics.push(( + offset, + quote_spanned! 
{ imm.span()=> + { + let value: f32 = #imm; + let bits = value.to_bits(); + ((bits >> 18) & 0x80) | ((bits >> 13) & 0x60) | ((bits >> 19) & 0x1F) + } + }, + )); + return Ok(()); + } + } } emit_error!(imm, "Impossible to encode immediate"); Err(None) } -fn unsigned_rangecheck(expr: &syn::Expr, min: u32, max: u32, scale: u8) -> Option>> { +fn unsigned_rangecheck( + expr: &syn::Expr, + min: u32, + max: u32, + scale: u8, +) -> Option>> { let value = as_number(expr)?; let scaled = value >> scale; @@ -712,7 +862,12 @@ fn unsigned_rangecheck(expr: &syn::Expr, min: u32, max: u32, scale: u8) -> Optio }) } -fn signed_rangecheck(expr: &syn::Expr, min: i32, max: i32, scale: u8) -> Option>> { +fn signed_rangecheck( + expr: &syn::Expr, + min: i32, + max: i32, + scale: u8, +) -> Option>> { let value = as_signed_number(expr)?; let scaled = value >> scale; diff --git a/plugin/src/arch/aarch64/debug.rs b/plugin/src/arch/aarch64/debug.rs index 1e323272ba..f76e32aad2 100644 --- a/plugin/src/arch/aarch64/debug.rs +++ b/plugin/src/arch/aarch64/debug.rs @@ -1,10 +1,9 @@ +use super::aarch64data::{Command, Matcher, Opdata, Relocation, SpecialComm}; use super::ast::Modifier; -use super::aarch64data::{Opdata, Matcher, Command, Relocation, SpecialComm}; use crate::common::Size; use std::fmt::Write; - #[cfg(feature = "dynasm_opmap")] pub fn create_opmap() -> String { let mut s = String::new(); @@ -16,19 +15,25 @@ pub fn create_opmap() -> String { // get the data for this mnemnonic let data = super::aarch64data::get_mnemonic_data(mnemnonic).unwrap(); // format the data for the opmap docs - let formats = data.into_iter() + let formats = data + .into_iter() .map(|x| format_opdata(mnemnonic, x)) .flat_map(|x| x) .map(|x| x.replace(">>> ", "")) .collect::>(); // push mnemnonic name as title - write!(s, "### {}\n```insref\n{}\n```\n", mnemnonic, formats.join("\n")).unwrap(); + write!( + s, + "### {}\n```insref\n{}\n```\n", + mnemnonic, + formats.join("\n") + ) + .unwrap(); } s } - #[cfg(feature = "dynasm_extract")] pub fn extract_opmap() -> String { let mut buf = Vec::new(); @@ -42,15 +47,14 @@ pub fn extract_opmap() -> String { buf.extend( data.into_iter() - .map(|x| extract_opdata(mnemnonic, x)) - .flat_map(|x| x) + .map(|x| extract_opdata(mnemnonic, x)) + .flat_map(|x| x), ); } buf.join("\n") } - pub fn format_opdata_list(name: &str, data: &[Opdata]) -> String { let mut forms = Vec::new(); @@ -62,21 +66,20 @@ pub fn format_opdata_list(name: &str, data: &[Opdata]) -> String { } pub fn format_opdata(name: &str, data: &Opdata) -> Vec { - let has_simd_full_width = data.matchers.iter().any(|m| match m { Matcher::V(_) | Matcher::RegList(_, _) => true, - _ => false + _ => false, }); let form_count = 1 + has_simd_full_width as u8; let mut forms = Vec::new(); - for i in 0 .. 
form_count { + for i in 0..form_count { let mut buf = format!(">>> {}", name); let (constraints, names) = match constraints_and_names(data) { Ok(o) => o, - Err(e) => panic!("Encountered a faulty op listing for {}: {}", name, e) + Err(e) => panic!("Encountered a faulty op listing for {}: {}", name, e), }; let mut first = true; @@ -116,34 +119,93 @@ pub fn format_opdata(name: &str, data: &Opdata) -> Vec { Matcher::Ident => write!(buf, "{}", arg_names[0]).unwrap(), Matcher::Cond => write!(buf, "").unwrap(), Matcher::Imm => write!(buf, "#{}", arg_names[0]).unwrap(), - Matcher::W => write!(buf, "W{}", arg_names[0]).unwrap(), - Matcher::X => write!(buf, "X{}", arg_names[0]).unwrap(), + Matcher::W => write!(buf, "W{}", arg_names[0]).unwrap(), + Matcher::X => write!(buf, "X{}", arg_names[0]).unwrap(), Matcher::WSP => write!(buf, "W{}|WSP", arg_names[0]).unwrap(), Matcher::XSP => write!(buf, "X{}|SP", arg_names[0]).unwrap(), - Matcher::B => write!(buf, "B{}", arg_names[0]).unwrap(), - Matcher::H => write!(buf, "H{}", arg_names[0]).unwrap(), - Matcher::S => write!(buf, "S{}", arg_names[0]).unwrap(), - Matcher::D => write!(buf, "D{}", arg_names[0]).unwrap(), - Matcher::Q => write!(buf, "Q{}", arg_names[0]).unwrap(), + Matcher::B => write!(buf, "B{}", arg_names[0]).unwrap(), + Matcher::H => write!(buf, "H{}", arg_names[0]).unwrap(), + Matcher::S => write!(buf, "S{}", arg_names[0]).unwrap(), + Matcher::D => write!(buf, "D{}", arg_names[0]).unwrap(), + Matcher::Q => write!(buf, "Q{}", arg_names[0]).unwrap(), Matcher::V(s) => { let width = if i == 0 { 16 } else { 8 }; - write!(buf, "V{}.{}{}", arg_names[0], size_to_string(*s), width / s.in_bytes()).unwrap(); - }, - Matcher::VStatic(s, c) => write!(buf, "V{}.{}{}", arg_names[0], size_to_string(*s), c).unwrap(), - Matcher::VElement(s) => write!(buf, "V{}.{}[{}]", arg_names[0], size_to_string(*s), arg_names[1]).unwrap(), - Matcher::VElementStatic(s, element) => write!(buf, "V{}.{}[{}]", arg_names[0], size_to_string(*s), element).unwrap(), - Matcher::VStaticElement(s, c) => write!(buf, "V{}.{}{}[{}]", arg_names[0], size_to_string(*s), c, arg_names[1]).unwrap(), + write!( + buf, + "V{}.{}{}", + arg_names[0], + size_to_string(*s), + width / s.in_bytes() + ) + .unwrap(); + } + Matcher::VStatic(s, c) => { + write!(buf, "V{}.{}{}", arg_names[0], size_to_string(*s), c).unwrap() + } + Matcher::VElement(s) => write!( + buf, + "V{}.{}[{}]", + arg_names[0], + size_to_string(*s), + arg_names[1] + ) + .unwrap(), + Matcher::VElementStatic(s, element) => { + write!(buf, "V{}.{}[{}]", arg_names[0], size_to_string(*s), element).unwrap() + } + Matcher::VStaticElement(s, c) => write!( + buf, + "V{}.{}{}[{}]", + arg_names[0], + size_to_string(*s), + c, + arg_names[1] + ) + .unwrap(), Matcher::RegList(a, s) => { let width = if i == 0 { 16 } else { 8 }; - write!(buf, "{{V{}.{}{} * {}}}", arg_names[0], size_to_string(*s), width / s.in_bytes(), a).unwrap(); - }, - Matcher::RegListStatic(a, s, c) => write!(buf, "{{V{}.{}{} * {}}}", arg_names[0], size_to_string(*s), c, a).unwrap(), - Matcher::RegListElement(a, s) => write!(buf, "{{V{}.{} * {}}}[{}]", arg_names[0], size_to_string(*s), a, arg_names[1]).unwrap(), + write!( + buf, + "{{V{}.{}{} * {}}}", + arg_names[0], + size_to_string(*s), + width / s.in_bytes(), + a + ) + .unwrap(); + } + Matcher::RegListStatic(a, s, c) => write!( + buf, + "{{V{}.{}{} * {}}}", + arg_names[0], + size_to_string(*s), + c, + a + ) + .unwrap(), + Matcher::RegListElement(a, s) => write!( + buf, + "{{V{}.{} * {}}}[{}]", + arg_names[0], + size_to_string(*s), 
+ a, + arg_names[1] + ) + .unwrap(), Matcher::Offset => buf.push_str(&arg_names[0]), - Matcher::RefBase => write!(buf, "[X{}|SP]", arg_names[0]).unwrap(), - Matcher::RefOffset => write!(buf, "[X{}|SP {{, #{} }} ]", arg_names[0], arg_names[1]).unwrap(), - Matcher::RefPre => write!(buf, "[X{}|SP, #{}]!", arg_names[0], arg_names[1]).unwrap(), - Matcher::RefIndex => write!(buf, "[X{}|SP, W{}|X{} {{ , UXTW|LSL|SXTW|SXTX {{ #{} }} }} ]", arg_names[0], arg_names[1], arg_names[1], arg_names[3]).unwrap(), + Matcher::RefBase => write!(buf, "[X{}|SP]", arg_names[0]).unwrap(), + Matcher::RefOffset => { + write!(buf, "[X{}|SP {{, #{} }} ]", arg_names[0], arg_names[1]).unwrap() + } + Matcher::RefPre => { + write!(buf, "[X{}|SP, #{}]!", arg_names[0], arg_names[1]).unwrap() + } + Matcher::RefIndex => write!( + buf, + "[X{}|SP, W{}|X{} {{ , UXTW|LSL|SXTW|SXTX {{ #{} }} }} ]", + arg_names[0], arg_names[1], arg_names[1], arg_names[3] + ) + .unwrap(), Matcher::LitMod(m) => { buf.push_str(m.as_str()); if !m.expr_required() { @@ -151,18 +213,26 @@ pub fn format_opdata(name: &str, data: &Opdata) -> Vec { } else { write!(buf, " #{}", arg_names[0]).unwrap(); } - }, + } Matcher::Mod(mods) => { let mut required = false; let mut unsigned_extends = String::new(); - let mut signed_extends = String::new(); + let mut signed_extends = String::new(); let mut rest = Vec::new(); for m in *mods { required = required || m.expr_required(); match m { - Modifier::LSL | Modifier::LSR | Modifier::ASR | Modifier::ROR | Modifier::MSL => rest.push(m.as_str()), - Modifier::SXTX | Modifier::SXTW | Modifier::SXTH | Modifier::SXTB => signed_extends.push(m.as_str().chars().nth(3).unwrap()), - Modifier::UXTX | Modifier::UXTW | Modifier::UXTH | Modifier::UXTB => unsigned_extends.push(m.as_str().chars().nth(3).unwrap()), + Modifier::LSL + | Modifier::LSR + | Modifier::ASR + | Modifier::ROR + | Modifier::MSL => rest.push(m.as_str()), + Modifier::SXTX | Modifier::SXTW | Modifier::SXTH | Modifier::SXTB => { + signed_extends.push(m.as_str().chars().nth(3).unwrap()) + } + Modifier::UXTX | Modifier::UXTW | Modifier::UXTH | Modifier::UXTB => { + unsigned_extends.push(m.as_str().chars().nth(3).unwrap()) + } } } if !unsigned_extends.is_empty() { @@ -188,13 +258,12 @@ pub fn format_opdata(name: &str, data: &Opdata) -> Vec { } else { write!(buf, " #{}", arg_names[1]).unwrap(); } - }, - Matcher::End => () + } + Matcher::End => (), } - } - for _ in 0 .. 
end_count { + for _ in 0..end_count { buf.push_str(" }"); } @@ -220,14 +289,17 @@ pub fn size_to_string(size: Size) -> &'static str { Size::DWORD => "S", Size::QWORD => "D", Size::OWORD => "Q", - _ => unimplemented!() + _ => unimplemented!(), } } fn constraints_and_names(opdata: &Opdata) -> Result<(Option, Vec), &'static str> { let data = group_opdata(opdata)?; let constraints = format_constraints(&data); - let names = data.into_iter().map(|a| a.name.unwrap_or_else(|| "?".into())).collect(); + let names = data + .into_iter() + .map(|a| a.name.unwrap_or_else(|| "?".into())) + .collect(); Ok((constraints, names)) } @@ -239,12 +311,15 @@ fn group_opdata(opdata: &Opdata) -> Result, &'static str> { return Err("arg / command count mismatch"); } - let mut args: Vec<_> = args.into_iter().map(|(arg, can_be_default)| ArgWithCommands { - arg, - can_be_default, - commands: Vec::new(), - name: None - }).collect(); + let mut args: Vec<_> = args + .into_iter() + .map(|(arg, can_be_default)| ArgWithCommands { + arg, + can_be_default, + commands: Vec::new(), + name: None, + }) + .collect(); for (command, idx) in commands { args[idx].commands.push(command); @@ -258,14 +333,13 @@ fn group_opdata(opdata: &Opdata) -> Result, &'static str> { Ok(args) } - #[derive(Clone, Copy, PartialEq, Eq, Hash)] enum FlatArgTy { Direct, Immediate, Modifier, JumpTarget, - Lit + Lit, } struct ArgWithCommands { @@ -282,13 +356,10 @@ fn flatten_matchers(matchers: &[Matcher]) -> Vec<(FlatArgTy, bool)> { for matcher in matchers { match matcher { - Matcher::Dot - | Matcher::Lit(_) - | Matcher::LitInt(_) - | Matcher::LitFloat(_) => (), - Matcher::Ident - | Matcher::Cond - | Matcher::Imm => args.push((FlatArgTy::Immediate, default)), + Matcher::Dot | Matcher::Lit(_) | Matcher::LitInt(_) | Matcher::LitFloat(_) => (), + Matcher::Ident | Matcher::Cond | Matcher::Imm => { + args.push((FlatArgTy::Immediate, default)) + } Matcher::W | Matcher::X | Matcher::WSP @@ -308,30 +379,30 @@ fn flatten_matchers(matchers: &[Matcher]) -> Vec<(FlatArgTy, bool)> { | Matcher::RegListElement(_, _) => { args.push((FlatArgTy::Direct, default)); args.push((FlatArgTy::Immediate, default)); - }, + } Matcher::Offset => args.push((FlatArgTy::JumpTarget, default)), Matcher::RefBase => args.push((FlatArgTy::Direct, default)), Matcher::RefOffset => { args.push((FlatArgTy::Direct, default)); args.push((FlatArgTy::Immediate, true)); - }, + } Matcher::RefPre => { args.push((FlatArgTy::Direct, default)); args.push((FlatArgTy::Immediate, default)); - }, + } Matcher::RefIndex => { args.push((FlatArgTy::Direct, default)); args.push((FlatArgTy::Direct, default)); args.push((FlatArgTy::Modifier, true)); args.push((FlatArgTy::Immediate, true)); - }, + } Matcher::LitMod(_) => { args.push((FlatArgTy::Immediate, true)); - }, + } Matcher::Mod(_) => { args.push((FlatArgTy::Modifier, default)); args.push((FlatArgTy::Immediate, true)); - }, + } Matcher::End => default = true, } } @@ -348,15 +419,15 @@ fn group_commands(commands: &[Command]) -> (usize, Vec<(Command, usize)>) { Command::A => { cursor += 1; continue; - }, + } Command::C => { cursor -= 1; continue; - }, + } Command::Rwidth(_) => { continue; - }, - _ => () + } + _ => (), } command_idx.push((*command, cursor)); @@ -375,7 +446,7 @@ fn group_commands(commands: &[Command]) -> (usize, Vec<(Command, usize)>) { | Command::Usumdec(_, _) | Command::Ufields(_) | Command::Sbits(_, _) - | Command::Sscaled(_, _,_) + | Command::Sscaled(_, _, _) | Command::Special(_, _) | Command::Rotates(_) | Command::ExtendsW(_) @@ -384,7 +455,7 @@ 
fn group_commands(commands: &[Command]) -> (usize, Vec<(Command, usize)>) { | Command::CondInv(_) | Command::LitList(_, _) | Command::Offset(_) => cursor += 1, - _ => () + _ => (), } } @@ -395,7 +466,7 @@ fn group_commands(commands: &[Command]) -> (usize, Vec<(Command, usize)>) { fn check_command_sanity(args: &[ArgWithCommands]) -> Result<(), &'static str> { for arg in args { if arg.commands.is_empty() { - return Err("Arg with no commands") + return Err("Arg with no commands"); } for command in &arg.commands { @@ -414,7 +485,7 @@ fn check_command_sanity(args: &[ArgWithCommands]) -> Result<(), &'static str> { | Command::Usumdec(_, _) | Command::Ufields(_) | Command::Sbits(_, _) - | Command::Sscaled(_, _,_) + | Command::Sscaled(_, _, _) | Command::BUbits(_) | Command::BUsum(_) | Command::BSscaled(_, _) @@ -422,16 +493,14 @@ fn check_command_sanity(args: &[ArgWithCommands]) -> Result<(), &'static str> { | Command::Uslice(_, _, _) | Command::Sslice(_, _, _) | Command::Special(_, _) => arg.arg == FlatArgTy::Immediate, - Command::Cond(_) - | Command::CondInv(_) - | Command::LitList(_, _) => arg.arg == FlatArgTy::Lit || arg.arg == FlatArgTy::Immediate, + Command::Cond(_) | Command::CondInv(_) | Command::LitList(_, _) => { + arg.arg == FlatArgTy::Lit || arg.arg == FlatArgTy::Immediate + } Command::Offset(_) => arg.arg == FlatArgTy::JumpTarget, - Command::Rotates(_) - | Command::ExtendsW(_) - | Command::ExtendsX(_) => arg.arg == FlatArgTy::Modifier, - Command::A - | Command::C - | Command::Rwidth(_) => unreachable!() + Command::Rotates(_) | Command::ExtendsW(_) | Command::ExtendsX(_) => { + arg.arg == FlatArgTy::Modifier + } + Command::A | Command::C | Command::Rwidth(_) => unreachable!(), }; if !check { @@ -468,9 +537,7 @@ fn check_command_sanity(args: &[ArgWithCommands]) -> Result<(), &'static str> { | Command::CondInv(_) | Command::LitList(_, _) | Command::Offset(_) => !arg.can_be_default, - Command::A - | Command::C - | Command::Rwidth(_) => unreachable!() + Command::A | Command::C | Command::Rwidth(_) => unreachable!(), }; if !check { @@ -492,66 +559,61 @@ fn name_args(args: &mut [ArgWithCommands]) { for arg in args { match arg.arg { - FlatArgTy::Direct => { - match &arg.commands[0] { - Command::R(_) - | Command::REven(_) - | Command::RNoZr(_) - | Command::R4(_) => { - arg.name = Some(reg_name_list[reg_name_idx].to_string()); - reg_name_idx += 1; - }, - Command::RNext => { - arg.name = Some(format!("{}+1", reg_name_list[reg_name_idx - 1])); - }, - _ => unreachable!() + FlatArgTy::Direct => match &arg.commands[0] { + Command::R(_) | Command::REven(_) | Command::RNoZr(_) | Command::R4(_) => { + arg.name = Some(reg_name_list[reg_name_idx].to_string()); + reg_name_idx += 1; } + Command::RNext => { + arg.name = Some(format!("{}+1", reg_name_list[reg_name_idx - 1])); + } + _ => unreachable!(), }, - FlatArgTy::Immediate => { - match &arg.commands[0] { - Command::Cond(_) - | Command::CondInv(_) => arg.name = None, - Command::LitList(_, name) => arg.name = Some(name.trim_end_matches('S').to_lowercase()), - Command::Ubits(_, _) - | Command::Uscaled(_, _, _) - | Command::Ulist(_, _) - | Command::Urange(_, _, _) - | Command::Usub(_, _, _) - | Command::Unegmod(_, _) - | Command::Usumdec(_, _) - | Command::Ufields(_) - | Command::BUbits(_) - | Command::BUsum(_) - | Command::BUrange(_, _) - | Command::Uslice(_, _, _) => { - arg.name = Some(format!("uimm{}", imm_name_list[imm_name_idx])); - imm_name_idx += 1; - }, - Command::Sbits(_, _) - | Command::Sscaled(_, _,_) - | Command::BSscaled(_, _) - | 
Command::Sslice(_, _, _) => { - arg.name = Some(format!("simm{}", imm_name_list[imm_name_idx])); - imm_name_idx += 1; - }, - Command::Special(_, _) => { - arg.name = Some(format!("imm{}", imm_name_list[imm_name_idx])); - imm_name_idx += 1; - }, - _ => unreachable!() + FlatArgTy::Immediate => match &arg.commands[0] { + Command::Cond(_) | Command::CondInv(_) => arg.name = None, + Command::LitList(_, name) => { + arg.name = Some(name.trim_end_matches('S').to_lowercase()) } + Command::Ubits(_, _) + | Command::Uscaled(_, _, _) + | Command::Ulist(_, _) + | Command::Urange(_, _, _) + | Command::Usub(_, _, _) + | Command::Unegmod(_, _) + | Command::Usumdec(_, _) + | Command::Ufields(_) + | Command::BUbits(_) + | Command::BUsum(_) + | Command::BUrange(_, _) + | Command::Uslice(_, _, _) => { + arg.name = Some(format!("uimm{}", imm_name_list[imm_name_idx])); + imm_name_idx += 1; + } + Command::Sbits(_, _) + | Command::Sscaled(_, _, _) + | Command::BSscaled(_, _) + | Command::Sslice(_, _, _) => { + arg.name = Some(format!("simm{}", imm_name_list[imm_name_idx])); + imm_name_idx += 1; + } + Command::Special(_, _) => { + arg.name = Some(format!("imm{}", imm_name_list[imm_name_idx])); + imm_name_idx += 1; + } + _ => unreachable!(), }, FlatArgTy::Modifier => arg.name = None, FlatArgTy::JumpTarget => match &arg.commands[0] { Command::Offset(_) => arg.name = Some("".to_string()), - _ => unreachable!() + _ => unreachable!(), }, FlatArgTy::Lit => match &arg.commands[0] { - Command::Cond(_) - | Command::CondInv(_) => arg.name = None, - Command::LitList(_, name) => arg.name = Some(name.trim_end_matches('S').to_lowercase()), - _ => unreachable!() - } + Command::Cond(_) | Command::CondInv(_) => arg.name = None, + Command::LitList(_, name) => { + arg.name = Some(name.trim_end_matches('S').to_lowercase()) + } + _ => unreachable!(), + }, } } } @@ -571,7 +633,7 @@ fn format_constraints(args: &[ArgWithCommands]) -> Option { None } else { let len = constraints.len(); - Some(format!(" ({})", &constraints[0 .. 
len - 2])) + Some(format!(" ({})", &constraints[0..len - 2])) } } @@ -581,32 +643,73 @@ fn emit_constraints(name: &str, prevname: &str, commands: &[Command], buf: &mut Command::R4(_) => write!(buf, "{} is 0-15", name), Command::RNoZr(_) => write!(buf, "{} is 0-30", name), Command::REven(_) => write!(buf, "{} is even", name), - Command::Ubits(_, bits) - | Command::BUbits(bits) => write!(buf, "#{} < {}", name, 1u32 << bits), - Command::Uscaled(_, bits, scale) => write!(buf, "#{} < {}, #{} = {} * N", name, 1u32 << (bits + scale), name, 1u32 << scale), + Command::Ubits(_, bits) | Command::BUbits(bits) => { + write!(buf, "#{} < {}", name, 1u32 << bits) + } + Command::Uscaled(_, bits, scale) => write!( + buf, + "#{} < {}, #{} = {} * N", + name, + 1u32 << (bits + scale), + name, + 1u32 << scale + ), Command::Ulist(_, list) => { - let numbers = list.iter().map(|n| n.to_string()).collect::>().join(", "); + let numbers = list + .iter() + .map(|n| n.to_string()) + .collect::>() + .join(", "); write!(buf, "#{} = [{}]", name, numbers) - }, - Command::Urange(_, min, max) - | Command::BUrange(min, max) => write!(buf, "{} <= #{} <= {}", min, name, max), - Command::Usub(_, bits, addval) => write!(buf, "{} <= #{} <= {}", u32::from(*addval) + 1 - (1u32 << bits), name, addval), + } + Command::Urange(_, min, max) | Command::BUrange(min, max) => { + write!(buf, "{} <= #{} <= {}", min, name, max) + } + Command::Usub(_, bits, addval) => write!( + buf, + "{} <= #{} <= {}", + u32::from(*addval) + 1 - (1u32 << bits), + name, + addval + ), Command::Unegmod(_, bits) => write!(buf, "0 <= #{} < {}", name, 1u32 << bits), - Command::Usumdec(_, bits) - | Command::BUsum(bits) => write!(buf, "1 <= #{} <= {} - {}", name, 1u32 << bits, prevname), + Command::Usumdec(_, bits) | Command::BUsum(bits) => { + write!(buf, "1 <= #{} <= {} - {}", name, 1u32 << bits, prevname) + } Command::Ufields(fields) => write!(buf, "#{} < {}", name, 1u32 << fields.len()), - Command::Sbits(_, bits) => write!(buf, "-{} <= #{} < {}", 1u32 << (bits - 1), name, 1u32 << (bits - 1)), - Command::Sscaled(_, bits, scale) - | Command::BSscaled(bits, scale) => write!(buf, "-{} <= #{} < {}, #{} = {} * N", 1u32 << (bits + scale - 1), name, 1u32 << (bits + scale - 1), name, 1u32 << scale), + Command::Sbits(_, bits) => write!( + buf, + "-{} <= #{} < {}", + 1u32 << (bits - 1), + name, + 1u32 << (bits - 1) + ), + Command::Sscaled(_, bits, scale) | Command::BSscaled(bits, scale) => write!( + buf, + "-{} <= #{} < {}, #{} = {} * N", + 1u32 << (bits + scale - 1), + name, + 1u32 << (bits + scale - 1), + name, + 1u32 << scale + ), Command::Special(_, SpecialComm::WIDE_IMMEDIATE_W) | Command::Special(_, SpecialComm::WIDE_IMMEDIATE_X) | Command::Special(_, SpecialComm::INVERTED_WIDE_IMMEDIATE_W) - | Command::Special(_, SpecialComm::INVERTED_WIDE_IMMEDIATE_X) => write!(buf, "#{} is a wide immediate", name), + | Command::Special(_, SpecialComm::INVERTED_WIDE_IMMEDIATE_X) => { + write!(buf, "#{} is a wide immediate", name) + } Command::Special(_, SpecialComm::LOGICAL_IMMEDIATE_W) - | Command::Special(_, SpecialComm::LOGICAL_IMMEDIATE_X) => write!(buf, "#{} is a logical immediate", name), + | Command::Special(_, SpecialComm::LOGICAL_IMMEDIATE_X) => { + write!(buf, "#{} is a logical immediate", name) + } Command::Special(_, SpecialComm::FLOAT_IMMEDIATE) - | Command::Special(_, SpecialComm::SPLIT_FLOAT_IMMEDIATE) => write!(buf, "#{} is a floating point immediate", name), - Command::Special(_, SpecialComm::STRETCHED_IMMEDIATE) => write!(buf, "#{} is a stretched immediate", name), 
+ | Command::Special(_, SpecialComm::SPLIT_FLOAT_IMMEDIATE) => { + write!(buf, "#{} is a floating point immediate", name) + } + Command::Special(_, SpecialComm::STRETCHED_IMMEDIATE) => { + write!(buf, "#{} is a stretched immediate", name) + } Command::Offset(Relocation::B) => write!(buf, "offset is 26 bit, 4-byte aligned"), Command::Offset(Relocation::BCOND) => write!(buf, "offset is 19 bit, 4-byte aligned"), Command::Offset(Relocation::ADR) => write!(buf, "offset is 21 bit"), @@ -614,8 +717,9 @@ fn emit_constraints(name: &str, prevname: &str, commands: &[Command], buf: &mut Command::Offset(Relocation::TBZ) => write!(buf, "offset is 14 bit, 4-byte aligned"), Command::Offset(Relocation::LITERAL32) => write!(buf, "offset is 32 bit>"), Command::Offset(Relocation::LITERAL64) => write!(buf, "offset is 64 bit>"), - _ => continue - }.unwrap(); + _ => continue, + } + .unwrap(); write!(buf, ", ").unwrap(); break; @@ -624,16 +728,15 @@ fn emit_constraints(name: &str, prevname: &str, commands: &[Command], buf: &mut #[cfg(feature = "dynasm_extract")] pub fn extract_opdata(name: &str, data: &Opdata) -> Vec { - let has_simd_full_width = data.matchers.iter().any(|m| match m { Matcher::V(_) | Matcher::RegList(_, _) => true, - _ => false + _ => false, }); let form_count = 1 + has_simd_full_width as u8; let mut forms = Vec::new(); - for i in 0 .. form_count { + for i in 0..form_count { let mut buf = format!("\"{}", name); let mut first = true; @@ -670,40 +773,101 @@ pub fn extract_opdata(name: &str, data: &Opdata) -> Vec { Matcher::Lit(s) => write!(buf, "{}", s).unwrap(), Matcher::LitInt(v) => write!(buf, "{}", v).unwrap(), Matcher::LitFloat(v) => write!(buf, "{:.5}", v).unwrap(), - Matcher::Ident - | Matcher::Cond => write!(buf, "", arg_idx).unwrap(), + Matcher::Ident | Matcher::Cond => write!(buf, "", arg_idx).unwrap(), Matcher::Imm => write!(buf, "", arg_idx).unwrap(), - Matcher::W => write!(buf, "", arg_idx).unwrap(), - Matcher::X => write!(buf, "", arg_idx).unwrap(), + Matcher::W => write!(buf, "", arg_idx).unwrap(), + Matcher::X => write!(buf, "", arg_idx).unwrap(), Matcher::WSP => write!(buf, "", arg_idx).unwrap(), Matcher::XSP => write!(buf, "", arg_idx).unwrap(), - Matcher::B => write!(buf, "", arg_idx).unwrap(), - Matcher::H => write!(buf, "", arg_idx).unwrap(), - Matcher::S => write!(buf, "", arg_idx).unwrap(), - Matcher::D => write!(buf, "", arg_idx).unwrap(), - Matcher::Q => write!(buf, "", arg_idx).unwrap(), + Matcher::B => write!(buf, "", arg_idx).unwrap(), + Matcher::H => write!(buf, "", arg_idx).unwrap(), + Matcher::S => write!(buf, "", arg_idx).unwrap(), + Matcher::D => write!(buf, "", arg_idx).unwrap(), + Matcher::Q => write!(buf, "", arg_idx).unwrap(), Matcher::V(s) => { let width = if i == 0 { 16 } else { 8 }; - write!(buf, ".{}{}", arg_idx, size_to_string(*s), width / s.in_bytes()).unwrap(); - }, - Matcher::VStatic(s, c) => write!(buf, ".{}{}", arg_idx, size_to_string(*s), c).unwrap(), - Matcher::VElement(s) => write!(buf, ".{}[]", arg_idx, size_to_string(*s), arg_idx + 1).unwrap(), - Matcher::VElementStatic(s, element) => write!(buf, ".{}[{}]", arg_idx, size_to_string(*s), element).unwrap(), - Matcher::VStaticElement(s, c) => write!(buf, ".{}{}[]", arg_idx, size_to_string(*s), c, arg_idx + 1).unwrap(), + write!( + buf, + ".{}{}", + arg_idx, + size_to_string(*s), + width / s.in_bytes() + ) + .unwrap(); + } + Matcher::VStatic(s, c) => { + write!(buf, ".{}{}", arg_idx, size_to_string(*s), c).unwrap() + } + Matcher::VElement(s) => write!( + buf, + ".{}[]", + arg_idx, + size_to_string(*s), 
+ arg_idx + 1 + ) + .unwrap(), + Matcher::VElementStatic(s, element) => { + write!(buf, ".{}[{}]", arg_idx, size_to_string(*s), element).unwrap() + } + Matcher::VStaticElement(s, c) => write!( + buf, + ".{}{}[]", + arg_idx, + size_to_string(*s), + c, + arg_idx + 1 + ) + .unwrap(), Matcher::RegList(a, s) => { let width = if i == 0 { 16 } else { 8 }; - write!(buf, "{{.{}{} * {}}}", arg_idx, size_to_string(*s), width / s.in_bytes(), a).unwrap(); - }, - Matcher::RegListStatic(a, s, c) => write!(buf, "{{.{}{} * {}}}", arg_idx, size_to_string(*s), c, a).unwrap(), - Matcher::RegListElement(a, s) => write!(buf, "{{.{} * {}}}[]", arg_idx, size_to_string(*s), a, arg_idx + 1).unwrap(), + write!( + buf, + "{{.{}{} * {}}}", + arg_idx, + size_to_string(*s), + width / s.in_bytes(), + a + ) + .unwrap(); + } + Matcher::RegListStatic(a, s, c) => write!( + buf, + "{{.{}{} * {}}}", + arg_idx, + size_to_string(*s), + c, + a + ) + .unwrap(), + Matcher::RegListElement(a, s) => write!( + buf, + "{{.{} * {}}}[]", + arg_idx, + size_to_string(*s), + a, + arg_idx + 1 + ) + .unwrap(), Matcher::Offset => write!(buf, "", arg_idx).unwrap(), - Matcher::RefBase => write!(buf, "[]", arg_idx).unwrap(), - Matcher::RefOffset => write!(buf, "[ <, > ]", arg_idx, arg_idx + 1).unwrap(), - Matcher::RefPre => write!(buf, "[, ]!", arg_idx, arg_idx + 1).unwrap(), + Matcher::RefBase => write!(buf, "[]", arg_idx).unwrap(), + Matcher::RefOffset => { + write!(buf, "[ <, > ]", arg_idx, arg_idx + 1).unwrap() + } + Matcher::RefPre => { + write!(buf, "[, ]!", arg_idx, arg_idx + 1).unwrap() + } Matcher::RefIndex => { constraints.push(format!("{}: ModWX()", arg_idx + 2)); - write!(buf, "[, < , < > > ]", arg_idx, arg_idx + 1, arg_idx + 2, arg_idx + 3).unwrap(); - }, + write!( + buf, + "[, < , < > > ]", + arg_idx, + arg_idx + 1, + arg_idx + 2, + arg_idx + 3 + ) + .unwrap(); + } Matcher::LitMod(m) => { buf.push_str(m.as_str()); if !m.expr_required() { @@ -711,7 +875,7 @@ pub fn extract_opdata(name: &str, data: &Opdata) -> Vec { } else { write!(buf, " ", arg_idx).unwrap(); } - }, + } Matcher::Mod(mods) => { let mut required = false; let mut options = Vec::new(); @@ -727,14 +891,14 @@ pub fn extract_opdata(name: &str, data: &Opdata) -> Vec { } else { write!(buf, " ", arg_idx, arg_idx + 1).unwrap(); } - }, - Matcher::End => () + } + Matcher::End => (), } arg_idx += matcher.flatarg_count(); } - for _ in 0 .. 
end_count { + for _ in 0..end_count { buf.push_str(" >"); } @@ -759,52 +923,109 @@ fn extract_constraints(args: &[ArgWithCommands]) -> Vec { Command::RNoZr(_) => format!("R(31)"), Command::R4(_) => format!("R(16)"), Command::RNext => format!("RNext()"), - Command::Ubits(_, bits) - | Command::BUbits(bits) => format!("Range(0, {}, 1)", 1u32 << bits), - Command::Uscaled(_, bits, scale) => format!("Range(0, {}, {})", 1u32 << (bits + scale), 1u32 << scale), + Command::Ubits(_, bits) | Command::BUbits(bits) => { + format!("Range(0, {}, 1)", 1u32 << bits) + } + Command::Uscaled(_, bits, scale) => { + format!("Range(0, {}, {})", 1u32 << (bits + scale), 1u32 << scale) + } Command::Ulist(_, list) => { - let numbers = list.iter().map(|n| n.to_string()).collect::>().join(", "); + let numbers = list + .iter() + .map(|n| n.to_string()) + .collect::>() + .join(", "); format!("List({})", numbers) - }, - Command::Urange(_, min, max) - | Command::BUrange(min, max) => format!("Range({}, {}+1, 1)", min, max), - Command::Usub(_, bits, addval) => format!("Range({}, {}+1, 1)", *addval as u32 + 1 - (1u32 << bits), addval), + } + Command::Urange(_, min, max) | Command::BUrange(min, max) => { + format!("Range({}, {}+1, 1)", min, max) + } + Command::Usub(_, bits, addval) => format!( + "Range({}, {}+1, 1)", + *addval as u32 + 1 - (1u32 << bits), + addval + ), Command::Unegmod(_, bits) => format!("Range(0, {}, 1)", 1u32 << bits), - Command::Usumdec(_, bits) - | Command::BUsum(bits) => format!("Range2(1, {}+1, 1)", 1u32 << bits), + Command::Usumdec(_, bits) | Command::BUsum(bits) => { + format!("Range2(1, {}+1, 1)", 1u32 << bits) + } Command::Ufields(fields) => format!("Range(0, {}, 1)", 1u32 << fields.len()), - Command::Sbits(_, bits) => format!("Range(-{}, {}, 1)", 1u32 << (bits - 1), 1u32 << (bits - 1)), - Command::Sscaled(_, bits, scale) - | Command::BSscaled(bits, scale) => format!("Range(-{}, {}, {})", 1u32 << (bits + scale - 1), 1u32 << (bits + scale - 1), 1u32 << scale), + Command::Sbits(_, bits) => { + format!("Range(-{}, {}, 1)", 1u32 << (bits - 1), 1u32 << (bits - 1)) + } + Command::Sscaled(_, bits, scale) | Command::BSscaled(bits, scale) => format!( + "Range(-{}, {}, {})", + 1u32 << (bits + scale - 1), + 1u32 << (bits + scale - 1), + 1u32 << scale + ), Command::Special(_, SpecialComm::WIDE_IMMEDIATE_W) => format!("Special('wide_w')"), - | Command::Special(_, SpecialComm::WIDE_IMMEDIATE_X) => format!("Special('wide_x')"), - | Command::Special(_, SpecialComm::INVERTED_WIDE_IMMEDIATE_W) => format!("Special('inverted_w')"), - | Command::Special(_, SpecialComm::INVERTED_WIDE_IMMEDIATE_X) => format!("Special('inverted_x')"), - Command::Special(_, SpecialComm::LOGICAL_IMMEDIATE_W) => format!("Special('logical_w')"), - | Command::Special(_, SpecialComm::LOGICAL_IMMEDIATE_X) => format!("Special('logical_x')"), + Command::Special(_, SpecialComm::WIDE_IMMEDIATE_X) => format!("Special('wide_x')"), + Command::Special(_, SpecialComm::INVERTED_WIDE_IMMEDIATE_W) => { + format!("Special('inverted_w')") + } + Command::Special(_, SpecialComm::INVERTED_WIDE_IMMEDIATE_X) => { + format!("Special('inverted_x')") + } + Command::Special(_, SpecialComm::LOGICAL_IMMEDIATE_W) => { + format!("Special('logical_w')") + } + Command::Special(_, SpecialComm::LOGICAL_IMMEDIATE_X) => { + format!("Special('logical_x')") + } Command::Special(_, SpecialComm::FLOAT_IMMEDIATE) - | Command::Special(_, SpecialComm::SPLIT_FLOAT_IMMEDIATE) => format!("Special('float')"), - Command::Special(_, SpecialComm::STRETCHED_IMMEDIATE) => 
format!("Special('stretched')"), - Command::Offset(Relocation::B) => format!("Range(-{}, {}, {})", 1<<27, 1<<27, 4), - Command::Offset(Relocation::BCOND) => format!("Range(-{}, {}, {})", 1<<18, 1<<18, 4), - Command::Offset(Relocation::ADR) => format!("Range(-{}, {}, {})", 1<<20, 1<<20, 1), - Command::Offset(Relocation::ADRP) => format!("Range(-{}, {}, {})", 1u64<<32, 1u64<<32, 4096), - Command::Offset(Relocation::TBZ) => format!("Range(-{}, {}, {})", 1<<15, 1<<15, 4), - Command::Offset(Relocation::LITERAL32) => format!("Range(-{}, {}, {})", 1<<31, 1<<31, 1), - Command::Offset(Relocation::LITERAL64) => format!("Range(-{}, {}, {})", 1u64<<63, 1u64<<63, 1), + | Command::Special(_, SpecialComm::SPLIT_FLOAT_IMMEDIATE) => { + format!("Special('float')") + } + Command::Special(_, SpecialComm::STRETCHED_IMMEDIATE) => { + format!("Special('stretched')") + } + Command::Offset(Relocation::B) => { + format!("Range(-{}, {}, {})", 1 << 27, 1 << 27, 4) + } + Command::Offset(Relocation::BCOND) => { + format!("Range(-{}, {}, {})", 1 << 18, 1 << 18, 4) + } + Command::Offset(Relocation::ADR) => { + format!("Range(-{}, {}, {})", 1 << 20, 1 << 20, 1) + } + Command::Offset(Relocation::ADRP) => { + format!("Range(-{}, {}, {})", 1u64 << 32, 1u64 << 32, 4096) + } + Command::Offset(Relocation::TBZ) => { + format!("Range(-{}, {}, {})", 1 << 15, 1 << 15, 4) + } + Command::Offset(Relocation::LITERAL32) => { + format!("Range(-{}, {}, {})", 1 << 31, 1 << 31, 1) + } + Command::Offset(Relocation::LITERAL64) => { + format!("Range(-{}, {}, {})", 1u64 << 63, 1u64 << 63, 1) + } Command::Cond(_) => { let keys: Vec<_> = COND_MAP.keys().map(|k| format!("\"{}\"", k)).collect(); format!("List({})", keys.join(", ")) - }, + } Command::CondInv(_) => { - let keys: Vec<_> = COND_MAP.iter().filter_map(|(k, v)| if *v < 14 { Some(format!("\"{}\"", k)) } else { None }).collect(); + let keys: Vec<_> = COND_MAP + .iter() + .filter_map(|(k, v)| { + if *v < 14 { + Some(format!("\"{}\"", k)) + } else { + None + } + }) + .collect(); format!("List({})", keys.join(", ")) - }, + } Command::LitList(_, name) => { - let keys: Vec<_> = SPECIAL_IDENT_MAP[name].keys().map(|k| format!("\"{}\"", k)).collect(); + let keys: Vec<_> = SPECIAL_IDENT_MAP[name] + .keys() + .map(|k| format!("\"{}\"", k)) + .collect(); format!("List({})", keys.join(", ")) } - _ => continue + _ => continue, }; constraints.push(format!("{}: {}", i, constraint)); @@ -812,4 +1033,4 @@ fn extract_constraints(args: &[ArgWithCommands]) -> Vec { } } constraints -} \ No newline at end of file +} diff --git a/plugin/src/arch/aarch64/matching.rs b/plugin/src/arch/aarch64/matching.rs index 24153008de..a9005618ed 100644 --- a/plugin/src/arch/aarch64/matching.rs +++ b/plugin/src/arch/aarch64/matching.rs @@ -1,16 +1,22 @@ -use proc_macro_error::emit_error; use proc_macro2::Span; +use proc_macro_error::emit_error; -use super::Context; -use super::ast::{Instruction, RawArg, CleanArg, FlatArg, RefItem, Register, RegFamily, RefKind, Modifier}; -use super::aarch64data::{Opdata, Matcher, COND_MAP, get_mnemonic_data}; +use super::aarch64data::{get_mnemonic_data, Matcher, Opdata, COND_MAP}; +use super::ast::{ + CleanArg, FlatArg, Instruction, Modifier, RawArg, RefItem, RefKind, RegFamily, Register, +}; use super::debug::format_opdata_list; +use super::Context; -use crate::common::{Size, JumpKind}; -use crate::parse_helpers::{as_ident, as_number, as_float}; +use crate::common::{JumpKind, Size}; +use crate::parse_helpers::{as_float, as_ident, as_number}; /// Try finding an appropriate definition that 
matches the given instruction / arguments. -pub(super) fn match_instruction(_ctx: &mut Context, instruction: &Instruction, args: Vec) -> Result> { +pub(super) fn match_instruction( + _ctx: &mut Context, + instruction: &Instruction, + args: Vec, +) -> Result> { // sanitize our arg list to remove any structures that cannot be matched on let args = sanitize_args(args)?; @@ -25,7 +31,6 @@ pub(super) fn match_instruction(_ctx: &mut Context, instruction: &Instruction, a // matching loop for data in opdata { if let Some(mut ctx) = match_args(&args, data) { - // flatten the arg list for the encoding vm flatten_args(args, data, &mut ctx); @@ -33,9 +38,11 @@ pub(super) fn match_instruction(_ctx: &mut Context, instruction: &Instruction, a } } - Err(Some( - format!("'{}': instruction format mismatch, expected one of the following forms:\n{}", &name, format_opdata_list(&name, opdata)) - )) + Err(Some(format!( + "'{}': instruction format mismatch, expected one of the following forms:\n{}", + &name, + format_opdata_list(&name, opdata) + ))) } /// Sanitizes arguments, ensuring that: @@ -52,7 +59,7 @@ fn sanitize_args(args: Vec) -> Result, Option> { RawArg::Direct { span, reg } => { sanitize_register(span, ®)?; res.push(CleanArg::Direct { span, reg }); - }, + } // offsets: validate that only relative jumps are allowed (no extern relocations) RawArg::JumpTarget { jump } => { if let JumpKind::Bare(_) = jump.kind { @@ -60,28 +67,31 @@ fn sanitize_args(args: Vec) -> Result, Option> { return Err(None); } res.push(CleanArg::JumpTarget { jump }); - }, + } // modifier: LSL LSR ASR ROR and MSL require an immediate. RawArg::Modifier { span, modifier } => { if modifier.expr.is_none() && modifier.op.expr_required() { - emit_error!(span, "LSL, LSR, ASR, ROR and MSL modifiers require a shift immediate."); + emit_error!( + span, + "LSL, LSR, ASR, ROR and MSL modifiers require a shift immediate." + ); return Err(None); } res.push(CleanArg::Modifier { span, modifier }); - }, + } // dot: passthrough RawArg::Dot { span } => { - res.push(CleanArg::Dot { span } ); - }, + res.push(CleanArg::Dot { span }); + } // lit: passthrough RawArg::Lit { ident } => { - res.push(CleanArg::Lit { ident } ); - }, + res.push(CleanArg::Lit { ident }); + } // immediate: pass through RawArg::Immediate { value, prefixed } => { res.push(CleanArg::Immediate { value, prefixed }) - }, + } // reference: first, assert the used indexing mode (base, offset, pre-indexed, or register-indexed) // then, verify that the base register is always an XSP register // for the register-indexed mode, additionally verify that the index register is either an W or an X register @@ -97,36 +107,37 @@ fn sanitize_args(args: Vec) -> Result, Option> { Some(_) => { emit_error!(span, "First item in a reference list has to be a register"); return Err(None); - }, - None => unreachable!("Cannot create empty references in the parser") + } + None => unreachable!("Cannot create empty references in the parser"), }; // second item is either a register or an offset match items.next() { - Some(RefItem::Direct { reg, ..}) => { + Some(RefItem::Direct { reg, .. }) => { kind = RefKind::Indexed(reg, None); - }, + } Some(RefItem::Immediate { value }) => { kind = RefKind::Offset(value); - }, + } Some(RefItem::Modifier { .. 
}) => { - emit_error!(span, "Cannot have a modifier without index register or offset"); + emit_error!( + span, + "Cannot have a modifier without index register or offset" + ); return Err(None); - }, - None => hit_end = true + } + None => hit_end = true, } // if the second item was a register, there could be a modifier if let RefKind::Indexed(_, ref mut modifier) = kind { match items.next() { - Some(RefItem::Modifier { modifier: m, ..}) => { - *modifier = Some(m) - }, + Some(RefItem::Modifier { modifier: m, .. }) => *modifier = Some(m), Some(_) => { emit_error!(span, "Too many items in reference list"); return Err(None); - }, - None => hit_end = true + } + None => hit_end = true, } } @@ -141,14 +152,20 @@ fn sanitize_args(args: Vec) -> Result, Option> { if let RefKind::Offset(offset) = kind { kind = RefKind::PreIndexed(offset); } else { - emit_error!(span, "Cannot use pre-indexed addressing without an immediate offset."); + emit_error!( + span, + "Cannot use pre-indexed addressing without an immediate offset." + ); return Err(None); } } // sanitizaiton // base can only be a Xn|SP reg - if !(base.size() == Size::QWORD && (base.family() == RegFamily::INTEGERSP || (base.family() == RegFamily::INTEGER && !base.kind().is_zero_reg()))) { + if !(base.size() == Size::QWORD + && (base.family() == RegFamily::INTEGERSP + || (base.family() == RegFamily::INTEGER && !base.kind().is_zero_reg()))) + { emit_error!(span, "Base register can only be a Xn|SP register"); return Err(None); } @@ -162,8 +179,15 @@ fn sanitize_args(args: Vec) -> Result, Option> { // limited set of allowed modifiers. if let Some(ref m) = modifier { - if if index.size() == Size::QWORD {m.op != Modifier::LSL && m.op != Modifier::SXTX} else {m.op != Modifier::SXTW && m.op != Modifier::UXTW} { - emit_error!(span, "Invalid modifier for the selected base register type"); + if if index.size() == Size::QWORD { + m.op != Modifier::LSL && m.op != Modifier::SXTX + } else { + m.op != Modifier::SXTW && m.op != Modifier::UXTW + } { + emit_error!( + span, + "Invalid modifier for the selected base register type" + ); return Err(None); } @@ -175,15 +199,16 @@ fn sanitize_args(args: Vec) -> Result, Option> { } } - res.push(CleanArg::Reference { - span, - base, - kind - }); - }, + res.push(CleanArg::Reference { span, base, kind }); + } // registerlist in dash notation: verify that all used registers have the same element size / lane count. // then, canonicalize it to first register / count and confirm it is a valid bare vector register - RawArg::DashList { span, first, last, element } => { + RawArg::DashList { + span, + first, + last, + element, + } => { let mut s = ListSanitizer::new(); s.sanitize(span, &first)?; s.sanitize(span, &last)?; @@ -200,13 +225,16 @@ fn sanitize_args(args: Vec) -> Result, Option> { span, first, amount, - element + element, }) - - }, + } // registerlist in comma notation: verify that all used registers have the same element size / lane count. 
// then, canonicalize it to first register / count and confirm it is a valid bare vector register - RawArg::CommaList { span, items, element } => { + RawArg::CommaList { + span, + items, + element, + } => { if items.len() > 32 { emit_error!(span, "Too many registers in register list."); return Err(None); @@ -221,12 +249,14 @@ fn sanitize_args(args: Vec) -> Result, Option> { let code = first.kind().encode(); let mut next_code = code; - for item in items { s.sanitize(span, &item)?; next_code = (next_code + 1) % 32; if item.kind().encode() != next_code { - emit_error!(span, "Registers in register list are not monotonically incrementing"); + emit_error!( + span, + "Registers in register list are not monotonically incrementing" + ); return Err(None); } } @@ -237,13 +267,21 @@ fn sanitize_args(args: Vec) -> Result, Option> { amount, element, }) - }, + } // registerlist in amount notation: verify the register and confirm it is a valid bare vector register - RawArg::AmountList { span, first, amount, element } => { + RawArg::AmountList { + span, + first, + amount, + element, + } => { sanitize_register(span, &first)?; if let Register::Vector(v) = &first { if v.element.is_some() { - emit_error!(span, "Cannot use element specifiers inside of register lists."); + emit_error!( + span, + "Cannot use element specifiers inside of register lists." + ); return Err(None); } } else { @@ -259,7 +297,10 @@ fn sanitize_args(args: Vec) -> Result, Option> { } amount as u8 } else { - emit_error!(span, "Register list requires a contant amount of registers specified"); + emit_error!( + span, + "Register list requires a contant amount of registers specified" + ); return Err(None); }; @@ -278,14 +319,14 @@ fn sanitize_args(args: Vec) -> Result, Option> { struct ListSanitizer { pub element_size: Option, - pub lanes: Option> + pub lanes: Option>, } impl ListSanitizer { fn new() -> ListSanitizer { ListSanitizer { element_size: None, - lanes: None + lanes: None, } } @@ -294,12 +335,18 @@ impl ListSanitizer { sanitize_register(span, register)?; if let Register::Vector(v) = register { if v.element.is_some() { - emit_error!(span, "Cannot use element specifiers inside of register lists."); + emit_error!( + span, + "Cannot use element specifiers inside of register lists." + ); return Err(None); } if v.kind.is_dynamic() { - emit_error!(span, "Cannot use dynamic registers inside of a comma/dash register list."); + emit_error!( + span, + "Cannot use dynamic registers inside of a comma/dash register list." + ); return Err(None); } @@ -334,14 +381,13 @@ fn sanitize_register(span: Span, register: &Register) -> Result<(), Option 16 { emit_error!(span, "Overly wide vector register."); - return Err(None) + return Err(None); } } } Ok(()) } - /// struct containing information found during a match #[derive(Debug)] pub struct MatchData { @@ -355,29 +401,34 @@ impl MatchData { MatchData { simd_full_width: None, data, - args: Vec::new() + args: Vec::new(), } } } - impl Matcher { /// Returns if this matcher matches the given argument pub fn matches(&self, arg: &CleanArg, ctx: &mut MatchData) -> bool { match arg { - CleanArg::Reference { kind, .. } => { - match kind { - RefKind::Base => *self == Matcher::RefBase || *self == Matcher::RefOffset, - RefKind::Offset(_) => *self == Matcher::RefOffset, - RefKind::PreIndexed(_) => *self == Matcher::RefPre, - RefKind::Indexed(_, _) => *self == Matcher::RefIndex, - } + CleanArg::Reference { kind, .. 
} => match kind { + RefKind::Base => *self == Matcher::RefBase || *self == Matcher::RefOffset, + RefKind::Offset(_) => *self == Matcher::RefOffset, + RefKind::PreIndexed(_) => *self == Matcher::RefPre, + RefKind::Indexed(_, _) => *self == Matcher::RefIndex, }, - CleanArg::RegList { amount, element, first, .. } => { + CleanArg::RegList { + amount, + element, + first, + .. + } => { let first = first.assume_vector(); match self { Matcher::RegList(m_amount, element_size) => { - if m_amount != amount || *element_size != first.element_size() || element.is_some() { + if m_amount != amount + || *element_size != first.element_size() + || element.is_some() + { return false; } @@ -385,99 +436,128 @@ impl Matcher { let full_width = match bytes { 8 => false, 16 => true, - _ => return false + _ => return false, }; match ctx.simd_full_width { None => { ctx.simd_full_width = Some(full_width); true } - Some(f) => f == full_width + Some(f) => f == full_width, } } else { false } - }, - Matcher::RegListStatic(m_amount, element_size, lanecount) => - m_amount == amount && *element_size == first.element_size() && element.is_none() && first.lanes == Some(*lanecount), - Matcher::RegListElement(m_amount, element_size) => - m_amount == amount && *element_size == first.element_size() && element.is_some(), - _ => false + } + Matcher::RegListStatic(m_amount, element_size, lanecount) => { + m_amount == amount + && *element_size == first.element_size() + && element.is_none() + && first.lanes == Some(*lanecount) + } + Matcher::RegListElement(m_amount, element_size) => { + m_amount == amount + && *element_size == first.element_size() + && element.is_some() + } + _ => false, } - }, - CleanArg::Direct { reg, .. } => { - match reg { - Register::Vector(ref v) => match self { - Matcher::V(size) => { - if *size != v.element_size || v.element.is_some() { - return false; - } - if let Some(bytes) = v.full_size() { - let full_width = match bytes { - 8 => false, - 16 => true, - _ => return false - }; - match ctx.simd_full_width { - None => { - ctx.simd_full_width = Some(full_width); - true - } - Some(f) => f == full_width + } + CleanArg::Direct { reg, .. 
} => match reg { + Register::Vector(ref v) => match self { + Matcher::V(size) => { + if *size != v.element_size || v.element.is_some() { + return false; + } + if let Some(bytes) = v.full_size() { + let full_width = match bytes { + 8 => false, + 16 => true, + _ => return false, + }; + match ctx.simd_full_width { + None => { + ctx.simd_full_width = Some(full_width); + true } - } else { - false + Some(f) => f == full_width, } - }, - Matcher::VStatic(size, lanes) => - *size == v.element_size && v.element.is_none() && v.lanes == Some(*lanes), - Matcher::VElement(size) => - *size == v.element_size && v.element.is_some(), - Matcher::VElementStatic(size, element) => - *size == v.element_size && v.element.as_ref().and_then(as_number) == Some(u64::from(*element)), - Matcher::VStaticElement(size, lanes) => - *size == v.element_size && v.element.is_some() && v.lanes == Some(*lanes), - _ => false - }, - Register::Scalar(ref s) => match self { - Matcher::W => s.size() == Size::DWORD && s.kind.family() == RegFamily::INTEGER, - Matcher::X => s.size() == Size::QWORD && s.kind.family() == RegFamily::INTEGER, - Matcher::WSP => s.size() == Size::DWORD && (s.kind.family() == RegFamily::INTEGERSP || (!s.kind.is_dynamic() && s.kind.family() == RegFamily::INTEGER && !s.kind.is_zero_reg())), - Matcher::XSP => s.size() == Size::QWORD && (s.kind.family() == RegFamily::INTEGERSP || (!s.kind.is_dynamic() && s.kind.family() == RegFamily::INTEGER && !s.kind.is_zero_reg())), - Matcher::B => s.size() == Size::BYTE && s.kind.family() == RegFamily::SIMD, - Matcher::H => s.size() == Size::WORD && s.kind.family() == RegFamily::SIMD, - Matcher::S => s.size() == Size::DWORD && s.kind.family() == RegFamily::SIMD, - Matcher::D => s.size() == Size::QWORD && s.kind.family() == RegFamily::SIMD, - Matcher::Q => s.size() == Size::OWORD && s.kind.family() == RegFamily::SIMD, - _ => false + } else { + false + } } - } + Matcher::VStatic(size, lanes) => { + *size == v.element_size && v.element.is_none() && v.lanes == Some(*lanes) + } + Matcher::VElement(size) => *size == v.element_size && v.element.is_some(), + Matcher::VElementStatic(size, element) => { + *size == v.element_size + && v.element.as_ref().and_then(as_number) == Some(u64::from(*element)) + } + Matcher::VStaticElement(size, lanes) => { + *size == v.element_size && v.element.is_some() && v.lanes == Some(*lanes) + } + _ => false, + }, + Register::Scalar(ref s) => match self { + Matcher::W => s.size() == Size::DWORD && s.kind.family() == RegFamily::INTEGER, + Matcher::X => s.size() == Size::QWORD && s.kind.family() == RegFamily::INTEGER, + Matcher::WSP => { + s.size() == Size::DWORD + && (s.kind.family() == RegFamily::INTEGERSP + || (!s.kind.is_dynamic() + && s.kind.family() == RegFamily::INTEGER + && !s.kind.is_zero_reg())) + } + Matcher::XSP => { + s.size() == Size::QWORD + && (s.kind.family() == RegFamily::INTEGERSP + || (!s.kind.is_dynamic() + && s.kind.family() == RegFamily::INTEGER + && !s.kind.is_zero_reg())) + } + Matcher::B => s.size() == Size::BYTE && s.kind.family() == RegFamily::SIMD, + Matcher::H => s.size() == Size::WORD && s.kind.family() == RegFamily::SIMD, + Matcher::S => s.size() == Size::DWORD && s.kind.family() == RegFamily::SIMD, + Matcher::D => s.size() == Size::QWORD && s.kind.family() == RegFamily::SIMD, + Matcher::Q => s.size() == Size::OWORD && s.kind.family() == RegFamily::SIMD, + _ => false, + }, }, CleanArg::JumpTarget { .. 
} => *self == Matcher::Offset, - CleanArg::Immediate { prefixed: true, value } => match self { - Matcher::Imm - | Matcher::Offset => true, + CleanArg::Immediate { + prefixed: true, + value, + } => match self { + Matcher::Imm | Matcher::Offset => true, Matcher::LitInt(v) => as_number(value) == Some(u64::from(*v)), Matcher::LitFloat(v) => as_float(value) == Some(f64::from(*v)), _ => false, }, - CleanArg::Immediate { prefixed: false, value} => match self { + CleanArg::Immediate { + prefixed: false, + value, + } => match self { Matcher::Imm => true, Matcher::Offset => true, Matcher::Ident => as_ident(value).is_some(), - Matcher::Cond => if let Some(i) = as_ident(value) { - COND_MAP.contains_key(&&*i.to_string()) - } else { - false - }, - Matcher::Lit(s) => if let Some(i) = as_ident(value) { - i == s - } else { - false - }, + Matcher::Cond => { + if let Some(i) = as_ident(value) { + COND_MAP.contains_key(&&*i.to_string()) + } else { + false + } + } + Matcher::Lit(s) => { + if let Some(i) = as_ident(value) { + i == s + } else { + false + } + } Matcher::LitInt(v) => as_number(value) == Some(u64::from(*v)), Matcher::LitFloat(v) => as_float(value) == Some(f64::from(*v)), - _ => false + _ => false, }, CleanArg::Modifier { modifier, .. } => { if let Matcher::Mod(list) = self { @@ -487,14 +567,14 @@ impl Matcher { } else { false } - }, + } CleanArg::Dot { .. } => *self == Matcher::Dot, CleanArg::Lit { ident } => match self { Matcher::Ident => true, Matcher::Cond => COND_MAP.contains_key(&&*ident.to_string()), Matcher::Lit(s) => ident == s, - _ => false - } + _ => false, + }, } } @@ -507,22 +587,20 @@ impl Matcher { Matcher::Ident => 1, Matcher::Cond => 1, Matcher::Imm => 1, - Matcher::W | - Matcher::X | - Matcher::WSP | - Matcher::XSP | - Matcher::B | - Matcher::H | - Matcher::S | - Matcher::D | - Matcher::Q => 1, - Matcher::V(_) | - Matcher::VStatic(_, _) => 1, + Matcher::W + | Matcher::X + | Matcher::WSP + | Matcher::XSP + | Matcher::B + | Matcher::H + | Matcher::S + | Matcher::D + | Matcher::Q => 1, + Matcher::V(_) | Matcher::VStatic(_, _) => 1, Matcher::VElement(_) => 2, Matcher::VElementStatic(_, _) => 1, Matcher::VStaticElement(_, _) => 2, - Matcher::RegList(_, _) | - Matcher::RegListStatic(_, _, _) => 1, + Matcher::RegList(_, _) | Matcher::RegListStatic(_, _, _) => 1, Matcher::RegListElement(_, _) => 2, Matcher::Offset => 1, Matcher::RefBase => 1, @@ -546,18 +624,22 @@ pub fn match_args(args: &[CleanArg], data: &'static Opdata) -> Option for matcher in data.matchers { match matcher { - Matcher::End => if args.peek().is_some() { - continue; - } else { - return Some(ctx); - }, - matcher => if let Some(arg) = args.next() { - if !matcher.matches(arg, &mut ctx) { + Matcher::End => { + if args.peek().is_some() { + continue; + } else { + return Some(ctx); + } + } + matcher => { + if let Some(arg) = args.next() { + if !matcher.matches(arg, &mut ctx) { + return None; + } + } else { return None; } - } else { - return None; - }, + } } } @@ -576,63 +658,79 @@ fn flatten_args(args: Vec, data: &Opdata, ctx: &mut MatchData) { for matcher in data.matchers { let arg_count = match matcher { Matcher::End => continue, - matcher => matcher.flatarg_count() + matcher => matcher.flatarg_count(), }; if let Some(arg) = source_args.next() { match arg { - CleanArg::Reference { span, base, kind} => { - new_args.push(FlatArg::Direct { span, reg: base.kind_owned() } ); + CleanArg::Reference { span, base, kind } => { + new_args.push(FlatArg::Direct { + span, + reg: base.kind_owned(), + }); match kind { RefKind::Base => (), - 
RefKind::Offset(value) => - new_args.push(FlatArg::Immediate { value } ), - RefKind::PreIndexed(value) => - new_args.push(FlatArg::Immediate { value } ), + RefKind::Offset(value) => new_args.push(FlatArg::Immediate { value }), + RefKind::PreIndexed(value) => new_args.push(FlatArg::Immediate { value }), RefKind::Indexed(index, modifier) => { - new_args.push(FlatArg::Direct { span, reg: index.kind_owned() } ); + new_args.push(FlatArg::Direct { + span, + reg: index.kind_owned(), + }); if let Some(modifier) = modifier { - new_args.push(FlatArg::Modifier { span, modifier: modifier.op } ); + new_args.push(FlatArg::Modifier { + span, + modifier: modifier.op, + }); if let Some(expr) = modifier.expr { - new_args.push(FlatArg::Immediate { value: expr } ); + new_args.push(FlatArg::Immediate { value: expr }); } } } } - }, - CleanArg::RegList { span, first, element, .. } => { - new_args.push(FlatArg::Direct { span, reg: first.kind_owned() } ); + } + CleanArg::RegList { + span, + first, + element, + .. + } => { + new_args.push(FlatArg::Direct { + span, + reg: first.kind_owned(), + }); if let Some(element) = element { - new_args.push(FlatArg::Immediate { value: element } ); + new_args.push(FlatArg::Immediate { value: element }); } - }, - CleanArg::Direct { span, reg } => { - match reg { - Register::Scalar(s) => { - new_args.push(FlatArg::Direct { span, reg: s.kind }); - }, - Register::Vector(v) => { - new_args.push(FlatArg::Direct { span, reg: v.kind }); - if let Some(element) = v.element { - new_args.push(FlatArg::Immediate { value: element }); - } + } + CleanArg::Direct { span, reg } => match reg { + Register::Scalar(s) => { + new_args.push(FlatArg::Direct { span, reg: s.kind }); + } + Register::Vector(v) => { + new_args.push(FlatArg::Direct { span, reg: v.kind }); + if let Some(element) = v.element { + new_args.push(FlatArg::Immediate { value: element }); } } }, CleanArg::JumpTarget { jump } => { - new_args.push(FlatArg::JumpTarget { jump } ); - }, + new_args.push(FlatArg::JumpTarget { jump }); + } CleanArg::Immediate { value, .. } => { - new_args.push(FlatArg::Immediate { value } ); - }, + new_args.push(FlatArg::Immediate { value }); + } CleanArg::Modifier { span, modifier } => { if arg_count >= 2 { - new_args.push(FlatArg::Modifier { span, modifier: modifier.op } ); + new_args.push(FlatArg::Modifier { + span, + modifier: modifier.op, + }); } if let Some(expr) = modifier.expr { new_args.push(FlatArg::Immediate { value: expr }); } - }, + } CleanArg::Dot { .. 
} => (), CleanArg::Lit { ident } => { new_args.push(FlatArg::Lit { ident }); diff --git a/plugin/src/arch/aarch64/mod.rs b/plugin/src/arch/aarch64/mod.rs index 64fdd4cc01..d6551d5194 100644 --- a/plugin/src/arch/aarch64/mod.rs +++ b/plugin/src/arch/aarch64/mod.rs @@ -1,18 +1,18 @@ -use syn::parse; use proc_macro_error::emit_error; +use syn::parse; +mod aarch64data; mod ast; -mod parser; -mod matching; mod compiler; -mod aarch64data; -mod encoding_helpers; mod debug; +mod encoding_helpers; +mod matching; +mod parser; -use crate::State; -use crate::common::{Size, Stmt, Jump}; -use crate::arch::Arch; use self::aarch64data::Relocation; +use crate::arch::Arch; +use crate::common::{Jump, Size, Stmt}; +use crate::State; #[cfg(feature = "dynasm_opmap")] pub use debug::create_opmap; @@ -20,17 +20,15 @@ pub use debug::create_opmap; pub use debug::extract_opmap; struct Context<'a, 'b: 'a> { - pub state: &'a mut State<'b> + pub state: &'a mut State<'b>, } #[derive(Clone, Debug)] -pub struct ArchAarch64 { - -} +pub struct ArchAarch64 {} impl Default for ArchAarch64 { fn default() -> ArchAarch64 { - ArchAarch64 { } + ArchAarch64 {} } } @@ -54,7 +52,10 @@ impl Arch for ArchAarch64 { Size::DWORD => Relocation::LITERAL32, Size::QWORD => Relocation::LITERAL64, _ => { - emit_error!(span, "Relocation of unsupported size for the current target architecture"); + emit_error!( + span, + "Relocation of unsupported size for the current target architecture" + ); return; } }; @@ -67,10 +68,12 @@ impl Arch for ArchAarch64 { 0 } - fn compile_instruction(&self, state: &mut State, input: parse::ParseStream) -> parse::Result<()> { - let mut ctx = Context { - state - }; + fn compile_instruction( + &self, + state: &mut State, + input: parse::ParseStream, + ) -> parse::Result<()> { + let mut ctx = Context { state }; let (instruction, args) = parser::parse_instruction(&mut ctx, input)?; let span = instruction.span; @@ -79,20 +82,20 @@ impl Arch for ArchAarch64 { Err(None) => return Ok(()), Err(Some(e)) => { emit_error!(span, e); - return Ok(()) + return Ok(()); } - Ok(m) => m + Ok(m) => m, }; match compiler::compile_instruction(&mut ctx, match_data) { Err(None) => return Ok(()), Err(Some(e)) => { emit_error!(span, e); - return Ok(()) + return Ok(()); } - Ok(()) => () + Ok(()) => (), } Ok(()) } -} \ No newline at end of file +} diff --git a/plugin/src/arch/aarch64/parser.rs b/plugin/src/arch/aarch64/parser.rs index e330b29963..505829a7eb 100644 --- a/plugin/src/arch/aarch64/parser.rs +++ b/plugin/src/arch/aarch64/parser.rs @@ -2,17 +2,23 @@ use syn::{parse, Token}; use lazy_static::lazy_static; -use crate::parse_helpers::{parse_ident_or_rust_keyword, ParseOpt, ParseOptExt}; use crate::common::Size; +use crate::parse_helpers::{parse_ident_or_rust_keyword, ParseOpt, ParseOptExt}; +use super::ast::{ + Instruction, Modifier, ModifyExpr, RawArg, RefItem, RegFamily, RegId, RegKind, RegScalar, + RegVector, Register, +}; use super::Context; -use super::ast::{Instruction, RawArg, Register, RegId, RegKind, RegScalar, RegVector, RegFamily, RefItem, Modifier, ModifyExpr}; use std::collections::HashMap; // parses a full instruction // syntax for a single op: ident ("." expr)* (arg ("," arg)*)? 
";" -pub(super) fn parse_instruction(ctx: &mut Context, input: parse::ParseStream) -> parse::Result<(Instruction, Vec)> { +pub(super) fn parse_instruction( + ctx: &mut Context, + input: parse::ParseStream, +) -> parse::Result<(Instruction, Vec)> { let span = input.cursor().span(); // read the full dot-separated op @@ -26,7 +32,7 @@ pub(super) fn parse_instruction(ctx: &mut Context, input: parse::ParseStream) -> let _: Token![.] = input.parse()?; let arg: syn::Ident = input.parse()?; - args.push(RawArg::Dot { span } ); + args.push(RawArg::Dot { span }); args.push(RawArg::Lit { ident: arg }); } @@ -43,13 +49,7 @@ pub(super) fn parse_instruction(ctx: &mut Context, input: parse::ParseStream) -> // let span = span.join(input.cursor().span()); // FIXME can't join spans ATM - Ok(( - Instruction { - ident: op, - span - }, - args - )) + Ok((Instruction { ident: op, span }, args)) } /// tries to parse a full arg definition @@ -58,9 +58,7 @@ fn parse_arg(ctx: &mut Context, input: parse::ParseStream) -> parse::Result parse::Result parse::Result parse::Result parse::Result parse::Result *element = Some(expr), - _ => () + RawArg::DashList { + ref mut element, .. + } + | RawArg::CommaList { + ref mut element, .. + } + | RawArg::AmountList { + ref mut element, .. + } => *element = Some(expr), + _ => (), } } - return Ok(ast) + return Ok(ast); } // modifier if let Some(modifier) = input.parse_opt()? { return Ok(RawArg::Modifier { span: _start, - modifier + modifier, }); } @@ -180,23 +179,20 @@ fn parse_arg(ctx: &mut Context, input: parse::ParseStream) -> parse::Result parse::Result< if let Some(modifier) = input.parse_opt()? { return Ok(RefItem::Modifier { span: _start, - modifier + modifier, }); } @@ -215,24 +211,17 @@ fn parse_refitem(ctx: &mut Context, input: parse::ParseStream) -> parse::Result< if input.peek(Token![#]) { let _: Token![#] = input.parse()?; let arg: syn::Expr = input.parse()?; - return Ok(RefItem::Immediate { - value: arg - }); + return Ok(RefItem::Immediate { value: arg }); } // register if let Some(reg) = parse_reg(ctx, input)? 
{ - return Ok(RefItem::Direct { - reg, - span: _start - }) + return Ok(RefItem::Direct { reg, span: _start }); } // immediate (relaxed notation) let arg: syn::Expr = input.parse()?; - Ok(RefItem::Immediate { - value: arg - }) + Ok(RefItem::Immediate { value: arg }) } fn parse_reg(ctx: &mut Context, input: parse::ParseStream) -> parse::Result> { @@ -255,7 +244,7 @@ fn parse_reg(ctx: &mut Context, input: parse::ParseStream) -> parse::Result name, - Err(_) => return Ok(None) + Err(_) => return Ok(None), }; let kind; @@ -264,9 +253,7 @@ fn parse_reg(ctx: &mut Context, input: parse::ParseStream) -> parse::Result parse::Result parse::Result Size::DWORD, "D" | "d" => Size::QWORD, "Q" | "q" => Size::OWORD, - _ => return Err(cursor.error("Invalid width specifier")) + _ => return Err(cursor.error("Invalid width specifier")), }; let lanes = if trailer.is_empty() { @@ -318,8 +302,8 @@ fn parse_reg(ctx: &mut Context, input: parse::ParseStream) -> parse::Result 2, Ok(4) => 4, Ok(8) => 8, - Ok(16)=> 16, - _ => return Err(cursor.error("Invalid width specifier")) + Ok(16) => 16, + _ => return Err(cursor.error("Invalid width specifier")), }) }; @@ -344,7 +328,7 @@ fn parse_reg(ctx: &mut Context, input: parse::ParseStream) -> parse::Result parse::Result> { let modifier: Modifier = match input.parse() { Ok(m) => m, - Err(_) => return Ok(None) + Err(_) => return Ok(None), }; // valid terminating symbols @@ -376,10 +360,10 @@ impl parse::Parse for Modifier { input.step(|cursor| { if let Some((ident, rest)) = cursor.ident() { let modifier = match &*ident.to_string() { - "LSL" | "lsl" => Modifier::LSL, - "LSR" | "lsr" => Modifier::LSR, - "ASR" | "asr" => Modifier::ASR, - "ROR" | "ror" => Modifier::ROR, + "LSL" | "lsl" => Modifier::LSL, + "LSR" | "lsr" => Modifier::LSR, + "ASR" | "asr" => Modifier::ASR, + "ROR" | "ror" => Modifier::ROR, "SXTX" | "sxtx" => Modifier::SXTX, "SXTW" | "sxtw" => Modifier::SXTW, "SXTH" | "sxth" => Modifier::SXTH, @@ -388,8 +372,8 @@ impl parse::Parse for Modifier { "UXTW" | "uxtw" => Modifier::UXTW, "UXTH" | "uxth" => Modifier::UXTH, "UXTB" | "uxtb" => Modifier::UXTB, - "MSL" | "msl" => Modifier::MSL, - _ => return Err(cursor.error("Unknown modifier")) + "MSL" | "msl" => Modifier::MSL, + _ => return Err(cursor.error("Unknown modifier")), }; Ok((modifier, rest)) @@ -400,22 +384,22 @@ impl parse::Parse for Modifier { } } -lazy_static!{ +lazy_static! 
{ static ref AARCH64_REGISTERS: HashMap<&'static str, (RegId, Option)> = { use self::RegId::*; use crate::common::Size::*; static MAP: &[(&str, (RegId, Option))] = &[ - ("x0" , (X0 , Some(QWORD))), - ("x1" , (X1 , Some(QWORD))), - ("x2" , (X2 , Some(QWORD))), - ("x3" , (X3 , Some(QWORD))), - ("x4" , (X4 , Some(QWORD))), - ("x5" , (X5 , Some(QWORD))), - ("x6" , (X6 , Some(QWORD))), - ("x7" , (X7 , Some(QWORD))), - ("x8" , (X8 , Some(QWORD))), - ("x9" , (X9 , Some(QWORD))), + ("x0", (X0, Some(QWORD))), + ("x1", (X1, Some(QWORD))), + ("x2", (X2, Some(QWORD))), + ("x3", (X3, Some(QWORD))), + ("x4", (X4, Some(QWORD))), + ("x5", (X5, Some(QWORD))), + ("x6", (X6, Some(QWORD))), + ("x7", (X7, Some(QWORD))), + ("x8", (X8, Some(QWORD))), + ("x9", (X9, Some(QWORD))), ("x10", (X10, Some(QWORD))), ("x11", (X11, Some(QWORD))), ("x12", (X12, Some(QWORD))), @@ -437,17 +421,16 @@ lazy_static!{ ("x28", (X28, Some(QWORD))), ("x29", (X29, Some(QWORD))), ("x30", (X30, Some(QWORD))), - - ("w0" , (X0 , Some(DWORD))), - ("w1" , (X1 , Some(DWORD))), - ("w2" , (X2 , Some(DWORD))), - ("w3" , (X3 , Some(DWORD))), - ("w4" , (X4 , Some(DWORD))), - ("w5" , (X5 , Some(DWORD))), - ("w6" , (X6 , Some(DWORD))), - ("w7" , (X7 , Some(DWORD))), - ("w8" , (X8 , Some(DWORD))), - ("w9" , (X9 , Some(DWORD))), + ("w0", (X0, Some(DWORD))), + ("w1", (X1, Some(DWORD))), + ("w2", (X2, Some(DWORD))), + ("w3", (X3, Some(DWORD))), + ("w4", (X4, Some(DWORD))), + ("w5", (X5, Some(DWORD))), + ("w6", (X6, Some(DWORD))), + ("w7", (X7, Some(DWORD))), + ("w8", (X8, Some(DWORD))), + ("w9", (X9, Some(DWORD))), ("w10", (X10, Some(DWORD))), ("w11", (X11, Some(DWORD))), ("w12", (X12, Some(DWORD))), @@ -469,23 +452,20 @@ lazy_static!{ ("w28", (X28, Some(DWORD))), ("w29", (X29, Some(DWORD))), ("w30", (X30, Some(DWORD))), - - ("sp", (SP, Some(QWORD))), - ("wsp", (SP, Some(DWORD))), - + ("sp", (SP, Some(QWORD))), + ("wsp", (SP, Some(DWORD))), ("xzr", (XZR, Some(QWORD))), ("wzr", (XZR, Some(DWORD))), - - ("b0" , (V0 , Some(BYTE))), - ("b1" , (V1 , Some(BYTE))), - ("b2" , (V2 , Some(BYTE))), - ("b3" , (V3 , Some(BYTE))), - ("b4" , (V4 , Some(BYTE))), - ("b5" , (V5 , Some(BYTE))), - ("b6" , (V6 , Some(BYTE))), - ("b7" , (V7 , Some(BYTE))), - ("b8" , (V8 , Some(BYTE))), - ("b9" , (V9 , Some(BYTE))), + ("b0", (V0, Some(BYTE))), + ("b1", (V1, Some(BYTE))), + ("b2", (V2, Some(BYTE))), + ("b3", (V3, Some(BYTE))), + ("b4", (V4, Some(BYTE))), + ("b5", (V5, Some(BYTE))), + ("b6", (V6, Some(BYTE))), + ("b7", (V7, Some(BYTE))), + ("b8", (V8, Some(BYTE))), + ("b9", (V9, Some(BYTE))), ("b10", (V10, Some(BYTE))), ("b11", (V11, Some(BYTE))), ("b12", (V12, Some(BYTE))), @@ -508,17 +488,16 @@ lazy_static!{ ("b29", (V29, Some(BYTE))), ("b30", (V30, Some(BYTE))), ("b31", (V31, Some(BYTE))), - - ("h0" , (V0 , Some(WORD))), - ("h1" , (V1 , Some(WORD))), - ("h2" , (V2 , Some(WORD))), - ("h3" , (V3 , Some(WORD))), - ("h4" , (V4 , Some(WORD))), - ("h5" , (V5 , Some(WORD))), - ("h6" , (V6 , Some(WORD))), - ("h7" , (V7 , Some(WORD))), - ("h8" , (V8 , Some(WORD))), - ("h9" , (V9 , Some(WORD))), + ("h0", (V0, Some(WORD))), + ("h1", (V1, Some(WORD))), + ("h2", (V2, Some(WORD))), + ("h3", (V3, Some(WORD))), + ("h4", (V4, Some(WORD))), + ("h5", (V5, Some(WORD))), + ("h6", (V6, Some(WORD))), + ("h7", (V7, Some(WORD))), + ("h8", (V8, Some(WORD))), + ("h9", (V9, Some(WORD))), ("h10", (V10, Some(WORD))), ("h11", (V11, Some(WORD))), ("h12", (V12, Some(WORD))), @@ -541,17 +520,16 @@ lazy_static!{ ("h29", (V29, Some(WORD))), ("h30", (V30, Some(WORD))), ("h31", (V31, Some(WORD))), - - ("s0" , 
(V0 , Some(DWORD))), - ("s1" , (V1 , Some(DWORD))), - ("s2" , (V2 , Some(DWORD))), - ("s3" , (V3 , Some(DWORD))), - ("s4" , (V4 , Some(DWORD))), - ("s5" , (V5 , Some(DWORD))), - ("s6" , (V6 , Some(DWORD))), - ("s7" , (V7 , Some(DWORD))), - ("s8" , (V8 , Some(DWORD))), - ("s9" , (V9 , Some(DWORD))), + ("s0", (V0, Some(DWORD))), + ("s1", (V1, Some(DWORD))), + ("s2", (V2, Some(DWORD))), + ("s3", (V3, Some(DWORD))), + ("s4", (V4, Some(DWORD))), + ("s5", (V5, Some(DWORD))), + ("s6", (V6, Some(DWORD))), + ("s7", (V7, Some(DWORD))), + ("s8", (V8, Some(DWORD))), + ("s9", (V9, Some(DWORD))), ("s10", (V10, Some(DWORD))), ("s11", (V11, Some(DWORD))), ("s12", (V12, Some(DWORD))), @@ -574,17 +552,16 @@ lazy_static!{ ("s29", (V29, Some(DWORD))), ("s30", (V30, Some(DWORD))), ("s31", (V31, Some(DWORD))), - - ("d0" , (V0 , Some(QWORD))), - ("d1" , (V1 , Some(QWORD))), - ("d2" , (V2 , Some(QWORD))), - ("d3" , (V3 , Some(QWORD))), - ("d4" , (V4 , Some(QWORD))), - ("d5" , (V5 , Some(QWORD))), - ("d6" , (V6 , Some(QWORD))), - ("d7" , (V7 , Some(QWORD))), - ("d8" , (V8 , Some(QWORD))), - ("d9" , (V9 , Some(QWORD))), + ("d0", (V0, Some(QWORD))), + ("d1", (V1, Some(QWORD))), + ("d2", (V2, Some(QWORD))), + ("d3", (V3, Some(QWORD))), + ("d4", (V4, Some(QWORD))), + ("d5", (V5, Some(QWORD))), + ("d6", (V6, Some(QWORD))), + ("d7", (V7, Some(QWORD))), + ("d8", (V8, Some(QWORD))), + ("d9", (V9, Some(QWORD))), ("d10", (V10, Some(QWORD))), ("d11", (V11, Some(QWORD))), ("d12", (V12, Some(QWORD))), @@ -607,17 +584,16 @@ lazy_static!{ ("d29", (V29, Some(QWORD))), ("d30", (V30, Some(QWORD))), ("d31", (V31, Some(QWORD))), - - ("q0" , (V0 , Some(OWORD))), - ("q1" , (V1 , Some(OWORD))), - ("q2" , (V2 , Some(OWORD))), - ("q3" , (V3 , Some(OWORD))), - ("q4" , (V4 , Some(OWORD))), - ("q5" , (V5 , Some(OWORD))), - ("q6" , (V6 , Some(OWORD))), - ("q7" , (V7 , Some(OWORD))), - ("q8" , (V8 , Some(OWORD))), - ("q9" , (V9 , Some(OWORD))), + ("q0", (V0, Some(OWORD))), + ("q1", (V1, Some(OWORD))), + ("q2", (V2, Some(OWORD))), + ("q3", (V3, Some(OWORD))), + ("q4", (V4, Some(OWORD))), + ("q5", (V5, Some(OWORD))), + ("q6", (V6, Some(OWORD))), + ("q7", (V7, Some(OWORD))), + ("q8", (V8, Some(OWORD))), + ("q9", (V9, Some(OWORD))), ("q10", (V10, Some(OWORD))), ("q11", (V11, Some(OWORD))), ("q12", (V12, Some(OWORD))), @@ -640,17 +616,16 @@ lazy_static!{ ("q29", (V29, Some(OWORD))), ("q30", (V30, Some(OWORD))), ("q31", (V31, Some(OWORD))), - - ("v0" , (V0 , None)), - ("v1" , (V1 , None)), - ("v2" , (V2 , None)), - ("v3" , (V3 , None)), - ("v4" , (V4 , None)), - ("v5" , (V5 , None)), - ("v6" , (V6 , None)), - ("v7" , (V7 , None)), - ("v8" , (V8 , None)), - ("v9" , (V9 , None)), + ("v0", (V0, None)), + ("v1", (V1, None)), + ("v2", (V2, None)), + ("v3", (V3, None)), + ("v4", (V4, None)), + ("v5", (V5, None)), + ("v6", (V6, None)), + ("v7", (V7, None)), + ("v8", (V8, None)), + ("v9", (V9, None)), ("v10", (V10, None)), ("v11", (V11, None)), ("v12", (V12, None)), @@ -676,20 +651,17 @@ lazy_static!{ ]; MAP.iter().cloned().collect() }; - static ref AARCH64_FAMILIES: HashMap<&'static str, (RegFamily, Option)> = { static MAP: &[(&str, (RegFamily, Option))] = &[ - ("X", (RegFamily::INTEGER, Some(Size::QWORD))), - ("W", (RegFamily::INTEGER, Some(Size::DWORD))), + ("X", (RegFamily::INTEGER, Some(Size::QWORD))), + ("W", (RegFamily::INTEGER, Some(Size::DWORD))), ("XSP", (RegFamily::INTEGERSP, Some(Size::QWORD))), ("WSP", (RegFamily::INTEGERSP, Some(Size::DWORD))), - ("B", (RegFamily::SIMD, Some(Size::BYTE))), ("H", (RegFamily::SIMD, Some(Size::WORD))), ("S", 
(RegFamily::SIMD, Some(Size::DWORD))), ("D", (RegFamily::SIMD, Some(Size::QWORD))), ("Q", (RegFamily::SIMD, Some(Size::OWORD))), - ("V", (RegFamily::SIMD, None)), ]; MAP.iter().cloned().collect() diff --git a/plugin/src/arch/mod.rs b/plugin/src/arch/mod.rs index e9c11a0b77..42e4e00351 100644 --- a/plugin/src/arch/mod.rs +++ b/plugin/src/arch/mod.rs @@ -1,25 +1,29 @@ -use syn::parse; use proc_macro_error::emit_error; +use syn::parse; -use crate::common::{Size, Stmt, Jump}; +use crate::common::{Jump, Size, Stmt}; use crate::State; use std::fmt::Debug; -pub mod x64; pub mod aarch64; +pub mod x64; -pub(crate) trait Arch : Debug + Send { +pub(crate) trait Arch: Debug + Send { fn name(&self) -> &str; fn set_features(&mut self, features: &[syn::Ident]); fn handle_static_reloc(&self, stmts: &mut Vec, reloc: Jump, size: Size); fn default_align(&self) -> u8; - fn compile_instruction(&self, state: &mut State, input: parse::ParseStream) -> parse::Result<()>; + fn compile_instruction( + &self, + state: &mut State, + input: parse::ParseStream, + ) -> parse::Result<()>; } #[derive(Clone, Debug)] pub struct DummyArch { - name: &'static str + name: &'static str, } impl DummyArch { @@ -41,15 +45,25 @@ impl Arch for DummyArch { fn handle_static_reloc(&self, _stmts: &mut Vec, reloc: Jump, _size: Size) { let span = reloc.span(); - emit_error!(span, "Current assembling architecture is undefined. Define it using a .arch directive"); + emit_error!( + span, + "Current assembling architecture is undefined. Define it using a .arch directive" + ); } fn default_align(&self) -> u8 { 0 } - fn compile_instruction(&self, _state: &mut State, input: parse::ParseStream) -> parse::Result<()> { - emit_error!(input.cursor().span(), "Current assembling architecture is undefined. Define it using a .arch directive"); + fn compile_instruction( + &self, + _state: &mut State, + input: parse::ParseStream, + ) -> parse::Result<()> { + emit_error!( + input.cursor().span(), + "Current assembling architecture is undefined. Define it using a .arch directive" + ); Ok(()) } } @@ -60,15 +74,15 @@ pub(crate) fn from_str(s: &str) -> Option> { "x86" => Some(Box::new(x64::Archx86::default())), "aarch64" => Some(Box::new(aarch64::ArchAarch64::default())), "unknown" => Some(Box::new(DummyArch::new("unknown"))), - _ => None + _ => None, } } -#[cfg(target_arch="x86_64")] +#[cfg(target_arch = "x86_64")] pub const CURRENT_ARCH: &str = "x64"; -#[cfg(target_arch="x86")] +#[cfg(target_arch = "x86")] pub const CURRENT_ARCH: &str = "x86"; -#[cfg(target_arch="aarch64")] +#[cfg(target_arch = "aarch64")] pub const CURRENT_ARCH: &str = "aarch64"; -#[cfg(not(any(target_arch="x86", target_arch="x86_64", target_arch="aarch64")))] +#[cfg(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64")))] pub const CURRENT_ARCH: &str = "unknown"; diff --git a/plugin/src/arch/x64/ast.rs b/plugin/src/arch/x64/ast.rs index 291403f4d6..3d32c6224d 100644 --- a/plugin/src/arch/x64/ast.rs +++ b/plugin/src/arch/x64/ast.rs @@ -1,11 +1,10 @@ -use syn; use proc_macro2::Span; +use syn; -use crate::common::{Size, Jump}; +use crate::common::{Jump, Size}; use std::cmp::PartialEq; - /** * Reused AST parts */ @@ -17,13 +16,13 @@ use std::cmp::PartialEq; #[derive(Debug, Clone)] pub struct Register { pub size: Size, - pub kind: RegKind + pub kind: RegKind, } #[derive(Debug, Clone)] pub enum RegKind { Static(RegId), - Dynamic(RegFamily, syn::Expr) + Dynamic(RegFamily, syn::Expr), } // this map identifies the different registers that exist. 
some of these can be referred to as different sizes @@ -31,49 +30,119 @@ pub enum RegKind { #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] pub enum RegId { // size: 1, 2, 4 or 8 bytes - RAX = 0x00, RCX = 0x01, RDX = 0x02, RBX = 0x03, - RSP = 0x04, RBP = 0x05, RSI = 0x06, RDI = 0x07, - R8 = 0x08, R9 = 0x09, R10 = 0x0A, R11 = 0x0B, - R12 = 0x0C, R13 = 0x0D, R14 = 0x0E, R15 = 0x0F, + RAX = 0x00, + RCX = 0x01, + RDX = 0x02, + RBX = 0x03, + RSP = 0x04, + RBP = 0x05, + RSI = 0x06, + RDI = 0x07, + R8 = 0x08, + R9 = 0x09, + R10 = 0x0A, + R11 = 0x0B, + R12 = 0x0C, + R13 = 0x0D, + R14 = 0x0E, + R15 = 0x0F, // size: 4 or 8 bytes RIP = 0x15, // size: 1 byte - AH = 0x24, CH = 0x25, DH = 0x26, BH = 0x27, + AH = 0x24, + CH = 0x25, + DH = 0x26, + BH = 0x27, // size: 10 bytes - ST0 = 0x30, ST1 = 0x31, ST2 = 0x32, ST3 = 0x33, - ST4 = 0x34, ST5 = 0x35, ST6 = 0x36, ST7 = 0x37, + ST0 = 0x30, + ST1 = 0x31, + ST2 = 0x32, + ST3 = 0x33, + ST4 = 0x34, + ST5 = 0x35, + ST6 = 0x36, + ST7 = 0x37, // size: 8 bytes. alternative encoding exists - MMX0 = 0x40, MMX1 = 0x41, MMX2 = 0x42, MMX3 = 0x43, - MMX4 = 0x44, MMX5 = 0x45, MMX6 = 0x46, MMX7 = 0x47, + MMX0 = 0x40, + MMX1 = 0x41, + MMX2 = 0x42, + MMX3 = 0x43, + MMX4 = 0x44, + MMX5 = 0x45, + MMX6 = 0x46, + MMX7 = 0x47, // size: 16 bytes or 32 bytes - XMM0 = 0x50, XMM1 = 0x51, XMM2 = 0x52, XMM3 = 0x53, - XMM4 = 0x54, XMM5 = 0x55, XMM6 = 0x56, XMM7 = 0x57, - XMM8 = 0x58, XMM9 = 0x59, XMM10 = 0x5A, XMM11 = 0x5B, - XMM12 = 0x5C, XMM13 = 0x5D, XMM14 = 0x5E, XMM15 = 0x5F, + XMM0 = 0x50, + XMM1 = 0x51, + XMM2 = 0x52, + XMM3 = 0x53, + XMM4 = 0x54, + XMM5 = 0x55, + XMM6 = 0x56, + XMM7 = 0x57, + XMM8 = 0x58, + XMM9 = 0x59, + XMM10 = 0x5A, + XMM11 = 0x5B, + XMM12 = 0x5C, + XMM13 = 0x5D, + XMM14 = 0x5E, + XMM15 = 0x5F, // size: 2 bytes. alternative encoding exists - ES = 0x60, CS = 0x61, SS = 0x62, DS = 0x63, - FS = 0x64, GS = 0x65, + ES = 0x60, + CS = 0x61, + SS = 0x62, + DS = 0x63, + FS = 0x64, + GS = 0x65, // size: 4 bytes - CR0 = 0x70, CR1 = 0x71, CR2 = 0x72, CR3 = 0x73, - CR4 = 0x74, CR5 = 0x75, CR6 = 0x76, CR7 = 0x77, - CR8 = 0x78, CR9 = 0x79, CR10 = 0x7A, CR11 = 0x7B, - CR12 = 0x7C, CR13 = 0x7D, CR14 = 0x7E, CR15 = 0x7F, + CR0 = 0x70, + CR1 = 0x71, + CR2 = 0x72, + CR3 = 0x73, + CR4 = 0x74, + CR5 = 0x75, + CR6 = 0x76, + CR7 = 0x77, + CR8 = 0x78, + CR9 = 0x79, + CR10 = 0x7A, + CR11 = 0x7B, + CR12 = 0x7C, + CR13 = 0x7D, + CR14 = 0x7E, + CR15 = 0x7F, // size: 4 bytes - DR0 = 0x80, DR1 = 0x81, DR2 = 0x82, DR3 = 0x83, - DR4 = 0x84, DR5 = 0x85, DR6 = 0x86, DR7 = 0x87, - DR8 = 0x88, DR9 = 0x89, DR10 = 0x8A, DR11 = 0x8B, - DR12 = 0x8C, DR13 = 0x8D, DR14 = 0x8E, DR15 = 0x8F, + DR0 = 0x80, + DR1 = 0x81, + DR2 = 0x82, + DR3 = 0x83, + DR4 = 0x84, + DR5 = 0x85, + DR6 = 0x86, + DR7 = 0x87, + DR8 = 0x88, + DR9 = 0x89, + DR10 = 0x8A, + DR11 = 0x8B, + DR12 = 0x8C, + DR13 = 0x8D, + DR14 = 0x8E, + DR15 = 0x8F, // size: 16 bytes - BND0 = 0x90, BND1 = 0x91, BND2 = 0x92, BND3 = 0x93 + BND0 = 0x90, + BND1 = 0x91, + BND2 = 0x92, + BND3 = 0x93, } #[derive(Debug, PartialOrd, PartialEq, Ord, Eq, Hash, Clone, Copy)] @@ -87,16 +156,22 @@ pub enum RegFamily { SEGMENT = 6, CONTROL = 7, DEBUG = 8, - BOUND = 9 + BOUND = 9, } impl Register { pub fn new_static(size: Size, id: RegId) -> Register { - Register {size, kind: RegKind::Static(id) } + Register { + size, + kind: RegKind::Static(id), + } } pub fn new_dynamic(size: Size, family: RegFamily, id: syn::Expr) -> Register { - Register {size, kind: RegKind::Dynamic(family, id) } + Register { + size, + kind: RegKind::Dynamic(family, id), + } } pub fn 
size(&self) -> Size { @@ -108,31 +183,30 @@ impl RegKind { pub fn code(&self) -> Option { match *self { RegKind::Static(code) => Some(code.code()), - RegKind::Dynamic(_, _) => None + RegKind::Dynamic(_, _) => None, } } pub fn family(&self) -> RegFamily { match *self { RegKind::Static(code) => code.family(), - RegKind::Dynamic(family, _) => family + RegKind::Dynamic(family, _) => family, } } pub fn is_dynamic(&self) -> bool { match *self { RegKind::Static(_) => false, - RegKind::Dynamic(_, _) => true + RegKind::Dynamic(_, _) => true, } } pub fn is_extended(&self) -> bool { match self.family() { - RegFamily::LEGACY | - RegFamily::XMM | - RegFamily::CONTROL | - RegFamily::DEBUG => self.code().unwrap_or(8) > 7, - _ => false + RegFamily::LEGACY | RegFamily::XMM | RegFamily::CONTROL | RegFamily::DEBUG => { + self.code().unwrap_or(8) > 7 + } + _ => false, } } @@ -150,7 +224,7 @@ impl PartialEq for Register { if self.size == other.size { if let RegKind::Static(code) = self.kind { if let RegKind::Static(other_code) = other.kind { - return code == other_code + return code == other_code; } } } @@ -168,7 +242,7 @@ impl PartialEq for RegKind { fn eq(&self, other: &RegId) -> bool { match *self { RegKind::Static(id) => id == *other, - RegKind::Dynamic(_, _) => false + RegKind::Dynamic(_, _) => false, } } } @@ -178,7 +252,7 @@ impl PartialEq for Option { fn eq(&self, other: &RegId) -> bool { match *self { Some(ref a) => a == other, - None => false + None => false, } } } @@ -187,7 +261,7 @@ impl PartialEq for Option { fn eq(&self, other: &RegId) -> bool { match *self { Some(ref a) => a == other, - None => false + None => false, } } } @@ -209,29 +283,29 @@ impl RegId { 7 => RegFamily::CONTROL, 8 => RegFamily::DEBUG, 9 => RegFamily::BOUND, - _ => unreachable!() + _ => unreachable!(), } } pub fn from_number(id: u8) -> RegId { match id { - 0 => RegId::RAX, - 1 => RegId::RCX, - 2 => RegId::RDX, - 3 => RegId::RBX, - 4 => RegId::RSP, - 5 => RegId::RBP, - 6 => RegId::RSI, - 7 => RegId::RDI, - 8 => RegId::R8, - 9 => RegId::R9, + 0 => RegId::RAX, + 1 => RegId::RCX, + 2 => RegId::RDX, + 3 => RegId::RBX, + 4 => RegId::RSP, + 5 => RegId::RBP, + 6 => RegId::RSI, + 7 => RegId::RDI, + 8 => RegId::R8, + 9 => RegId::R9, 10 => RegId::R10, 11 => RegId::R11, 12 => RegId::R12, 13 => RegId::R13, 14 => RegId::R14, 15 => RegId::R15, - _ => panic!("invalid register code {:?}", id) + _ => panic!("invalid register code {:?}", id), } } } @@ -244,7 +318,7 @@ impl RegId { pub enum MemoryRefItem { ScaledRegister(Register, isize), Register(Register), - Displacement(syn::Expr) + Displacement(syn::Expr), } /** @@ -272,28 +346,28 @@ pub enum RawArg { disp_size: Option, items: Vec, }, - // direct register reference, + // direct register reference, Direct { span: Span, - reg: Register + reg: Register, }, // a jump offset, i.e. ->foo JumpTarget { jump: Jump, - size: Option + size: Option, }, // a memory reference to a label, i.e. [->foo] IndirectJumpTarget { jump: Jump, - size: Option + size: Option, }, // just an arbitrary expression Immediate { value: syn::Expr, - size: Option + size: Option, }, // used to not block the parser on a parsing error in a single arg - Invalid + Invalid, } #[derive(Debug)] @@ -306,28 +380,28 @@ pub enum CleanArg { disp_size: Option, base: Option, index: Option<(Register, isize, Option)>, - disp: Option + disp: Option, }, - // direct register reference, + // direct register reference, Direct { span: Span, - reg: Register + reg: Register, }, // a jump offset, i.e. 
->foo JumpTarget { jump: Jump, - size: Option + size: Option, }, // a memory reference to a label, i.e. [->foo] IndirectJumpTarget { jump: Jump, - size: Option + size: Option, }, // just an arbitrary expression Immediate { value: syn::Expr, - size: Option - } + size: Option, + }, } #[derive(Debug)] @@ -339,27 +413,27 @@ pub enum SizedArg { disp_size: Option, base: Option, index: Option<(Register, isize, Option)>, - disp: Option + disp: Option, }, - // direct register reference, + // direct register reference, Direct { span: Span, - reg: Register + reg: Register, }, // a jump offset, i.e. ->foo JumpTarget { jump: Jump, - size: Size + size: Size, }, // a memory reference to a label, i.e. [->foo] IndirectJumpTarget { - jump: Jump + jump: Jump, }, // just an arbitrary expression Immediate { value: syn::Expr, - size: Size - } + size: Size, + }, } /** @@ -368,5 +442,5 @@ pub enum SizedArg { pub struct Instruction { pub span: Span, - pub idents: Vec + pub idents: Vec, } diff --git a/plugin/src/arch/x64/compiler.rs b/plugin/src/arch/x64/compiler.rs index 4a99b3c67d..900b8a7e2c 100644 --- a/plugin/src/arch/x64/compiler.rs +++ b/plugin/src/arch/x64/compiler.rs @@ -1,22 +1,23 @@ -use syn::spanned::Spanned; use proc_macro2::{Span, TokenTree}; -use quote::{quote_spanned}; use proc_macro_error::emit_error; +use quote::quote_spanned; +use syn::spanned::Spanned; -use crate::common::{Stmt, Size, Jump, JumpKind, delimited}; +use crate::common::{delimited, Jump, JumpKind, Size, Stmt}; use crate::serialize; -use super::{Context, X86Mode}; -use super::ast::{RawArg, CleanArg, SizedArg, Instruction, MemoryRefItem, Register, RegKind, RegFamily, RegId}; +use super::ast::{ + CleanArg, Instruction, MemoryRefItem, RawArg, RegFamily, RegId, RegKind, Register, SizedArg, +}; +use super::debug::format_opdata_list; use super::x64data::get_mnemnonic_data; -use super::x64data::Flags; use super::x64data::Features; -use super::debug::format_opdata_list; +use super::x64data::Flags; +use super::{Context, X86Mode}; +use std::iter; use std::mem::swap; use std::slice; -use std::iter; - /* * Instruction encoding data formats @@ -24,20 +25,22 @@ use std::iter; #[derive(Debug)] pub struct Opdata { - pub args: &'static [u8], // format string of arg format - pub ops: &'static [u8], - pub reg: u8, + pub args: &'static [u8], // format string of arg format + pub ops: &'static [u8], + pub reg: u8, pub flags: Flags, - pub features: Features + pub features: Features, } pub struct FormatStringIterator<'a> { - inner: iter::Cloned> + inner: iter::Cloned>, } impl<'a> FormatStringIterator<'a> { pub fn new(buf: &'a [u8]) -> FormatStringIterator<'a> { - FormatStringIterator { inner: buf.iter().cloned() } + FormatStringIterator { + inner: buf.iter().cloned(), + } } } @@ -61,10 +64,9 @@ impl<'a> Iterator for FormatStringIterator<'a> { const MOD_DIRECT: u8 = 0b11; const MOD_NODISP: u8 = 0b00; // normal addressing const MOD_NOBASE: u8 = 0b00; // VSIB addressing -const MOD_DISP8: u8 = 0b01; +const MOD_DISP8: u8 = 0b01; const MOD_DISP32: u8 = 0b10; - #[derive(Debug, Clone, Copy)] enum RelocationKind { /// A rip-relative relocation. No need to keep track of. 
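For context on the MOD_* constants above, a minimal standalone sketch (illustrative only, not part of the patch; the helper name is made up): the mod value occupies the top two bits of the ModR/M byte next to the reg and r/m fields, which is exactly how compile_modrm_sib further down assembles it.

// Assemble a ModR/M byte: mod in bits 7-6, reg in bits 5-3, r/m in bits 2-0.
// Only the low three bits of each register number fit here; bit 3 is carried
// by REX.R (for reg) and REX.B (for r/m) instead.
fn modrm_byte(mode: u8, reg: u8, rm: u8) -> u8 {
    (mode << 6) | ((reg & 7) << 3) | (rm & 7)
}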
@@ -80,7 +82,7 @@ impl RelocationKind { match self { RelocationKind::Relative => 0, RelocationKind::Absolute => 1, - RelocationKind::Extern => 2 + RelocationKind::Extern => 2, } } } @@ -89,19 +91,26 @@ * Implementation */ -pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, args: Vec<RawArg>) -> Result<(), Option<String>> { +pub(super) fn compile_instruction( + ctx: &mut Context, + instruction: Instruction, + args: Vec<RawArg>, +) -> Result<(), Option<String>> { let mut ops = instruction.idents; let op = ops.pop().unwrap(); let prefixes = ops; // Fold RawArgs into CleanArgs - let mut args = args.into_iter().map(clean_memoryref).collect::<Result<Vec<_>, _>>()?; + let mut args = args + .into_iter() + .map(clean_memoryref) + .collect::<Result<Vec<_>, _>>()?; // sanitize memory references, determine address size, and size immediates/displacements if possible let addr_size = sanitize_indirects_and_sizes(&ctx, &mut args)?; let addr_size = addr_size.unwrap_or(match ctx.mode { X86Mode::Long => Size::QWORD, - X86Mode::Protected => Size::DWORD + X86Mode::Protected => Size::DWORD, }); // determine if we need an address size override prefix @@ -110,7 +119,7 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a (X86Mode::Long, Size::DWORD) => true, (X86Mode::Protected, Size::DWORD) => false, (X86Mode::Protected, Size::WORD) => true, - _ => return Err(Some("Impossible address size".into())) + _ => return Err(Some("Impossible address size".into())), }; // find a matching op @@ -135,15 +144,23 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a let mut vex_l = false; // determine if size prefixes are necessary - if data.flags.intersects(Flags::AUTO_SIZE | Flags::AUTO_NO32 | Flags::AUTO_REXW | Flags::AUTO_VEXL) { + if data + .flags + .intersects(Flags::AUTO_SIZE | Flags::AUTO_NO32 | Flags::AUTO_REXW | Flags::AUTO_VEXL) + { // if any of these flags are true an operand size should've been calculated let op_size = op_size.expect("Bad formatting data? 
No wildcard sizes"); match ctx.mode { - X86Mode::Protected => if op_size == Size::QWORD { - return Err(Some(format!("'{}': Does not support 64 bit operands in 32-bit mode", op.to_string()))); - }, - X86Mode::Long => () + X86Mode::Protected => { + if op_size == Size::QWORD { + return Err(Some(format!( + "'{}': Does not support 64 bit operands in 32-bit mode", + op.to_string() + ))); + } + } + X86Mode::Long => (), } if data.flags.contains(Flags::AUTO_NO32) { @@ -151,14 +168,22 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a (Size::WORD, _) => pref_size = true, (Size::QWORD, X86Mode::Long) => (), (Size::DWORD, X86Mode::Protected) => (), - (Size::DWORD, X86Mode::Long) => return Err(Some(format!("'{}': Does not support 32 bit operands in 64-bit mode", op.to_string()))), + (Size::DWORD, X86Mode::Long) => { + return Err(Some(format!( + "'{}': Does not support 32 bit operands in 64-bit mode", + op.to_string() + ))) + } (_, _) => panic!("bad formatting data"), } } else if data.flags.contains(Flags::AUTO_REXW) { if op_size == Size::QWORD { rex_w = true; } else if op_size != Size::DWORD { - return Err(Some(format!("'{}': Does not support 16-bit operands", op.to_string()))); + return Err(Some(format!( + "'{}': Does not support 16-bit operands", + op.to_string() + ))); } } else if data.flags.contains(Flags::AUTO_VEXL) { if op_size == Size::HWORD { @@ -177,13 +202,16 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a // mandatory prefixes let pref_size = pref_size || data.flags.contains(Flags::WORD_SIZE); - let rex_w = rex_w || data.flags.contains(Flags::WITH_REXW); - let vex_l = vex_l || data.flags.contains(Flags::WITH_VEXL); + let rex_w = rex_w || data.flags.contains(Flags::WITH_REXW); + let vex_l = vex_l || data.flags.contains(Flags::WITH_VEXL); let pref_addr = pref_addr || data.flags.contains(Flags::PREF_67); - if data.flags.contains(Flags::PREF_F0) { pref_mod = Some(0xF0); - } else if data.flags.contains(Flags::PREF_F2) { pref_mod = Some(0xF2); - } else if data.flags.contains(Flags::PREF_F3) { pref_mod = Some(0xF3); + if data.flags.contains(Flags::PREF_F0) { + pref_mod = Some(0xF0); + } else if data.flags.contains(Flags::PREF_F2) { + pref_mod = Some(0xF2); + } else if data.flags.contains(Flags::PREF_F3) { + pref_mod = Some(0xF3); } // check if this combination of args can actually be encoded and whether a rex prefix is necessary @@ -220,15 +248,21 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a // VEX/XOP prefixes embed the operand size prefix / modification prefixes in them. if data.flags.intersects(Flags::VEX_OP | Flags::XOP_OP) { - let prefix = if pref_size { 0b01 - } else if pref_mod == Some(0xF3) { 0b10 - } else if pref_mod == Some(0xF2) { 0b11 - } else { 0 + let prefix = if pref_size { + 0b01 + } else if pref_mod == Some(0xF3) { + 0b10 + } else if pref_mod == Some(0xF2) { + 0b11 + } else { + 0 }; // map_sel is stored in the first byte of the opcode let (&map_sel, tail) = ops.split_first().expect("bad formatting data"); ops = tail; - compile_vex_xop(ctx.mode, buffer, data, ®, &rm, map_sel, rex_w, &vvvv, vex_l, prefix); + compile_vex_xop( + ctx.mode, buffer, data, ®, &rm, map_sel, rex_w, &vvvv, vex_l, prefix, + ); // otherwise, the size/mod prefixes have to be pushed and check if a rex prefix has to be generated. 
} else { if let Some(pref) = pref_mod { @@ -241,7 +275,10 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a // Certain SSE/AVX legacy encoded operations are not available in 32-bit mode // as they require a REX.W prefix to be encoded, which is impossible. We catch those cases here if ctx.mode == X86Mode::Protected { - return Err(Some(format!("'{}': Does not support 64 bit operand size in 32-bit mode", op.to_string()))) + return Err(Some(format!( + "'{}': Does not support 64 bit operand size in 32-bit mode", + op.to_string() + ))); } compile_rex(buffer, rex_w, &reg, &rm); } @@ -253,7 +290,7 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a ops = head; buffer.push(Stmt::Extend(Vec::from(ops))); - let rm_k = if let Some(SizedArg::Direct {reg, ..}) = rm.take() { + let rm_k = if let Some(SizedArg::Direct { reg, .. }) = rm.take() { reg.kind } else { panic!("bad formatting data") @@ -261,7 +298,10 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a if let RegKind::Dynamic(_, expr) = rm_k { let last: TokenTree = proc_macro2::Literal::u8_suffixed(*last).into(); - buffer.push(Stmt::ExprUnsigned(serialize::expr_mask_shift_or(&last, &delimited(expr), 7, 0), Size::BYTE)); + buffer.push(Stmt::ExprUnsigned( + serialize::expr_mask_shift_or(&last, &delimited(expr), 7, 0), + Size::BYTE, + )); } else { buffer.push(Stmt::u8(last + (rm_k.encode() & 7))); } @@ -271,8 +311,8 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a } // Direct ModRM addressing - if let Some(SizedArg::Direct {reg: rm, ..}) = rm { - let reg_k = if let Some(SizedArg::Direct {reg, ..}) = reg { + if let Some(SizedArg::Direct { reg: rm, .. }) = rm { + let reg_k = if let Some(SizedArg::Direct { reg, .. }) = reg { reg.kind } else { RegKind::from_number(data.reg) @@ -280,18 +320,31 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a compile_modrm_sib(buffer, MOD_DIRECT, reg_k, rm.kind); // Indirect ModRM (+SIB) addressing - } else if let Some(SizedArg::Indirect {disp_size, base, index, disp, ..}) = rm { - let reg_k = if let Some(SizedArg::Direct {reg, ..}) = reg { + } else if let Some(SizedArg::Indirect { + disp_size, + base, + index, + disp, + .. + }) = rm + { + let reg_k = if let Some(SizedArg::Direct { reg, .. }) = reg { reg.kind } else { RegKind::from_number(data.reg) }; // check addressing mode special cases - let mode_vsib = index.as_ref().map_or(false, |&(ref i, _, _)| i.kind.family() == RegFamily::XMM); + let mode_vsib = index + .as_ref() + .map_or(false, |&(ref i, _, _)| i.kind.family() == RegFamily::XMM); let mode_16bit = addr_size == Size::WORD; - let mode_rip_relative = base.as_ref().map_or(false, |b| b.kind.family() == RegFamily::RIP); - let mode_rbp_base = base.as_ref().map_or(false, |b| b == &RegId::RBP || b == &RegId::R13 || b.kind.is_dynamic()); + let mode_rip_relative = base + .as_ref() + .map_or(false, |b| b.kind.family() == RegFamily::RIP); + let mode_rbp_base = base.as_ref().map_or(false, |b| { + b == &RegId::RBP || b == &RegId::R13 || b.kind.is_dynamic() + }); if mode_vsib { let (index, scale, scale_expr) = index.unwrap(); // VSIB addressing has simplified rules. 
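// Illustrative note (not from the patched source): with VSIB the index is always a
// vector register and a SIB byte is mandatory. When no general-purpose base is
// present, the base field is encoded as 0b101 (RBP) with mod = 0b00, which the CPU
// reads as 'no base, 32-bit displacement'; that is why the no-base path below always
// emits a dword displacement.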
let (base, mode) = if let Some(base) = base { - (base.kind, match (&disp, disp_size) { - (&Some(_), Some(Size::BYTE)) => MOD_DISP8, - (&Some(_), _) => MOD_DISP32, - (&None, _) => MOD_DISP8 - }) + ( + base.kind, + match (&disp, disp_size) { + (&Some(_), Some(Size::BYTE)) => MOD_DISP8, + (&Some(_), _) => MOD_DISP32, + (&None, _) => MOD_DISP8, + }, + ) } else { (RegKind::Static(RegId::RBP), MOD_NOBASE) }; @@ -318,7 +374,14 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a } if let Some(disp) = disp { - buffer.push(Stmt::ExprSigned(delimited(disp), if mode == MOD_DISP8 {Size::BYTE} else {Size::DWORD})); + buffer.push(Stmt::ExprSigned( + delimited(disp), + if mode == MOD_DISP8 { + Size::BYTE + } else { + Size::DWORD + }, + )); } else if mode == MOD_DISP8 { // no displacement was asked for, but we have to encode one as there's a base buffer.push(Stmt::u8(0)); @@ -326,7 +389,6 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a // MODE_NOBASE requires a dword displacement, and if we got here no displacement was asked for. buffer.push(Stmt::u32(0)); } - } else if mode_16bit { // 16-bit mode: the index/base combination has been encoded in the base register. // this register is guaranteed to be present. @@ -334,37 +396,58 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a let mode = match (&disp, disp_size) { (&Some(_), Some(Size::BYTE)) => MOD_DISP8, (&Some(_), _) => MOD_DISP32, // well, technically 16-bit. - (&None, _) => if mode_rbp_base {MOD_DISP8} else {MOD_NODISP} + (&None, _) => { + if mode_rbp_base { + MOD_DISP8 + } else { + MOD_NODISP + } + } }; // only need a mod.r/m byte for 16-bit addressing compile_modrm_sib(buffer, mode, reg_k, base_k); if let Some(disp) = disp { - buffer.push(Stmt::ExprSigned(delimited(disp), if mode == MOD_DISP8 {Size::BYTE} else {Size::WORD})); + buffer.push(Stmt::ExprSigned( + delimited(disp), + if mode == MOD_DISP8 { + Size::BYTE + } else { + Size::WORD + }, + )); } else if mode == MOD_DISP8 { buffer.push(Stmt::u8(0)); } - } else if mode_rip_relative { // encode the RIP + disp32 or disp32 form compile_modrm_sib(buffer, MOD_NODISP, reg_k, RegKind::Static(RegId::RBP)); match ctx.mode { - X86Mode::Long => if let Some(disp) = disp { - buffer.push(Stmt::ExprSigned(delimited(disp), Size::DWORD)); - } else { - buffer.push(Stmt::u32(0)) - }, + X86Mode::Long => { + if let Some(disp) = disp { + buffer.push(Stmt::ExprSigned(delimited(disp), Size::DWORD)); + } else { + buffer.push(Stmt::u32(0)) + } + } X86Mode::Protected => { // x86 doesn't actually allow RIP-relative addressing // but we can work around it with relocations buffer.push(Stmt::u32(0)); - let disp = disp.unwrap_or_else(|| serialize::reparse(&serialize::expr_zero()).expect("Invalid expression generated")); - relocations.push((Jump::new(JumpKind::Bare(disp), None), 0, Size::DWORD, RelocationKind::Absolute)); - }, + let disp = disp.unwrap_or_else(|| { + serialize::reparse(&serialize::expr_zero()) + .expect("Invalid expression generated") + }); + relocations.push(( + Jump::new(JumpKind::Bare(disp), None), + 0, + Size::DWORD, + RelocationKind::Absolute, + )); + } } - } else { // normal addressing let no_base = base.is_none(); @@ -408,17 +491,29 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a match ctx.mode { X86Mode::Protected => { compile_modrm_sib(buffer, mode, reg_k, RegKind::Static(RegId::RBP)); - }, + } X86Mode::Long => { compile_modrm_sib(buffer, mode, reg_k, 
RegKind::Static(RegId::RSP)); - compile_modrm_sib(buffer, 0, RegKind::Static(RegId::RSP), RegKind::Static(RegId::RBP)); + compile_modrm_sib( + buffer, + 0, + RegKind::Static(RegId::RSP), + RegKind::Static(RegId::RBP), + ); } } } // Disp if let Some(disp) = disp { - buffer.push(Stmt::ExprSigned(delimited(disp), if mode == MOD_DISP8 {Size::BYTE} else {Size::DWORD})); + buffer.push(Stmt::ExprSigned( + delimited(disp), + if mode == MOD_DISP8 { + Size::BYTE + } else { + Size::DWORD + }, + )); } else if no_base { buffer.push(Stmt::u32(0)); } else if mode == MOD_DISP8 { @@ -427,8 +522,8 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a } // jump-target relative addressing - } else if let Some(SizedArg::IndirectJumpTarget {jump, ..}) = rm { - let reg_k = if let Some(SizedArg::Direct {reg, ..}) = reg { + } else if let Some(SizedArg::IndirectJumpTarget { jump, .. }) = rm { + let reg_k = if let Some(SizedArg::Direct { reg, .. }) = reg { reg.kind } else { RegKind::from_number(data.reg) @@ -437,8 +532,10 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a buffer.push(Stmt::u32(0)); match ctx.mode { - X86Mode::Long => relocations.push((jump, 0, Size::DWORD, RelocationKind::Relative)), - X86Mode::Protected => relocations.push((jump, 0, Size::DWORD, RelocationKind::Absolute)) + X86Mode::Long => relocations.push((jump, 0, Size::DWORD, RelocationKind::Relative)), + X86Mode::Protected => { + relocations.push((jump, 0, Size::DWORD, RelocationKind::Absolute)) + } } } @@ -451,7 +548,7 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a } // register in immediate argument - if let Some(SizedArg::Direct {reg: ireg, ..}) = ireg { + if let Some(SizedArg::Direct { reg: ireg, .. }) = ireg { let ireg = ireg.kind; let byte = ireg.encode() << 4; @@ -462,7 +559,11 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a // if immediates are present, the register argument will be merged into the // first immediate byte. 
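// Illustrative note (not from the patched source): this is the /is4 operand form used
// by a few VEX/XOP instructions (vblendvps, vpblendvb, the FMA4 family), where a
// fourth register travels in the top nibble of an imm8:
//     imm8 = (register_number << 4) | (imm & 0xF)
// matching the `encode() << 4` above and the 0xF mask applied just below.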
if !args.is_empty() { - if let SizedArg::Immediate {value, size: Size::BYTE} = args.remove(0) { + if let SizedArg::Immediate { + value, + size: Size::BYTE, + } = args.remove(0) + { byte = serialize::expr_mask_shift_or(&byte, &delimited(value), 0xF, 0); } else { panic!("bad formatting data") @@ -477,13 +578,13 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a // immediates for arg in args { match arg { - SizedArg::Immediate {value, size} => { + SizedArg::Immediate { value, size } => { buffer.push(Stmt::ExprSigned(delimited(value), size)); // bump relocations relocations.iter_mut().for_each(|r| r.1 += size.in_bytes()); - }, - SizedArg::JumpTarget {jump, size} => { + } + SizedArg::JumpTarget { jump, size } => { // placeholder buffer.push(Stmt::Const(0, size)); @@ -493,14 +594,20 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a // add the new relocation if let JumpKind::Bare(_) = &jump.kind { match ctx.mode { - X86Mode::Protected => relocations.push((jump, 0, size, RelocationKind::Extern)), - X86Mode::Long => return Err(Some("Extern relocations are not supported in x64 mode".to_string())) + X86Mode::Protected => { + relocations.push((jump, 0, size, RelocationKind::Extern)) + } + X86Mode::Long => { + return Err(Some( + "Extern relocations are not supported in x64 mode".to_string(), + )) + } } } else { relocations.push((jump, 0, size, RelocationKind::Relative)); } - }, - _ => panic!("bad immediate data") + } + _ => panic!("bad immediate data"), }; } @@ -509,7 +616,7 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a let data = [size.in_bytes(), kind.to_id()]; let data = match ctx.mode { X86Mode::Protected => &data, - X86Mode::Long => &data[..1], + X86Mode::Long => &data[..1], }; // field offset has been tracked, and ref_offset is 0 as x86 offsets are relative to the end of the instruction @@ -522,17 +629,25 @@ pub(super) fn compile_instruction(ctx: &mut Context, instruction: Instruction, a // Folds RawArgs into CleanArgs by analyzing the different raw memoryref variants fn clean_memoryref(arg: RawArg) -> Result> { Ok(match arg { - RawArg::Direct {span, reg} => CleanArg::Direct {span, reg}, - RawArg::JumpTarget {jump, size} => CleanArg::JumpTarget {jump, size}, - RawArg::IndirectJumpTarget {jump, size} => { + RawArg::Direct { span, reg } => CleanArg::Direct { span, reg }, + RawArg::JumpTarget { jump, size } => CleanArg::JumpTarget { jump, size }, + RawArg::IndirectJumpTarget { jump, size } => { if let JumpKind::Bare(_) = jump.kind { - return Err(Some("Extern indirect jumps are not supported. Use a displacement".to_string())) + return Err(Some( + "Extern indirect jumps are not supported. 
Use a displacement".to_string(), + )); } - CleanArg::IndirectJumpTarget {jump, size} - }, - RawArg::Immediate {value, size} => CleanArg::Immediate {value, size}, + CleanArg::IndirectJumpTarget { jump, size } + } + RawArg::Immediate { value, size } => CleanArg::Immediate { value, size }, RawArg::Invalid => return Err(None), - RawArg::IndirectRaw {span, value_size, nosplit, disp_size, items} => { + RawArg::IndirectRaw { + span, + value_size, + nosplit, + disp_size, + items, + } => { // split the ast on the memoryrefitem types let mut scaled = Vec::new(); let mut regs = Vec::new(); @@ -541,15 +656,19 @@ fn clean_memoryref(arg: RawArg) -> Result> { match item { MemoryRefItem::Register(reg) => regs.push(reg), MemoryRefItem::ScaledRegister(reg, value) => scaled.push((reg, value)), - MemoryRefItem::Displacement(expr) => disps.push(expr) + MemoryRefItem::Displacement(expr) => disps.push(expr), } } // figure out the base register if possible let mut base_reg_index = None; for (i, reg) in regs.iter().enumerate() { - if !(regs.iter().enumerate().any(|(j, other)| i != j && reg == other) || - scaled.iter().any(|&(ref other, _)| reg == other)) { + if !(regs + .iter() + .enumerate() + .any(|(j, other)| i != j && reg == other) + || scaled.iter().any(|&(ref other, _)| reg == other)) + { base_reg_index = Some(i); break; } @@ -587,7 +706,6 @@ fn clean_memoryref(arg: RawArg) -> Result> { joined_regs.pop() }; - if !joined_regs.is_empty() { emit_error!(span, "Impossible memory argument"); return Err(None); @@ -595,7 +713,8 @@ fn clean_memoryref(arg: RawArg) -> Result> { // merge disps let disp = serialize::expr_add_many(span, disps.into_iter().map(delimited)); - let disp = disp.map(|d| serialize::reparse(&d).expect("Invalid expression generated internally")); + let disp = disp + .map(|d| serialize::reparse(&d).expect("Invalid expression generated internally")); // finalize the memoryref CleanArg::Indirect { @@ -607,8 +726,17 @@ fn clean_memoryref(arg: RawArg) -> Result> { index: index.map(|(r, s)| (r, s, None)), disp, } - }, - RawArg::TypeMappedRaw {span, base_reg, scale, value_size, nosplit, disp_size, scaled_items, attribute} => { + } + RawArg::TypeMappedRaw { + span, + base_reg, + scale, + value_size, + nosplit, + disp_size, + scaled_items, + attribute, + } => { let base = base_reg; // collect registers / displacements @@ -618,7 +746,7 @@ fn clean_memoryref(arg: RawArg) -> Result> { match item { MemoryRefItem::Register(reg) => scaled.push((reg, 1)), MemoryRefItem::ScaledRegister(reg, scale) => scaled.push((reg, scale)), - MemoryRefItem::Displacement(expr) => disps.push(expr) + MemoryRefItem::Displacement(expr) => disps.push(expr), } } @@ -656,10 +784,12 @@ fn clean_memoryref(arg: RawArg) -> Result> { let scaled_disp = serialize::expr_add_many(span, disps.into_iter().map(delimited)); // scale disps (a + b + c) * size_of as disp_size - let scaled_disp = scaled_disp.map(|disp| serialize::expr_size_of_scale(&scale, &disp, true_disp_size)); + let scaled_disp = scaled_disp + .map(|disp| serialize::expr_size_of_scale(&scale, &disp, true_disp_size)); // attribute displacement offset_of(scale, attr) as disp_size - let attr_disp = attribute.map(|attr| serialize::expr_offset_of(&scale, &attr, true_disp_size)); + let attr_disp = + attribute.map(|attr| serialize::expr_offset_of(&scale, &attr, true_disp_size)); // add displacement sources together let disp = if let Some(scaled_disp) = scaled_disp { @@ -672,10 +802,12 @@ fn clean_memoryref(arg: RawArg) -> Result> { attr_disp }; - let disp = disp.map(|d| 
serialize::reparse(&d).expect("Invalid expression generated internally")); + let disp = disp + .map(|d| serialize::reparse(&d).expect("Invalid expression generated internally")); let index = index.map(|(r, s)| { - let scale_expr = serialize::reparse(&serialize::expr_size_of(&scale)).expect("Invalid expression generated internally"); + let scale_expr = serialize::reparse(&serialize::expr_size_of(&scale)) + .expect("Invalid expression generated internally"); (r, s, Some(scale_expr)) }); @@ -689,21 +821,31 @@ fn clean_memoryref(arg: RawArg) -> Result> { index, disp, } - }, + } }) } // Go through the CleanArgs, check for impossible to encode indirect arguments, fill in immediate/displacement size information // and return the effective address size -fn sanitize_indirects_and_sizes(ctx: &Context, args: &mut [CleanArg]) -> Result, Option> { +fn sanitize_indirects_and_sizes( + ctx: &Context, + args: &mut [CleanArg], +) -> Result, Option> { // determine if an address size prefix is necessary, and sanitize the register choice for memoryrefs let mut addr_size = None; let mut encountered_indirect = false; for arg in args.iter_mut() { match *arg { - CleanArg::Indirect {span, nosplit, ref mut disp_size, ref mut base, ref mut index, ref disp, ..} => { - + CleanArg::Indirect { + span, + nosplit, + ref mut disp_size, + ref mut base, + ref mut index, + ref disp, + .. + } => { if encountered_indirect { emit_error!(span, "Multiple memory references in a single instruction") } @@ -727,24 +869,33 @@ fn sanitize_indirects_and_sizes(ctx: &Context, args: &mut [CleanArg]) -> Result< // 16-bit addressing has smaller displacements if addr_size == Some(Size::WORD) { if size != Size::BYTE && size != Size::WORD { - emit_error!(span, "Invalid displacement size, only BYTE or WORD are possible"); + emit_error!( + span, + "Invalid displacement size, only BYTE or WORD are possible" + ); } } else if size != Size::BYTE && size != Size::DWORD { - emit_error!(span, "Invalid displacement size, only BYTE or DWORD are possible"); + emit_error!( + span, + "Invalid displacement size, only BYTE or DWORD are possible" + ); } } else if let Some(ref disp) = *disp { match derive_size(disp) { - Some(Size::BYTE) => *disp_size = Some(Size::BYTE), + Some(Size::BYTE) => *disp_size = Some(Size::BYTE), Some(_) if addr_size == Some(Size::WORD) => *disp_size = Some(Size::WORD), - Some(_) => *disp_size = Some(Size::DWORD), - None => () + Some(_) => *disp_size = Some(Size::DWORD), + None => (), } } - }, - CleanArg::Immediate {ref value, size: ref mut size @ None} => { + } + CleanArg::Immediate { + ref value, + size: ref mut size @ None, + } => { *size = derive_size(value); - }, - _ => () + } + _ => (), } } @@ -760,7 +911,7 @@ fn derive_size(expr: &syn::Expr) -> Option { } match inner { - syn::Expr::Lit(syn::ExprLit { ref lit, .. } ) => match lit { + syn::Expr::Lit(syn::ExprLit { ref lit, .. }) => match lit { syn::Lit::Byte(_) => Some(Size::BYTE), syn::Lit::Int(i) => match i.base10_parse::() { Ok(x) if x < 0x80 => Some(Size::BYTE), @@ -768,10 +919,14 @@ fn derive_size(expr: &syn::Expr) -> Option { Ok(x) if x < 0x8000_0000 => Some(Size::DWORD), _ => Some(Size::QWORD), }, - _ => None + _ => None, }, - syn::Expr::Unary(syn::ExprUnary { op: syn::UnOp::Neg(_), ref expr, .. } ) => match &**expr { - syn::Expr::Lit(syn::ExprLit { ref lit, .. } ) => match lit { + syn::Expr::Unary(syn::ExprUnary { + op: syn::UnOp::Neg(_), + ref expr, + .. + }) => match &**expr { + syn::Expr::Lit(syn::ExprLit { ref lit, .. 
}) => match lit { syn::Lit::Byte(_) => Some(Size::BYTE), syn::Lit::Int(i) => match i.base10_parse::() { Ok(x) if x <= 0x80 => Some(Size::BYTE), @@ -779,20 +934,23 @@ fn derive_size(expr: &syn::Expr) -> Option { Ok(x) if x <= 0x8000_0000 => Some(Size::DWORD), _ => Some(Size::QWORD), }, - _ => None + _ => None, }, - _ => None + _ => None, }, - _ => None + _ => None, } } /// Validates that the base/index combination can actually be encoded and returns the effective address size. /// If the address size can't be determined (purely displacement, or VSIB without base), the result is None. -fn sanitize_indirect(ctx: &Context, span: Span, nosplit: bool, base: &mut Option, - index: &mut Option<(Register, isize, Option)>) -> Result, Option> -{ - +fn sanitize_indirect( + ctx: &Context, + span: Span, + nosplit: bool, + base: &mut Option, + index: &mut Option<(Register, isize, Option)>, +) -> Result, Option> { // figure out the addressing size/mode used. // size can be 16, 32, or 64-bit. // mode can be legacy, rip-relative, or vsib @@ -807,55 +965,62 @@ fn sanitize_indirect(ctx: &Context, span: Span, nosplit: bool, base: &mut Option // figure out the addressing mode and size match (&b, &i) { (&None, &None) => return Ok(None), - (&Some((f, s)), &None) | - (&None, &Some((f, s))) => { + (&Some((f, s)), &None) | (&None, &Some((f, s))) => { size = s; family = f; - }, - (&Some((f1, s1)), &Some((f2, s2))) => if f1 == f2 { - if s1 != s2 { - emit_error!(span, "Registers of differing sizes"); + } + (&Some((f1, s1)), &Some((f2, s2))) => { + if f1 == f2 { + if s1 != s2 { + emit_error!(span, "Registers of differing sizes"); + return Err(None); + } + size = s1; + family = f1; + + // allow only vsib addressing + } else if f1 == RegFamily::XMM { + vsib_mode = true; + size = s2; + family = f2; + } else if f2 == RegFamily::XMM { + vsib_mode = true; + size = s1; + family = f1; + } else { + emit_error!(span, "Register type combination not supported"); return Err(None); } - size = s1; - family = f1; - - // allow only vsib addressing - } else if f1 == RegFamily::XMM { - vsib_mode = true; - size = s2; - family = f2; - } else if f2 == RegFamily::XMM { - vsib_mode = true; - size = s1; - family = f1; - } else { - emit_error!(span, "Register type combination not supported"); - return Err(None); } } // filter out combinations that are impossible to encode match family { - RegFamily::RIP => if b.is_some() && i.is_some() { - emit_error!(span, "Register type combination not supported"); - return Err(None); - }, + RegFamily::RIP => { + if b.is_some() && i.is_some() { + emit_error!(span, "Register type combination not supported"); + return Err(None); + } + } RegFamily::LEGACY => match size { Size::DWORD => (), Size::QWORD => (), // only valid in long mode, but should only be possible in long mode - Size::WORD => if ctx.mode == X86Mode::Protected || vsib_mode { - emit_error!(span, "16-bit addressing is not supported in this mode"); - return Err(None); - }, + Size::WORD => { + if ctx.mode == X86Mode::Protected || vsib_mode { + emit_error!(span, "16-bit addressing is not supported in this mode"); + return Err(None); + } + } _ => { emit_error!(span, "Register type not supported"); return Err(None); } }, - RegFamily::XMM => if b.is_some() && i.is_some() { - emit_error!(span, "Register type combination not supported"); - }, + RegFamily::XMM => { + if b.is_some() && i.is_some() { + emit_error!(span, "Register type combination not supported"); + } + } _ => { emit_error!(span, "Register type not supported"); return Err(None); @@ -870,8 +1035,8 
@@ fn sanitize_indirect(ctx: &Context, span: Span, nosplit: bool, base: &mut Option Some(_) => { emit_error!(span, "RIP cannot be scaled"); return Err(None); - }, - None => () + } + None => (), } return Ok(Some(size)); } @@ -895,7 +1060,10 @@ fn sanitize_indirect(ctx: &Context, span: Span, nosplit: bool, base: &mut Option if let (ref mut i, 1, None) = index.as_mut().unwrap() { swap(i, base.as_mut().unwrap()) } else { - emit_error!(span, "vsib addressing requires a general purpose register as base"); + emit_error!( + span, + "vsib addressing requires a general purpose register as base" + ); return Err(None); } } @@ -913,7 +1081,7 @@ fn sanitize_indirect(ctx: &Context, span: Span, nosplit: bool, base: &mut Option Some(_) => { emit_error!(span, "16-bit addressing with scaled index"); return Err(None); - }, + } }; if first_reg.is_none() { @@ -921,14 +1089,30 @@ fn sanitize_indirect(ctx: &Context, span: Span, nosplit: bool, base: &mut Option } let encoded_base = match (&first_reg, &second_reg) { - (r1, r2) if (r1 == &RegId::RBX && r2 == &RegId::RSI) || - (r1 == &RegId::RSI && r2 == &RegId::RBX) => RegId::from_number(0), - (r1, r2) if (r1 == &RegId::RBX && r2 == &RegId::RDI) || - (r1 == &RegId::RDI && r2 == &RegId::RBX) => RegId::from_number(1), - (r1, r2) if (r1 == &RegId::RBP && r2 == &RegId::RSI) || - (r1 == &RegId::RSI && r2 == &RegId::RBP) => RegId::from_number(2), - (r1, r2) if (r1 == &RegId::RBP && r2 == &RegId::RDI) || - (r1 == &RegId::RDI && r2 == &RegId::RBP) => RegId::from_number(3), + (r1, r2) + if (r1 == &RegId::RBX && r2 == &RegId::RSI) + || (r1 == &RegId::RSI && r2 == &RegId::RBX) => + { + RegId::from_number(0) + } + (r1, r2) + if (r1 == &RegId::RBX && r2 == &RegId::RDI) + || (r1 == &RegId::RDI && r2 == &RegId::RBX) => + { + RegId::from_number(1) + } + (r1, r2) + if (r1 == &RegId::RBP && r2 == &RegId::RSI) + || (r1 == &RegId::RSI && r2 == &RegId::RBP) => + { + RegId::from_number(2) + } + (r1, r2) + if (r1 == &RegId::RBP && r2 == &RegId::RDI) + || (r1 == &RegId::RDI && r2 == &RegId::RBP) => + { + RegId::from_number(3) + } (r, None) if r == &RegId::RSI => RegId::from_number(4), (r, None) if r == &RegId::RDI => RegId::from_number(5), (r, None) if r == &RegId::RBP => RegId::from_number(6), @@ -952,8 +1136,8 @@ fn sanitize_indirect(ctx: &Context, span: Span, nosplit: bool, base: &mut Option 2 | 3 | 5 | 9 => { *base = Some(reg.clone()); *scale -= 1 - }, - _ => () + } + _ => (), } } } @@ -974,7 +1158,11 @@ fn sanitize_indirect(ctx: &Context, span: Span, nosplit: bool, base: &mut Option } // RSP, R12 or a dynamic register as base without index (add an index so we escape into SIB) - if index.is_none() && (*base == RegId::RSP || *base == RegId::R12 || base.as_ref().map_or(false, |r| r.kind.is_dynamic())) { + if index.is_none() + && (*base == RegId::RSP + || *base == RegId::R12 + || base.as_ref().map_or(false, |r| r.kind.is_dynamic())) + { *index = Some((Register::new_static(size, RegId::RSP), 1, None)); } @@ -982,7 +1170,11 @@ fn sanitize_indirect(ctx: &Context, span: Span, nosplit: bool, base: &mut Option Ok(Some(size)) } -fn match_op_format(ctx: &Context, ident: &syn::Ident, args: &[CleanArg]) -> Result<&'static Opdata, Option> { +fn match_op_format( + ctx: &Context, + ident: &syn::Ident, + args: &[CleanArg], +) -> Result<&'static Opdata, Option> { let name = ident.to_string(); let name = name.as_str(); @@ -999,9 +1191,11 @@ fn match_op_format(ctx: &Context, ident: &syn::Ident, args: &[CleanArg]) -> Resu } } - Err(Some( - format!("'{}': argument type/size mismatch, expected one of the 
following forms:\n{}", name, format_opdata_list(name, data)) - )) + Err(Some(format!( + "'{}': argument type/size mismatch, expected one of the following forms:\n{}", + name, + format_opdata_list(name, data) + ))) } fn match_format_string(ctx: &Context, fmt: &Opdata, args: &[CleanArg]) -> Result<(), &'static str> { @@ -1051,70 +1245,119 @@ fn match_format_string(ctx: &Context, fmt: &Opdata, args: &[CleanArg]) -> Result let size = match (code, arg) { // immediates - (b'i', &CleanArg::Immediate{size, ..}) | - (b'o', &CleanArg::Immediate{size, ..}) | - (b'o', &CleanArg::JumpTarget{size, ..}) => size, + (b'i', &CleanArg::Immediate { size, .. }) + | (b'o', &CleanArg::Immediate { size, .. }) + | (b'o', &CleanArg::JumpTarget { size, .. }) => size, // specific legacy regs - (x @ b'A' ..= b'P', &CleanArg::Direct{ref reg, ..}) if - reg.kind.family() == RegFamily::LEGACY && - reg.kind.code() == Some(x - b'A') => Some(reg.size()), + (x @ b'A'..=b'P', &CleanArg::Direct { ref reg, .. }) + if reg.kind.family() == RegFamily::LEGACY && reg.kind.code() == Some(x - b'A') => + { + Some(reg.size()) + } // specific segment regs - (x @ b'Q' ..= b'V', &CleanArg::Direct{ref reg, ..}) if - reg.kind.family() == RegFamily::SEGMENT && - reg.kind.code() == Some(x - b'Q') => Some(reg.size()), + (x @ b'Q'..=b'V', &CleanArg::Direct { ref reg, .. }) + if reg.kind.family() == RegFamily::SEGMENT && reg.kind.code() == Some(x - b'Q') => + { + Some(reg.size()) + } // CR8 can be specially referenced - (b'W', &CleanArg::Direct{ref reg, ..}) if - reg.kind == RegId::CR8 => Some(reg.size()), + (b'W', &CleanArg::Direct { ref reg, .. }) if reg.kind == RegId::CR8 => Some(reg.size()), // top of the fp stack is also often used - (b'X', &CleanArg::Direct{ref reg, ..}) if - reg.kind == RegId::ST0 => Some(reg.size()), + (b'X', &CleanArg::Direct { ref reg, .. }) if reg.kind == RegId::ST0 => Some(reg.size()), // generic legacy regs - (b'r', &CleanArg::Direct{ref reg, ..}) | - (b'v', &CleanArg::Direct{ref reg, ..}) if - reg.kind.family() == RegFamily::LEGACY || - reg.kind.family() == RegFamily::HIGHBYTE => Some(reg.size()), + (b'r', &CleanArg::Direct { ref reg, .. }) + | (b'v', &CleanArg::Direct { ref reg, .. }) + if reg.kind.family() == RegFamily::LEGACY + || reg.kind.family() == RegFamily::HIGHBYTE => + { + Some(reg.size()) + } // other reg types often mixed with memory refs - (b'x', &CleanArg::Direct{ref reg, ..}) | - (b'u', &CleanArg::Direct{ref reg, ..}) if - reg.kind.family() == RegFamily::MMX => Some(reg.size()), - (b'y', &CleanArg::Direct{ref reg, ..}) | - (b'w', &CleanArg::Direct{ref reg, ..}) if - reg.kind.family() == RegFamily::XMM => Some(reg.size()), + (b'x', &CleanArg::Direct { ref reg, .. }) + | (b'u', &CleanArg::Direct { ref reg, .. }) + if reg.kind.family() == RegFamily::MMX => + { + Some(reg.size()) + } + (b'y', &CleanArg::Direct { ref reg, .. }) + | (b'w', &CleanArg::Direct { ref reg, .. 
}) + if reg.kind.family() == RegFamily::XMM => + { + Some(reg.size()) + } // other reg types - (b'f', &CleanArg::Direct{ref reg, ..}) if - reg.kind.family() == RegFamily::FP => Some(reg.size()), - (b's', &CleanArg::Direct{ref reg, ..}) if - reg.kind.family() == RegFamily::SEGMENT => Some(reg.size()), - (b'c', &CleanArg::Direct{ref reg, ..}) if - reg.kind.family() == RegFamily::CONTROL => Some(reg.size()), - (b'd', &CleanArg::Direct{ref reg, ..}) if - reg.kind.family() == RegFamily::DEBUG => Some(reg.size()), - (b'b', &CleanArg::Direct{ref reg, ..}) if - reg.kind.family() == RegFamily::BOUND => Some(reg.size()), + (b'f', &CleanArg::Direct { ref reg, .. }) if reg.kind.family() == RegFamily::FP => { + Some(reg.size()) + } + (b's', &CleanArg::Direct { ref reg, .. }) + if reg.kind.family() == RegFamily::SEGMENT => + { + Some(reg.size()) + } + (b'c', &CleanArg::Direct { ref reg, .. }) + if reg.kind.family() == RegFamily::CONTROL => + { + Some(reg.size()) + } + (b'd', &CleanArg::Direct { ref reg, .. }) if reg.kind.family() == RegFamily::DEBUG => { + Some(reg.size()) + } + (b'b', &CleanArg::Direct { ref reg, .. }) if reg.kind.family() == RegFamily::BOUND => { + Some(reg.size()) + } // memory offsets - (b'm', &CleanArg::Indirect {size, ref index, ..}) | - (b'u' ..= b'w', &CleanArg::Indirect {size, ref index, ..}) if - index.is_none() || index.as_ref().unwrap().0.kind.family() != RegFamily::XMM => size, + ( + b'm', + &CleanArg::Indirect { + size, ref index, .. + }, + ) + | ( + b'u'..=b'w', + &CleanArg::Indirect { + size, ref index, .. + }, + ) if index.is_none() || index.as_ref().unwrap().0.kind.family() != RegFamily::XMM => { + size + } - (b'm', &CleanArg::IndirectJumpTarget {size, ..}) | - (b'u' ..= b'w', &CleanArg::IndirectJumpTarget {size, ..}) => size, + (b'm', &CleanArg::IndirectJumpTarget { size, .. }) + | (b'u'..=b'w', &CleanArg::IndirectJumpTarget { size, .. }) => size, // vsib addressing. as they have two sizes that must be checked they check one of the sizes here - (b'k', &CleanArg::Indirect {size, index: Some((ref index, _, _)), ..}) if - (size.is_none() || size == Some(Size::DWORD)) && - index.kind.family() == RegFamily::XMM => Some(index.size()), - (b'l', &CleanArg::Indirect {size, index: Some((ref index, _, _)), ..}) if - (size.is_none() || size == Some(Size::QWORD)) && - index.kind.family() == RegFamily::XMM => Some(index.size()), - _ => return Err("argument type mismatch") + ( + b'k', + &CleanArg::Indirect { + size, + index: Some((ref index, _, _)), + .. + }, + ) if (size.is_none() || size == Some(Size::DWORD)) + && index.kind.family() == RegFamily::XMM => + { + Some(index.size()) + } + ( + b'l', + &CleanArg::Indirect { + size, + index: Some((ref index, _, _)), + .. 
+ }, + ) if (size.is_none() || size == Some(Size::QWORD)) + && index.kind.family() == RegFamily::XMM => + { + Some(index.size()) + } + _ => return Err("argument type mismatch"), }; // if size is none it always matches (and will later be coerced to a more specific type if the match is successful) @@ -1126,27 +1369,26 @@ fn match_format_string(ctx: &Context, fmt: &Opdata, args: &[CleanArg]) -> Result (b'q', b'i') => size <= Size::QWORD, (b'*', b'i') => size <= Size::DWORD, // normal size matches - (b'b', _) => size == Size::BYTE, - (b'w', _) => size == Size::WORD, - (b'd', _) => size == Size::DWORD, - (b'q', _) => size == Size::QWORD, - (b'f', _) => size == Size::FWORD, - (b'p', _) => size == Size::PWORD, - (b'o', _) => size == Size::OWORD, - (b'h', _) => size == Size::HWORD, + (b'b', _) => size == Size::BYTE, + (b'w', _) => size == Size::WORD, + (b'd', _) => size == Size::DWORD, + (b'q', _) => size == Size::QWORD, + (b'f', _) => size == Size::FWORD, + (b'p', _) => size == Size::PWORD, + (b'o', _) => size == Size::OWORD, + (b'h', _) => size == Size::HWORD, // what is allowed for wildcards - (b'*', b'k') | - (b'*', b'l') | - (b'*', b'y') | - (b'*', b'w') => size == Size::OWORD || size == Size::HWORD, - (b'*', b'r') | - (b'*', b'A' ..= b'P') | - (b'*', b'v') => size == Size::WORD || size == Size::DWORD || size == Size::QWORD, + (b'*', b'k') | (b'*', b'l') | (b'*', b'y') | (b'*', b'w') => { + size == Size::OWORD || size == Size::HWORD + } + (b'*', b'r') | (b'*', b'A'..=b'P') | (b'*', b'v') => { + size == Size::WORD || size == Size::DWORD || size == Size::QWORD + } (b'*', b'm') => true, - (b'*', _) => panic!("Invalid size wildcard"), - (b'?', _) => true, - (b'!', _) => false, - _ => panic!("invalid format string") + (b'*', _) => panic!("Invalid size wildcard"), + (b'?', _) => true, + (b'!', _) => false, + _ => panic!("invalid format string"), } { return Err("argument size mismatch"); } @@ -1162,7 +1404,10 @@ fn match_format_string(ctx: &Context, fmt: &Opdata, args: &[CleanArg]) -> Result Ok(()) } -fn size_operands(fmt: &Opdata, args: Vec) -> Result<(Option, Vec), Option> { +fn size_operands( + fmt: &Opdata, + args: Vec, +) -> Result<(Option, Vec), Option> { // sizing operands requires two passes. // In the first one, we determine the effective operand size if necessary (if *'s are present) // In the second one, we create the final sized AST @@ -1178,15 +1423,15 @@ fn size_operands(fmt: &Opdata, args: Vec) -> Result<(Option, Vec } match *arg { - CleanArg::Direct {ref reg, ..} => { + CleanArg::Direct { ref reg, .. } => { has_arg = true; let size = reg.size(); - if op_size.map_or(false, |s| s != size,) { + if op_size.map_or(false, |s| s != size) { return Err(Some("Conflicting operand sizes".to_string())); } op_size = Some(size); - }, - CleanArg::IndirectJumpTarget {size, ..} => { + } + CleanArg::IndirectJumpTarget { size, .. } => { has_arg = true; if let Some(size) = size { if op_size.map_or(false, |s| s != size) { @@ -1195,7 +1440,11 @@ fn size_operands(fmt: &Opdata, args: Vec) -> Result<(Option, Vec op_size = Some(size); } } - CleanArg::Indirect {mut size, ref index, ..} => { + CleanArg::Indirect { + mut size, + ref index, + .. + } => { has_arg = true; // VSIB addressing if let Some((ref reg, _, _)) = *index { @@ -1210,9 +1459,8 @@ fn size_operands(fmt: &Opdata, args: Vec) -> Result<(Option, Vec } op_size = Some(size); } - }, - CleanArg::Immediate {size, ..} | - CleanArg::JumpTarget {size, ..} => { + } + CleanArg::Immediate { size, .. } | CleanArg::JumpTarget { size, .. 
} => { if im_size.is_some() { panic!("Bad formatting data? multiple immediates with wildcard size"); } @@ -1222,7 +1470,7 @@ fn size_operands(fmt: &Opdata, args: Vec) -> Result<(Option, Vec } if let Some(o) = op_size { - let ref_im_size = if o > Size::DWORD {Size::DWORD} else {o}; + let ref_im_size = if o > Size::DWORD { Size::DWORD } else { o }; if let Some(i) = im_size { if i > ref_im_size { return Err(Some("Immediate size mismatch".to_string())); @@ -1236,15 +1484,12 @@ fn size_operands(fmt: &Opdata, args: Vec) -> Result<(Option, Vec // fill-in loop. default should never be used. let mut new_args = Vec::new(); for (arg, (code, fsize)) in args.into_iter().zip(FormatStringIterator::new(&fmt.args)) { - //get the specified operand size from the format string let size = match (fsize, code) { (b'b', _) => Size::BYTE, (b'w', _) => Size::WORD, - (_, b'k') | - (b'd', _) => Size::DWORD, - (_, b'l') | - (b'q', _) => Size::QWORD, + (_, b'k') | (b'd', _) => Size::DWORD, + (_, b'l') | (b'q', _) => Size::QWORD, (b'f', _) => Size::FWORD, (b'p', _) => Size::PWORD, (b'o', _) => Size::OWORD, @@ -1252,66 +1497,83 @@ fn size_operands(fmt: &Opdata, args: Vec) -> Result<(Option, Vec (b'*', b'i') => im_size.unwrap(), (b'*', _) => op_size.unwrap(), (b'!', _) => Size::BYTE, // will never be used, placeholder - _ => unreachable!() + _ => unreachable!(), }; new_args.push(match arg { - CleanArg::Direct {span, reg} => - SizedArg::Direct {span, reg}, - CleanArg::JumpTarget {jump, ..} => - SizedArg::JumpTarget {jump, size}, - CleanArg::IndirectJumpTarget {jump, ..} => - SizedArg::IndirectJumpTarget {jump}, - CleanArg::Immediate {value, ..} => - SizedArg::Immediate {value, size}, - CleanArg::Indirect {span, disp_size, base, index, disp, ..} => - SizedArg::Indirect {span, disp_size, base, index, disp}, + CleanArg::Direct { span, reg } => SizedArg::Direct { span, reg }, + CleanArg::JumpTarget { jump, .. } => SizedArg::JumpTarget { jump, size }, + CleanArg::IndirectJumpTarget { jump, .. } => SizedArg::IndirectJumpTarget { jump }, + CleanArg::Immediate { value, .. } => SizedArg::Immediate { value, size }, + CleanArg::Indirect { + span, + disp_size, + base, + index, + disp, + .. 
+ } => SizedArg::Indirect { + span, + disp_size, + base, + index, + disp, + }, }); } Ok((op_size, new_args)) } -fn get_legacy_prefixes(fmt: &'static Opdata, idents: Vec) -> Result<(Option, Option), Option> { +fn get_legacy_prefixes( + fmt: &'static Opdata, + idents: Vec, +) -> Result<(Option, Option), Option> { let mut group1 = None; let mut group2 = None; for prefix in idents { let name = prefix.to_string(); let (group, value) = match name.as_str() { - "rep" => if fmt.flags.contains(Flags::REP) { - (&mut group1, 0xF3) - } else { - emit_error!(prefix, "Cannot use prefix {} on this instruction", name); - return Err(None); - }, - "repe" | - "repz" => if fmt.flags.contains(Flags::REPE) { - (&mut group1, 0xF3) - } else { - emit_error!(prefix, "Cannot use prefix {} on this instruction", name); - return Err(None); - }, - "repnz" | - "repne" => if fmt.flags.contains(Flags::REPE) { - (&mut group1, 0xF2) - } else { - emit_error!(prefix, "Cannot use prefix {} on this instruction", name); - return Err(None); - }, - "lock" => if fmt.flags.contains(Flags::LOCK) { - (&mut group1, 0xF0) - } else { - emit_error!(prefix, "Cannot use prefix {} on this instruction", name); - return Err(None); - }, - "ss" => (&mut group2, 0x36), - "cs" => (&mut group2, 0x2E), - "ds" => (&mut group2, 0x3E), - "es" => (&mut group2, 0x26), - "fs" => (&mut group2, 0x64), - "gs" => (&mut group2, 0x65), - _ => panic!("unimplemented prefix") + "rep" => { + if fmt.flags.contains(Flags::REP) { + (&mut group1, 0xF3) + } else { + emit_error!(prefix, "Cannot use prefix {} on this instruction", name); + return Err(None); + } + } + "repe" | "repz" => { + if fmt.flags.contains(Flags::REPE) { + (&mut group1, 0xF3) + } else { + emit_error!(prefix, "Cannot use prefix {} on this instruction", name); + return Err(None); + } + } + "repnz" | "repne" => { + if fmt.flags.contains(Flags::REPE) { + (&mut group1, 0xF2) + } else { + emit_error!(prefix, "Cannot use prefix {} on this instruction", name); + return Err(None); + } + } + "lock" => { + if fmt.flags.contains(Flags::LOCK) { + (&mut group1, 0xF0) + } else { + emit_error!(prefix, "Cannot use prefix {} on this instruction", name); + return Err(None); + } + } + "ss" => (&mut group2, 0x36), + "cs" => (&mut group2, 0x2E), + "ds" => (&mut group2, 0x3E), + "es" => (&mut group2, 0x26), + "fs" => (&mut group2, 0x64), + "gs" => (&mut group2, 0x65), + _ => panic!("unimplemented prefix"), }; if group.is_some() { emit_error!(prefix, "Duplicate prefix group"); @@ -1323,54 +1585,81 @@ fn get_legacy_prefixes(fmt: &'static Opdata, idents: Vec) -> Result< Ok((group1, group2)) } -fn check_rex(ctx: &Context, fmt: &'static Opdata, args: &[SizedArg], rex_w: bool) -> Result> { +fn check_rex( + ctx: &Context, + fmt: &'static Opdata, + args: &[SizedArg], + rex_w: bool, +) -> Result> { // performs checks for not encodable arg combinations // output arg indicates if a rex prefix can be encoded if ctx.mode == X86Mode::Protected { if rex_w { - return Err(Some("Does not support 64 bit operand size in 32-bit mode".to_string())); + return Err(Some( + "Does not support 64 bit operand size in 32-bit mode".to_string(), + )); } else { return Ok(false); } } - let mut requires_rex = rex_w; + let mut requires_rex = rex_w; let mut requires_no_rex = false; for (arg, (c, _)) in args.iter().zip(FormatStringIterator::new(fmt.args)) { // only scan args that are actually encoded - if let b'a' ..= b'z' = c { + if let b'a'..=b'z' = c { match *arg { - SizedArg::Direct {ref reg, ..} => { + SizedArg::Direct { ref reg, .. 
} => { if reg.kind.family() == RegFamily::HIGHBYTE { requires_no_rex = true; - - } else if reg.kind.is_extended() || (reg.size() == Size::BYTE && - (reg.kind == RegId::RSP || reg.kind == RegId::RBP || reg.kind == RegId::RSI || reg.kind == RegId::RDI)) { + } else if reg.kind.is_extended() + || (reg.size() == Size::BYTE + && (reg.kind == RegId::RSP + || reg.kind == RegId::RBP + || reg.kind == RegId::RSI + || reg.kind == RegId::RDI)) + { requires_rex = true; } - }, - SizedArg::Indirect {ref base, ref index, ..} => { + } + SizedArg::Indirect { + ref base, + ref index, + .. + } => { if let Some(ref reg) = *base { requires_rex = requires_rex || reg.kind.is_extended(); } if let Some((ref reg, _, _)) = *index { requires_rex = requires_rex || reg.kind.is_extended(); } - }, + } _ => (), } } } if requires_rex && requires_no_rex { - Err(Some("High byte register combined with extended registers or 64-bit operand size".to_string())) + Err(Some( + "High byte register combined with extended registers or 64-bit operand size" + .to_string(), + )) } else { Ok(requires_rex) } } -fn extract_args(fmt: &'static Opdata, args: Vec) -> (Option, Option, Option, Option, Vec) { +fn extract_args( + fmt: &'static Opdata, + args: Vec, +) -> ( + Option, + Option, + Option, + Option, + Vec, +) { // way operand order works: // if there's a memory/reg operand, this operand goes into modrm.r/m @@ -1389,21 +1678,25 @@ fn extract_args(fmt: &'static Opdata, args: Vec) -> (Option, for (arg, (c, _)) in args.into_iter().zip(FormatStringIterator::new(fmt.args)) { match c { - b'm' | b'u' | b'v' | b'w' | b'k' | b'l' => if memarg.is_some() { - panic!("multiple memory arguments in format string"); - } else { - memarg = Some(regs.len()); - regs.push(arg) - }, + b'm' | b'u' | b'v' | b'w' | b'k' | b'l' => { + if memarg.is_some() { + panic!("multiple memory arguments in format string"); + } else { + memarg = Some(regs.len()); + regs.push(arg) + } + } b'f' | b'x' | b'r' | b'y' | b'b' => regs.push(arg), - b'c' | b'd' | b's' => if regarg.is_some() { - panic!("multiple segment, debug or control registers in format string"); - } else { - regarg = Some(regs.len()); - regs.push(arg) - }, + b'c' | b'd' | b's' => { + if regarg.is_some() { + panic!("multiple segment, debug or control registers in format string"); + } else { + regarg = Some(regs.len()); + regs.push(arg) + } + } b'i' | b'o' => immediates.push(arg), - _ => () // hardcoded regs don't have to be encoded + _ => (), // hardcoded regs don't have to be encoded } } @@ -1476,22 +1769,27 @@ fn encode_scale(scale: isize) -> Option { 2 => Some(1), 4 => Some(2), 8 => Some(3), - _ => None + _ => None, } } fn compile_rex(buffer: &mut Vec, rex_w: bool, reg: &Option, rm: &Option) { - let mut reg_k = RegKind::from_number(0); + let mut reg_k = RegKind::from_number(0); let mut index_k = RegKind::from_number(0); - let mut base_k = RegKind::from_number(0); + let mut base_k = RegKind::from_number(0); - if let Some(SizedArg::Direct {ref reg, ..}) = *reg { + if let Some(SizedArg::Direct { ref reg, .. }) = *reg { reg_k = reg.kind.clone(); } - if let Some(SizedArg::Direct {ref reg, ..}) = *rm { + if let Some(SizedArg::Direct { ref reg, .. }) = *rm { base_k = reg.kind.clone(); } - if let Some(SizedArg::Indirect {ref base, ref index, ..} ) = *rm { + if let Some(SizedArg::Indirect { + ref base, + ref index, + .. 
+    }) = *rm
+    {
         if let Some(ref base) = *base {
             base_k = base.kind.clone();
         }
@@ -1500,10 +1798,11 @@ fn compile_rex(buffer: &mut Vec<Stmt>, rex_w: bool, reg: &Option<SizedArg>, rm:
         }
     }
 
-    let rex = 0x40 | (rex_w as u8) << 3 |
-        (reg_k.encode() & 8) >> 1 |
-        (index_k.encode() & 8) >> 2 |
-        (base_k.encode() & 8) >> 3 ;
+    let rex = 0x40
+        | (rex_w as u8) << 3
+        | (reg_k.encode() & 8) >> 1
+        | (index_k.encode() & 8) >> 2
+        | (base_k.encode() & 8) >> 3;
     if !reg_k.is_dynamic() && !index_k.is_dynamic() && !base_k.is_dynamic() {
         buffer.push(Stmt::u8(rex));
         return;
@@ -1523,22 +1822,37 @@ fn compile_rex(buffer: &mut Vec<Stmt>, rex_w: bool, reg: &Option<SizedArg>, rm:
     buffer.push(Stmt::ExprUnsigned(rex, Size::BYTE));
 }
 
-fn compile_vex_xop(mode: X86Mode, buffer: &mut Vec<Stmt>, data: &'static Opdata, reg: &Option<SizedArg>,
-rm: &Option<SizedArg>, map_sel: u8, rex_w: bool, vvvv: &Option<SizedArg>, vex_l: bool, prefix: u8) {
-    let mut reg_k = RegKind::from_number(0);
+fn compile_vex_xop(
+    mode: X86Mode,
+    buffer: &mut Vec<Stmt>,
+    data: &'static Opdata,
+    reg: &Option<SizedArg>,
+    rm: &Option<SizedArg>,
+    map_sel: u8,
+    rex_w: bool,
+    vvvv: &Option<SizedArg>,
+    vex_l: bool,
+    prefix: u8,
+) {
+    let mut reg_k = RegKind::from_number(0);
     let mut index_k = RegKind::from_number(0);
-    let mut base_k = RegKind::from_number(0);
-    let mut vvvv_k = RegKind::from_number(0);
+    let mut base_k = RegKind::from_number(0);
+    let mut vvvv_k = RegKind::from_number(0);
 
     let byte1 = match mode {
         X86Mode::Long => {
-            if let Some(SizedArg::Direct {ref reg, ..}) = *reg {
+            if let Some(SizedArg::Direct { ref reg, .. }) = *reg {
                 reg_k = reg.kind.clone();
             }
-            if let Some(SizedArg::Direct {ref reg, ..}) = *rm {
+            if let Some(SizedArg::Direct { ref reg, .. }) = *rm {
                 base_k = reg.kind.clone();
             }
-            if let Some(SizedArg::Indirect {ref base, ref index, ..}) = *rm {
+            if let Some(SizedArg::Indirect {
+                ref base,
+                ref index,
+                ..
+            }) = *rm
+            {
                 if let Some(ref base) = *base {
                     base_k = base.kind.clone();
                 }
@@ -1547,27 +1861,26 @@ rm: &Option<SizedArg>, map_sel: u8, rex_w: bool, vvvv: &Option<SizedArg>, vex_l:
             }
         }
 
-            (map_sel & 0x1F) |
-            (!reg_k.encode() & 8) << 4 |
-            (!index_k.encode() & 8) << 3 |
-            (!base_k.encode() & 8) << 2
-        },
-        X86Mode::Protected => {
-            (map_sel & 0x1f) | 0xE0
+            (map_sel & 0x1F)
+                | (!reg_k.encode() & 8) << 4
+                | (!index_k.encode() & 8) << 3
+                | (!base_k.encode() & 8) << 2
         }
+        X86Mode::Protected => (map_sel & 0x1f) | 0xE0,
     };
 
-    if let Some(SizedArg::Direct {ref reg, ..}) = *vvvv {
+    if let Some(SizedArg::Direct { ref reg, .. }) = *vvvv {
         vvvv_k = reg.kind.clone();
     }
 
-    let byte2 = (prefix & 0x3) |
-        (rex_w as u8) << 7 |
-        (!vvvv_k.encode() & 0xF) << 3 |
-        (vex_l as u8) << 2 ;
+    let byte2 =
+        (prefix & 0x3) | (rex_w as u8) << 7 | (!vvvv_k.encode() & 0xF) << 3 | (vex_l as u8) << 2;
 
-    if data.flags.contains(Flags::VEX_OP) && (byte1 & 0x7F) == 0x61 && (byte2 & 0x80) == 0 &&
-        ((!index_k.is_dynamic() && !base_k.is_dynamic()) || mode == X86Mode::Protected) {
+    if data.flags.contains(Flags::VEX_OP)
+        && (byte1 & 0x7F) == 0x61
+        && (byte2 & 0x80) == 0
+        && ((!index_k.is_dynamic() && !base_k.is_dynamic()) || mode == X86Mode::Protected)
+    {
         // 2-byte vex
         buffer.push(Stmt::u8(0xC5));
 
@@ -1588,9 +1901,14 @@ rm: &Option<SizedArg>, map_sel: u8, rex_w: bool, vvvv: &Option<SizedArg>, vex_l:
         return;
     }
 
-    buffer.push(Stmt::u8(if data.flags.contains(Flags::VEX_OP) {0xC4} else {0x8F}));
+    buffer.push(Stmt::u8(if data.flags.contains(Flags::VEX_OP) {
+        0xC4
+    } else {
+        0x8F
+    }));
 
-    if mode == X86Mode::Long && (reg_k.is_dynamic() || index_k.is_dynamic() || base_k.is_dynamic()) {
+    if mode == X86Mode::Long && (reg_k.is_dynamic() || index_k.is_dynamic() || base_k.is_dynamic())
+    {
         let mut byte1: TokenTree = proc_macro2::Literal::u8_suffixed(byte1).into();
 
         if let RegKind::Dynamic(_, expr) = reg_k {
@@ -1620,9 +1938,7 @@ rm: &Option<SizedArg>, map_sel: u8, rex_w: bool, vvvv: &Option<SizedArg>, vex_l:
 }
 
 fn compile_modrm_sib(buffer: &mut Vec<Stmt>, mode: u8, reg1: RegKind, reg2: RegKind) {
-    let byte = mode << 6 |
-        (reg1.encode() & 7) << 3 |
-        (reg2.encode() & 7) ;
+    let byte = mode << 6 | (reg1.encode() & 7) << 3 | (reg2.encode() & 7);
 
     if !reg1.is_dynamic() && !reg2.is_dynamic() {
         buffer.push(Stmt::u8(byte));
@@ -1640,9 +1956,14 @@ fn compile_modrm_sib(buffer: &mut Vec<Stmt>, mode: u8, reg1: RegKind, reg2: RegK
     buffer.push(Stmt::ExprUnsigned(byte, Size::BYTE));
 }
 
-fn compile_sib_dynscale(buffer: &mut Vec<Stmt>, scale: u8, scale_expr: syn::Expr, reg1: RegKind, reg2: RegKind) {
-    let byte = (reg1.encode() & 7) << 3 |
-        (reg2.encode() & 7) ;
+fn compile_sib_dynscale(
+    buffer: &mut Vec<Stmt>,
+    scale: u8,
+    scale_expr: syn::Expr,
+    reg1: RegKind,
+    reg2: RegKind,
+) {
+    let byte = (reg1.encode() & 7) << 3 | (reg2.encode() & 7);
 
     let mut byte: TokenTree = proc_macro2::Literal::u8_suffixed(byte).into();
     let scale: TokenTree = proc_macro2::Literal::u8_unsuffixed(scale).into();
@@ -1658,10 +1979,10 @@ fn compile_sib_dynscale(buffer: &mut Vec<Stmt>, scale: u8, scale_expr: syn::Expr
     let scale_expr = delimited(scale_expr);
 
     let (expr1, expr2) = serialize::expr_dynscale(
-        &delimited(quote_spanned!{ span=>
+        &delimited(quote_spanned!
{ span=> #scale_expr * #scale }), - &byte + &byte, ); buffer.push(Stmt::PrefixStmt(expr1)); buffer.push(Stmt::ExprUnsigned(expr2, Size::BYTE)); diff --git a/plugin/src/arch/x64/debug.rs b/plugin/src/arch/x64/debug.rs index 8780139d42..77ccee991a 100644 --- a/plugin/src/arch/x64/debug.rs +++ b/plugin/src/arch/x64/debug.rs @@ -1,6 +1,6 @@ use std::borrow::Cow; -use super::compiler::{Opdata, FormatStringIterator}; +use super::compiler::{FormatStringIterator, Opdata}; use super::x64data::Flags; pub fn format_opdata_list(name: &str, data: &[Opdata]) -> String { @@ -12,12 +12,19 @@ pub fn format_opdata_list(name: &str, data: &[Opdata]) -> String { } pub fn format_opdata(name: &str, data: &Opdata) -> Vec { - let opsizes = if data.flags.contains(Flags::AUTO_SIZE) {&b"qwd"[..]} - else if data.flags.contains(Flags::AUTO_NO32) {&b"qw"[..]} - else if data.flags.contains(Flags::AUTO_REXW) {&b"qd"[..]} - else if data.flags.contains(Flags::AUTO_VEXL) {&b"ho"[..]} - else if name == "monitorx" {&b"qwd"[..]} - else {&b"!"[..]}; + let opsizes = if data.flags.contains(Flags::AUTO_SIZE) { + &b"qwd"[..] + } else if data.flags.contains(Flags::AUTO_NO32) { + &b"qw"[..] + } else if data.flags.contains(Flags::AUTO_REXW) { + &b"qd"[..] + } else if data.flags.contains(Flags::AUTO_VEXL) { + &b"ho"[..] + } else if name == "monitorx" { + &b"qwd"[..] + } else { + &b"!"[..] + }; let mut forms = Vec::new(); for opsize in opsizes.iter().cloned() { @@ -35,13 +42,13 @@ pub fn format_opdata(name: &str, data: &Opdata) -> Vec { buf.push_str(&format_arg(ty, size, opsize)) } if data.flags.contains(Flags::X86_ONLY) { - for _ in buf.len() .. 45 { + for _ in buf.len()..45 { buf.push(' '); } buf.push_str(" (x86 only)"); } if !data.features.is_empty() { - for _ in buf.len() .. 45 { + for _ in buf.len()..45 { buf.push(' '); } buf.push_str(&format!(" ({})", data.features)); @@ -51,8 +58,10 @@ pub fn format_opdata(name: &str, data: &Opdata) -> Vec { forms } -static REGS: [&str; 16] = ["a", "d", "c", "b", "bp", "sp", "si", "di", - "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"]; +static REGS: [&str; 16] = [ + "a", "d", "c", "b", "bp", "sp", "si", "di", "r8", "r9", "r10", "r11", "r12", "r13", "r14", + "r15", +]; static SEGREGS: [&str; 6] = ["es", "cs", "ss", "ds", "fs", "gs"]; fn format_arg(ty: u8, mut size: u8, opsize: u8) -> Cow<'static, str> { @@ -74,48 +83,76 @@ fn format_arg(ty: u8, mut size: u8, opsize: u8) -> Cow<'static, str> { b'p' => "80", b'o' => "128", b'h' => "256", - _ => "" + _ => "", } } match ty { - b'i' => format!("imm{}", format_size(size)).into(), - b'o' => format!("rel{}off", format_size(size)).into(), - b'm' => format!("mem{}", format_size(size)).into(), + b'i' => format!("imm{}", format_size(size)).into(), + b'o' => format!("rel{}off", format_size(size)).into(), + b'm' => format!("mem{}", format_size(size)).into(), b'k' => format!("vm32addr{}", format_size(size)).into(), b'l' => format!("vm64addr{}", format_size(size)).into(), - b'r' => format!("reg{}", format_size(size)).into(), + b'r' => format!("reg{}", format_size(size)).into(), b'f' => "st".into(), b'x' => "mm".into(), - b'y' => (if size == b'h' {"ymm"} else {"xmm"}).into(), + b'y' => (if size == b'h' { "ymm" } else { "xmm" }).into(), b's' => "segreg".into(), b'c' => "creg".into(), b'd' => "dreg".into(), b'b' => "bndreg".into(), b'v' => format!("reg/mem{}", format_size(size)).into(), b'u' => format!("mm/mem{}", format_size(size)).into(), - b'w' => format!("{}mm/mem{}", if size == b'h' {"y"} else {"x"}, format_size(size)).into(), + b'w' => format!( + 
"{}mm/mem{}", + if size == b'h' { "y" } else { "x" }, + format_size(size) + ) + .into(), b'A'..=b'P' => { let i = ty as usize - 'A' as usize; match size { - b'b' => if i < 4 { format!("{}l", REGS[i]).into() } - else if i < 8 { REGS[i].into() } - else { format!("{}b", REGS[i]).into() }, - b'w' => if i < 4 { format!("{}x", REGS[i]).into() } - else if i < 8 { REGS[i].into() } - else { format!("{}w", REGS[i]).into() }, - b'd' => if i < 4 { format!("e{}x",REGS[i]).into() } - else if i < 8 { format!("e{}", REGS[i]).into() } - else { format!("{}d", REGS[i]).into() }, - b'q' => if i < 4 { format!("r{}x",REGS[i]).into() } - else { format!("r{}", REGS[i]).into() }, - _ => panic!("invalid formatting data") + b'b' => { + if i < 4 { + format!("{}l", REGS[i]).into() + } else if i < 8 { + REGS[i].into() + } else { + format!("{}b", REGS[i]).into() + } + } + b'w' => { + if i < 4 { + format!("{}x", REGS[i]).into() + } else if i < 8 { + REGS[i].into() + } else { + format!("{}w", REGS[i]).into() + } + } + b'd' => { + if i < 4 { + format!("e{}x", REGS[i]).into() + } else if i < 8 { + format!("e{}", REGS[i]).into() + } else { + format!("{}d", REGS[i]).into() + } + } + b'q' => { + if i < 4 { + format!("r{}x", REGS[i]).into() + } else { + format!("r{}", REGS[i]).into() + } + } + _ => panic!("invalid formatting data"), } - }, + } b'Q'..=b'V' => SEGREGS[ty as usize - 'Q' as usize].into(), b'W' => "cr8".into(), b'X' => "st0".into(), - _ => panic!("invalid formatting data") + _ => panic!("invalid formatting data"), } } @@ -130,7 +167,8 @@ pub fn create_opmap() -> String { // get the data for this mnemnonic let data = super::x64data::get_mnemnonic_data(mnemnonic).unwrap(); // format the data for the opmap docs - let mut formats = data.into_iter() + let mut formats = data + .into_iter() .map(|x| format_opdata(mnemnonic, x)) .flat_map(|x| x) .map(|x| x.replace(">>> ", "")) diff --git a/plugin/src/arch/x64/mod.rs b/plugin/src/arch/x64/mod.rs index 007d415113..9eadc72f21 100644 --- a/plugin/src/arch/x64/mod.rs +++ b/plugin/src/arch/x64/mod.rs @@ -1,15 +1,15 @@ -use syn::parse; use proc_macro_error::emit_error; +use syn::parse; mod ast; mod compiler; -mod parser; mod debug; +mod parser; mod x64data; -use crate::State; use crate::arch::Arch; -use crate::common::{Size, Stmt, Jump}; +use crate::common::{Jump, Size, Stmt}; +use crate::State; #[cfg(feature = "dynasm_opmap")] pub use debug::create_opmap; @@ -17,23 +17,25 @@ pub use debug::create_opmap; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum X86Mode { Long, - Protected + Protected, } struct Context<'a, 'b: 'a> { pub state: &'a mut State<'b>, pub mode: X86Mode, - pub features: x64data::Features + pub features: x64data::Features, } #[derive(Clone, Debug)] pub struct Archx64 { - features: x64data::Features + features: x64data::Features, } impl Default for Archx64 { fn default() -> Archx64 { - Archx64 { features: x64data::Features::all() } + Archx64 { + features: x64data::Features::all(), + } } } @@ -48,7 +50,11 @@ impl Arch for Archx64 { new_features |= match x64data::Features::from_str(&ident.to_string()) { Some(feature) => feature, None => { - emit_error!(ident, "Architecture x64 does not support feature '{}'", ident); + emit_error!( + ident, + "Architecture x64 does not support feature '{}'", + ident + ); continue; } } @@ -66,11 +72,15 @@ impl Arch for Archx64 { 0x90 } - fn compile_instruction(&self, state: &mut State, input: parse::ParseStream) -> parse::Result<()> { + fn compile_instruction( + &self, + state: &mut State, + input: parse::ParseStream, + ) -> 
parse::Result<()> { let mut ctx = Context { state, mode: X86Mode::Long, - features: self.features + features: self.features, }; let (instruction, args) = parser::parse_instruction(&mut ctx, input)?; let span = instruction.span; @@ -84,12 +94,14 @@ impl Arch for Archx64 { #[derive(Clone, Debug)] pub struct Archx86 { - features: x64data::Features + features: x64data::Features, } impl Default for Archx86 { fn default() -> Archx86 { - Archx86 { features: x64data::Features::all() } + Archx86 { + features: x64data::Features::all(), + } } } @@ -104,7 +116,11 @@ impl Arch for Archx86 { new_features |= match x64data::Features::from_str(&ident.to_string()) { Some(feature) => feature, None => { - emit_error!(ident, "Architecture x86 does not support feature '{}'", ident); + emit_error!( + ident, + "Architecture x86 does not support feature '{}'", + ident + ); continue; } } @@ -122,11 +138,15 @@ impl Arch for Archx86 { 0x90 } - fn compile_instruction(&self, state: &mut State, input: parse::ParseStream) -> parse::Result<()> { + fn compile_instruction( + &self, + state: &mut State, + input: parse::ParseStream, + ) -> parse::Result<()> { let mut ctx = Context { state, mode: X86Mode::Protected, - features: self.features + features: self.features, }; let (instruction, args) = parser::parse_instruction(&mut ctx, input)?; let span = instruction.span; diff --git a/plugin/src/arch/x64/parser.rs b/plugin/src/arch/x64/parser.rs index 148b28611f..669459ea68 100644 --- a/plugin/src/arch/x64/parser.rs +++ b/plugin/src/arch/x64/parser.rs @@ -1,15 +1,17 @@ -use syn::{parse, Token}; -use syn::spanned::Spanned; use proc_macro2::Span; use proc_macro_error::emit_error; +use syn::spanned::Spanned; +use syn::{parse, Token}; use lazy_static::lazy_static; use crate::common::Size; -use crate::parse_helpers::{eat_pseudo_keyword, parse_ident_or_rust_keyword, as_ident, ParseOptExt}; +use crate::parse_helpers::{ + as_ident, eat_pseudo_keyword, parse_ident_or_rust_keyword, ParseOptExt, +}; +use super::ast::{Instruction, MemoryRefItem, RawArg, RegFamily, RegId, Register}; use super::{Context, X86Mode}; -use super::ast::{Instruction, RawArg, Register, RegId, RegFamily, MemoryRefItem}; use std::collections::HashMap; @@ -19,7 +21,10 @@ use std::collections::HashMap; // parses a full instruction // syntax for a single op: PREFIX* ident (SIZE? expr ("," SIZE? expr)*)? 
";" -pub(super) fn parse_instruction(ctx: &mut Context, input: parse::ParseStream) -> parse::Result<(Instruction, Vec)> { +pub(super) fn parse_instruction( + ctx: &mut Context, + input: parse::ParseStream, +) -> parse::Result<(Instruction, Vec)> { let span = input.cursor().span(); let mut ops = Vec::new(); @@ -47,22 +52,13 @@ pub(super) fn parse_instruction(ctx: &mut Context, input: parse::ParseStream) -> // let span = span.join(input.cursor().span()); // FIXME can't join spans ATM - Ok(( - Instruction { - idents: ops, - span - }, - args - )) + Ok((Instruction { idents: ops, span }, args)) } /// checks if the given ident is a valid x86 prefix fn is_prefix(ident: &syn::Ident) -> bool { const PREFIXES: [&str; 12] = [ - "lock", - "rep", "repe", "repz", - "repne", "repnz", - "ss", "cs", "ds", "es", "fs", "gs" + "lock", "rep", "repe", "repz", "repne", "repnz", "ss", "cs", "ds", "es", "fs", "gs", ]; PREFIXES.contains(&ident.to_string().as_str()) @@ -79,7 +75,7 @@ fn eat_size_hint(ctx: &Context, input: parse::ParseStream) -> Option { ("QWORD", Size::QWORD), ("TWORD", Size::PWORD), ("OWORD", Size::OWORD), - ("YWORD", Size::HWORD) + ("YWORD", Size::HWORD), ]; const X64_SIZES: [(&str, Size); 9] = [ ("BYTE", Size::BYTE), @@ -90,12 +86,12 @@ fn eat_size_hint(ctx: &Context, input: parse::ParseStream) -> Option { ("QWORD", Size::QWORD), ("TWORD", Size::PWORD), ("OWORD", Size::OWORD), - ("YWORD", Size::HWORD) + ("YWORD", Size::HWORD), ]; let sizes = match ctx.mode { X86Mode::Protected => &X86_SIZES, - X86Mode::Long => &X64_SIZES + X86Mode::Long => &X64_SIZES, }; for &(kw, size) in sizes { if eat_pseudo_keyword(input, kw) { @@ -114,10 +110,7 @@ fn parse_arg(ctx: &mut Context, input: parse::ParseStream) -> parse::Result parse::Result parse::Result parse::Result Option<(Span, Register)> { } let (reg, size) = match ctx.mode { - X86Mode::Long => X64_REGISTERS.get(&name).cloned(), - X86Mode::Protected => X86_REGISTERS.get(&name).cloned() + X86Mode::Long => X64_REGISTERS.get(&name).cloned(), + X86Mode::Protected => X86_REGISTERS.get(&name).cloned(), }?; - Some(( - path.span(), - Register::new_static(size, reg) - )) - - } else if let syn::Expr::Call(syn::ExprCall {ref func, ref args, ..}) = expr { + Some((path.span(), Register::new_static(size, reg))) + } else if let syn::Expr::Call(syn::ExprCall { + ref func, ref args, .. + }) = expr + { // dynamically chosen registers ( ident(expr) ) if args.len() != 1 { return None; @@ -260,13 +242,13 @@ fn parse_reg(ctx: &Context, expr: &syn::Expr) -> Option<(Span, Register)> { let name = called.to_string(); let name = name.as_str(); let (size, family) = match ctx.mode { - X86Mode::Long => X64_FAMILIES.get(&name).cloned(), - X86Mode::Protected => X86_FAMILIES.get(&name).cloned() + X86Mode::Long => X64_FAMILIES.get(&name).cloned(), + X86Mode::Protected => X86_FAMILIES.get(&name).cloned(), }?; Some(( expr.span(), // FIXME:can't join spans atm - Register::new_dynamic(size, family, args[0].clone()) + Register::new_dynamic(size, family, args[0].clone()), )) } else { None @@ -287,10 +269,16 @@ fn parse_adds(ctx: &Context, expr: syn::Expr) -> Vec { items.push(MemoryRefItem::Register(reg)); continue; } - if let syn::Expr::Binary(syn::ExprBinary { op: syn::BinOp::Mul(_), ref left, ref right, .. } ) = node { + if let syn::Expr::Binary(syn::ExprBinary { + op: syn::BinOp::Mul(_), + ref left, + ref right, + .. 
+ }) = node + { // reg * const if let Some((_, reg)) = parse_reg(ctx, left) { - if let syn::Expr::Lit(syn::ExprLit {ref lit, ..}) = **right { + if let syn::Expr::Lit(syn::ExprLit { ref lit, .. }) = **right { if let syn::Lit::Int(lit) = lit { if let Ok(value) = lit.base10_parse::() { items.push(MemoryRefItem::ScaledRegister(reg, value)); @@ -300,7 +288,7 @@ fn parse_adds(ctx: &Context, expr: syn::Expr) -> Vec { } } // const * reg if let Some((_, reg)) = parse_reg(ctx, right) { - if let syn::Expr::Lit(syn::ExprLit {ref lit, ..}) = **left { + if let syn::Expr::Lit(syn::ExprLit { ref lit, .. }) = **left { if let syn::Lit::Int(lit) = lit { if let Ok(value) = lit.base10_parse::() { items.push(MemoryRefItem::ScaledRegister(reg, value)); @@ -318,132 +306,142 @@ fn parse_adds(ctx: &Context, expr: syn::Expr) -> Vec { /// Takes an expression and splits all added (and subtracted) components fn collect_adds(node: syn::Expr, collection: &mut Vec) { - if let syn::Expr::Binary(syn::ExprBinary { op: syn::BinOp::Add(_), left, right, .. } ) = node { + if let syn::Expr::Binary(syn::ExprBinary { + op: syn::BinOp::Add(_), + left, + right, + .. + }) = node + { collect_adds(*left, collection); collect_adds(*right, collection); - } else if let syn::Expr::Binary(syn::ExprBinary { op: syn::BinOp::Sub(sub), left, right, .. } ) = node { + } else if let syn::Expr::Binary(syn::ExprBinary { + op: syn::BinOp::Sub(sub), + left, + right, + .. + }) = node + { collect_adds(*left, collection); - collection.push(syn::Expr::Unary(syn::ExprUnary { op: syn::UnOp::Neg(sub), expr: right, attrs: Vec::new() } )); + collection.push(syn::Expr::Unary(syn::ExprUnary { + op: syn::UnOp::Neg(sub), + expr: right, + attrs: Vec::new(), + })); } else { collection.push(node); } } // why -lazy_static!{ +lazy_static! 
{ static ref X64_REGISTERS: HashMap<&'static str, (RegId, Size)> = { use self::RegId::*; use crate::common::Size::*; static MAP: &[(&str, (RegId, Size))] = &[ ("rax", (RAX, QWORD)), - ("r0" , (RAX, QWORD)), + ("r0", (RAX, QWORD)), ("rcx", (RCX, QWORD)), - ("r1" , (RCX, QWORD)), + ("r1", (RCX, QWORD)), ("rdx", (RDX, QWORD)), - ("r2" , (RDX, QWORD)), + ("r2", (RDX, QWORD)), ("rbx", (RBX, QWORD)), - ("r3" , (RBX, QWORD)), + ("r3", (RBX, QWORD)), ("rsp", (RSP, QWORD)), - ("r4" , (RSP, QWORD)), + ("r4", (RSP, QWORD)), ("rbp", (RBP, QWORD)), - ("r5" , (RBP, QWORD)), + ("r5", (RBP, QWORD)), ("rsi", (RSI, QWORD)), - ("r6" , (RSI, QWORD)), + ("r6", (RSI, QWORD)), ("rdi", (RDI, QWORD)), - ("r7" , (RDI, QWORD)), - ("r8" , (R8, QWORD)), - ("r9" , (R9, QWORD)), + ("r7", (RDI, QWORD)), + ("r8", (R8, QWORD)), + ("r9", (R9, QWORD)), ("r10", (R10, QWORD)), ("r11", (R11, QWORD)), ("r12", (R12, QWORD)), ("r13", (R13, QWORD)), ("r14", (R14, QWORD)), ("r15", (R15, QWORD)), - - ("eax" , (RAX, DWORD)), - ("r0d" , (RAX, DWORD)), - ("ecx" , (RCX, DWORD)), - ("r1d" , (RCX, DWORD)), - ("edx" , (RDX, DWORD)), - ("r2d" , (RDX, DWORD)), - ("ebx" , (RBX, DWORD)), - ("r3d" , (RBX, DWORD)), - ("esp" , (RSP, DWORD)), - ("r4d" , (RSP, DWORD)), - ("ebp" , (RBP, DWORD)), - ("r5d" , (RBP, DWORD)), - ("esi" , (RSI, DWORD)), - ("r6d" , (RSI, DWORD)), - ("edi" , (RDI, DWORD)), - ("r7d" , (RDI, DWORD)), - ("r8d" , (R8, DWORD)), - ("r9d" , (R9, DWORD)), + ("eax", (RAX, DWORD)), + ("r0d", (RAX, DWORD)), + ("ecx", (RCX, DWORD)), + ("r1d", (RCX, DWORD)), + ("edx", (RDX, DWORD)), + ("r2d", (RDX, DWORD)), + ("ebx", (RBX, DWORD)), + ("r3d", (RBX, DWORD)), + ("esp", (RSP, DWORD)), + ("r4d", (RSP, DWORD)), + ("ebp", (RBP, DWORD)), + ("r5d", (RBP, DWORD)), + ("esi", (RSI, DWORD)), + ("r6d", (RSI, DWORD)), + ("edi", (RDI, DWORD)), + ("r7d", (RDI, DWORD)), + ("r8d", (R8, DWORD)), + ("r9d", (R9, DWORD)), ("r10d", (R10, DWORD)), ("r11d", (R11, DWORD)), ("r12d", (R12, DWORD)), ("r13d", (R13, DWORD)), ("r14d", (R14, DWORD)), ("r15d", (R15, DWORD)), - - ("ax" , (RAX, WORD)), - ("r0w" , (RAX, WORD)), - ("cx" , (RCX, WORD)), - ("r1w" , (RCX, WORD)), - ("dx" , (RDX, WORD)), - ("r2w" , (RDX, WORD)), - ("bx" , (RBX, WORD)), - ("r3w" , (RBX, WORD)), - ("sp" , (RSP, WORD)), - ("r4w" , (RSP, WORD)), - ("bp" , (RBP, WORD)), - ("r5w" , (RBP, WORD)), - ("si" , (RSI, WORD)), - ("r6w" , (RSI, WORD)), - ("di" , (RDI, WORD)), - ("r7w" , (RDI, WORD)), - ("r8w" , (R8, WORD)), - ("r9w" , (R9, WORD)), + ("ax", (RAX, WORD)), + ("r0w", (RAX, WORD)), + ("cx", (RCX, WORD)), + ("r1w", (RCX, WORD)), + ("dx", (RDX, WORD)), + ("r2w", (RDX, WORD)), + ("bx", (RBX, WORD)), + ("r3w", (RBX, WORD)), + ("sp", (RSP, WORD)), + ("r4w", (RSP, WORD)), + ("bp", (RBP, WORD)), + ("r5w", (RBP, WORD)), + ("si", (RSI, WORD)), + ("r6w", (RSI, WORD)), + ("di", (RDI, WORD)), + ("r7w", (RDI, WORD)), + ("r8w", (R8, WORD)), + ("r9w", (R9, WORD)), ("r10w", (R10, WORD)), ("r11w", (R11, WORD)), ("r12w", (R12, WORD)), ("r13w", (R13, WORD)), ("r14w", (R14, WORD)), ("r15w", (R15, WORD)), - - ("al" , (RAX, BYTE)), - ("r0b" , (RAX, BYTE)), - ("cl" , (RCX, BYTE)), - ("r1b" , (RCX, BYTE)), - ("dl" , (RDX, BYTE)), - ("r2b" , (RDX, BYTE)), - ("bl" , (RBX, BYTE)), - ("r3b" , (RBX, BYTE)), - ("spl" , (RSP, BYTE)), - ("r4b" , (RSP, BYTE)), - ("bpl" , (RBP, BYTE)), - ("r5b" , (RBP, BYTE)), - ("sil" , (RSI, BYTE)), - ("r6b" , (RSI, BYTE)), - ("dil" , (RDI, BYTE)), - ("r7b" , (RDI, BYTE)), - ("r8b" , (R8, BYTE)), - ("r9b" , (R9, BYTE)), + ("al", (RAX, BYTE)), + ("r0b", (RAX, BYTE)), + ("cl", (RCX, BYTE)), + ("r1b", 
(RCX, BYTE)), + ("dl", (RDX, BYTE)), + ("r2b", (RDX, BYTE)), + ("bl", (RBX, BYTE)), + ("r3b", (RBX, BYTE)), + ("spl", (RSP, BYTE)), + ("r4b", (RSP, BYTE)), + ("bpl", (RBP, BYTE)), + ("r5b", (RBP, BYTE)), + ("sil", (RSI, BYTE)), + ("r6b", (RSI, BYTE)), + ("dil", (RDI, BYTE)), + ("r7b", (RDI, BYTE)), + ("r8b", (R8, BYTE)), + ("r9b", (R9, BYTE)), ("r10b", (R10, BYTE)), ("r11b", (R11, BYTE)), ("r12b", (R12, BYTE)), ("r13b", (R13, BYTE)), ("r14b", (R14, BYTE)), ("r15b", (R15, BYTE)), - ("rip", (RIP, QWORD)), ("eip", (RIP, DWORD)), - ("ah", (AH, BYTE)), ("ch", (CH, BYTE)), ("dh", (DH, BYTE)), ("bh", (BH, BYTE)), - ("st0", (ST0, PWORD)), ("st1", (ST1, PWORD)), ("st2", (ST2, PWORD)), @@ -452,7 +450,6 @@ lazy_static!{ ("st5", (ST5, PWORD)), ("st6", (ST6, PWORD)), ("st7", (ST7, PWORD)), - ("mm0", (MMX0, QWORD)), ("mm1", (MMX1, QWORD)), ("mm2", (MMX2, QWORD)), @@ -461,82 +458,76 @@ lazy_static!{ ("mm5", (MMX5, QWORD)), ("mm6", (MMX6, QWORD)), ("mm7", (MMX7, QWORD)), - - ("xmm0" , (XMM0 , OWORD)), - ("xmm1" , (XMM1 , OWORD)), - ("xmm2" , (XMM2 , OWORD)), - ("xmm3" , (XMM3 , OWORD)), - ("xmm4" , (XMM4 , OWORD)), - ("xmm5" , (XMM5 , OWORD)), - ("xmm6" , (XMM6 , OWORD)), - ("xmm7" , (XMM7 , OWORD)), - ("xmm8" , (XMM8 , OWORD)), - ("xmm9" , (XMM9 , OWORD)), + ("xmm0", (XMM0, OWORD)), + ("xmm1", (XMM1, OWORD)), + ("xmm2", (XMM2, OWORD)), + ("xmm3", (XMM3, OWORD)), + ("xmm4", (XMM4, OWORD)), + ("xmm5", (XMM5, OWORD)), + ("xmm6", (XMM6, OWORD)), + ("xmm7", (XMM7, OWORD)), + ("xmm8", (XMM8, OWORD)), + ("xmm9", (XMM9, OWORD)), ("xmm10", (XMM10, OWORD)), ("xmm11", (XMM11, OWORD)), ("xmm12", (XMM12, OWORD)), ("xmm13", (XMM13, OWORD)), ("xmm14", (XMM14, OWORD)), ("xmm15", (XMM15, OWORD)), - - ("ymm0" , (XMM0 , HWORD)), - ("ymm1" , (XMM1 , HWORD)), - ("ymm2" , (XMM2 , HWORD)), - ("ymm3" , (XMM3 , HWORD)), - ("ymm4" , (XMM4 , HWORD)), - ("ymm5" , (XMM5 , HWORD)), - ("ymm6" , (XMM6 , HWORD)), - ("ymm7" , (XMM7 , HWORD)), - ("ymm8" , (XMM8 , HWORD)), - ("ymm9" , (XMM9 , HWORD)), + ("ymm0", (XMM0, HWORD)), + ("ymm1", (XMM1, HWORD)), + ("ymm2", (XMM2, HWORD)), + ("ymm3", (XMM3, HWORD)), + ("ymm4", (XMM4, HWORD)), + ("ymm5", (XMM5, HWORD)), + ("ymm6", (XMM6, HWORD)), + ("ymm7", (XMM7, HWORD)), + ("ymm8", (XMM8, HWORD)), + ("ymm9", (XMM9, HWORD)), ("ymm10", (XMM10, HWORD)), ("ymm11", (XMM11, HWORD)), ("ymm12", (XMM12, HWORD)), ("ymm13", (XMM13, HWORD)), ("ymm14", (XMM14, HWORD)), ("ymm15", (XMM15, HWORD)), - ("es", (ES, WORD)), ("cs", (CS, WORD)), ("ss", (SS, WORD)), ("ds", (DS, WORD)), ("fs", (FS, WORD)), ("gs", (GS, WORD)), - - ("cr0" , (CR0 , QWORD)), - ("cr1" , (CR1 , QWORD)), - ("cr2" , (CR2 , QWORD)), - ("cr3" , (CR3 , QWORD)), - ("cr4" , (CR4 , QWORD)), - ("cr5" , (CR5 , QWORD)), - ("cr6" , (CR6 , QWORD)), - ("cr7" , (CR7 , QWORD)), - ("cr8" , (CR8 , QWORD)), - ("cr9" , (CR9 , QWORD)), + ("cr0", (CR0, QWORD)), + ("cr1", (CR1, QWORD)), + ("cr2", (CR2, QWORD)), + ("cr3", (CR3, QWORD)), + ("cr4", (CR4, QWORD)), + ("cr5", (CR5, QWORD)), + ("cr6", (CR6, QWORD)), + ("cr7", (CR7, QWORD)), + ("cr8", (CR8, QWORD)), + ("cr9", (CR9, QWORD)), ("cr10", (CR10, QWORD)), ("cr11", (CR11, QWORD)), ("cr12", (CR12, QWORD)), ("cr13", (CR13, QWORD)), ("cr14", (CR14, QWORD)), ("cr15", (CR15, QWORD)), - - ("dr0" , (DR0 , QWORD)), - ("dr1" , (DR1 , QWORD)), - ("dr2" , (DR2 , QWORD)), - ("dr3" , (DR3 , QWORD)), - ("dr4" , (DR4 , QWORD)), - ("dr5" , (DR5 , QWORD)), - ("dr6" , (DR6 , QWORD)), - ("dr7" , (DR7 , QWORD)), - ("dr8" , (DR8 , QWORD)), - ("dr9" , (DR9 , QWORD)), + ("dr0", (DR0, QWORD)), + ("dr1", (DR1, QWORD)), + ("dr2", (DR2, 
QWORD)), + ("dr3", (DR3, QWORD)), + ("dr4", (DR4, QWORD)), + ("dr5", (DR5, QWORD)), + ("dr6", (DR6, QWORD)), + ("dr7", (DR7, QWORD)), + ("dr8", (DR8, QWORD)), + ("dr9", (DR9, QWORD)), ("dr10", (DR10, QWORD)), ("dr11", (DR11, QWORD)), ("dr12", (DR12, QWORD)), ("dr13", (DR13, QWORD)), ("dr14", (DR14, QWORD)), ("dr15", (DR15, QWORD)), - ("bnd0", (BND0, OWORD)), ("bnd1", (BND1, OWORD)), ("bnd2", (BND2, OWORD)), @@ -557,7 +548,6 @@ lazy_static!{ ("ebp", (RBP, DWORD)), ("esi", (RSI, DWORD)), ("edi", (RDI, DWORD)), - ("ax", (RAX, WORD)), ("cx", (RCX, WORD)), ("dx", (RDX, WORD)), @@ -566,19 +556,15 @@ lazy_static!{ ("bp", (RBP, WORD)), ("si", (RSI, WORD)), ("di", (RDI, WORD)), - ("al", (RAX, BYTE)), ("cl", (RCX, BYTE)), ("dl", (RDX, BYTE)), ("bl", (RBX, BYTE)), - ("eip", (RIP, DWORD)), - ("ah", (AH, BYTE)), ("ch", (CH, BYTE)), ("dh", (DH, BYTE)), ("bh", (BH, BYTE)), - ("st0", (ST0, PWORD)), ("st1", (ST1, PWORD)), ("st2", (ST2, PWORD)), @@ -587,7 +573,6 @@ lazy_static!{ ("st5", (ST5, PWORD)), ("st6", (ST6, PWORD)), ("st7", (ST7, PWORD)), - ("mm0", (MMX0, QWORD)), ("mm1", (MMX1, QWORD)), ("mm2", (MMX2, QWORD)), @@ -596,7 +581,6 @@ lazy_static!{ ("mm5", (MMX5, QWORD)), ("mm6", (MMX6, QWORD)), ("mm7", (MMX7, QWORD)), - ("xmm0", (XMM0, OWORD)), ("xmm1", (XMM1, OWORD)), ("xmm2", (XMM2, OWORD)), @@ -605,7 +589,6 @@ lazy_static!{ ("xmm5", (XMM5, OWORD)), ("xmm6", (XMM6, OWORD)), ("xmm7", (XMM7, OWORD)), - ("ymm0", (XMM0, HWORD)), ("ymm1", (XMM1, HWORD)), ("ymm2", (XMM2, HWORD)), @@ -614,14 +597,12 @@ lazy_static!{ ("ymm5", (XMM5, HWORD)), ("ymm6", (XMM6, HWORD)), ("ymm7", (XMM7, HWORD)), - ("es", (ES, WORD)), ("cs", (CS, WORD)), ("ss", (SS, WORD)), ("ds", (DS, WORD)), ("fs", (FS, WORD)), ("gs", (GS, WORD)), - ("cr0", (CR0, DWORD)), ("cr1", (CR1, DWORD)), ("cr2", (CR2, DWORD)), @@ -630,7 +611,6 @@ lazy_static!{ ("cr5", (CR5, DWORD)), ("cr6", (CR6, DWORD)), ("cr7", (CR7, DWORD)), - ("dr0", (DR0, DWORD)), ("dr1", (DR1, DWORD)), ("dr2", (DR2, DWORD)), @@ -639,7 +619,6 @@ lazy_static!{ ("dr5", (DR5, DWORD)), ("dr6", (DR6, DWORD)), ("dr7", (DR7, DWORD)), - ("bnd0", (BND0, OWORD)), ("bnd1", (BND1, OWORD)), ("bnd2", (BND2, OWORD)), @@ -647,11 +626,11 @@ lazy_static!{ ]; MAP.iter().cloned().collect() }; - static ref X64_FAMILIES: HashMap<&'static str, (Size, RegFamily)> = { + static ref X64_FAMILIES: HashMap<&'static str, (Size, RegFamily)> = { static MAP: &[(&str, (Size, RegFamily))] = &[ - ("Rb", (Size::BYTE, RegFamily::LEGACY)), - ("Rh", (Size::BYTE, RegFamily::HIGHBYTE)), - ("Rw", (Size::WORD, RegFamily::LEGACY)), + ("Rb", (Size::BYTE, RegFamily::LEGACY)), + ("Rh", (Size::BYTE, RegFamily::HIGHBYTE)), + ("Rw", (Size::WORD, RegFamily::LEGACY)), ("Rd", (Size::DWORD, RegFamily::LEGACY)), ("Ra", (Size::QWORD, RegFamily::LEGACY)), ("Rq", (Size::QWORD, RegFamily::LEGACY)), @@ -659,28 +638,28 @@ lazy_static!{ ("Rm", (Size::QWORD, RegFamily::MMX)), ("Rx", (Size::OWORD, RegFamily::XMM)), ("Ry", (Size::HWORD, RegFamily::XMM)), - ("Rs", (Size::WORD, RegFamily::SEGMENT)), + ("Rs", (Size::WORD, RegFamily::SEGMENT)), ("RC", (Size::QWORD, RegFamily::CONTROL)), ("RD", (Size::QWORD, RegFamily::DEBUG)), ("RB", (Size::OWORD, RegFamily::BOUND)), ]; MAP.iter().cloned().collect() }; - static ref X86_FAMILIES: HashMap<&'static str, (Size, RegFamily)> = { + static ref X86_FAMILIES: HashMap<&'static str, (Size, RegFamily)> = { static MAP: &[(&str, (Size, RegFamily))] = &[ - ("Rb",(Size::BYTE, RegFamily::LEGACY)), - ("Rh",(Size::BYTE, RegFamily::HIGHBYTE)), - ("Rw",(Size::WORD, RegFamily::LEGACY)), - ("Ra",(Size::DWORD, 
RegFamily::LEGACY)), - ("Rd",(Size::DWORD, RegFamily::LEGACY)), - ("Rf",(Size::PWORD, RegFamily::FP)), - ("Rm",(Size::QWORD, RegFamily::MMX)), - ("Rx",(Size::OWORD, RegFamily::XMM)), - ("Ry",(Size::HWORD, RegFamily::XMM)), - ("Rs",(Size::WORD, RegFamily::SEGMENT)), - ("RC",(Size::DWORD, RegFamily::CONTROL)), - ("RD",(Size::DWORD, RegFamily::DEBUG)), - ("RB",(Size::OWORD, RegFamily::BOUND)), + ("Rb", (Size::BYTE, RegFamily::LEGACY)), + ("Rh", (Size::BYTE, RegFamily::HIGHBYTE)), + ("Rw", (Size::WORD, RegFamily::LEGACY)), + ("Ra", (Size::DWORD, RegFamily::LEGACY)), + ("Rd", (Size::DWORD, RegFamily::LEGACY)), + ("Rf", (Size::PWORD, RegFamily::FP)), + ("Rm", (Size::QWORD, RegFamily::MMX)), + ("Rx", (Size::OWORD, RegFamily::XMM)), + ("Ry", (Size::HWORD, RegFamily::XMM)), + ("Rs", (Size::WORD, RegFamily::SEGMENT)), + ("RC", (Size::DWORD, RegFamily::CONTROL)), + ("RD", (Size::DWORD, RegFamily::DEBUG)), + ("RB", (Size::OWORD, RegFamily::BOUND)), ]; MAP.iter().cloned().collect() }; diff --git a/plugin/src/arch/x64/x64data.rs b/plugin/src/arch/x64/x64data.rs index 546bf48147..5d0e5bc2a7 100644 --- a/plugin/src/arch/x64/x64data.rs +++ b/plugin/src/arch/x64/x64data.rs @@ -1,20 +1,46 @@ -use std::collections::{HashMap, hash_map}; +use std::collections::{hash_map, HashMap}; use super::compiler::Opdata; use std::fmt::{self, Display}; -use lazy_static::lazy_static; use bitflags::bitflags; +use lazy_static::lazy_static; macro_rules! constify { - ($t:ty, $e:expr) => { {const C: &$t = &$e; C} } + ($t:ty, $e:expr) => {{ + const C: &$t = &$e; + C + }}; } macro_rules! OpInner { - ($fmt:expr, $ops:expr, $reg:expr) => { Opdata {args: $fmt, ops: constify!([u8], $ops), reg: $reg, flags: Flags::DEFAULT, features: Features::X64_IMPLICIT} }; - ($fmt:expr, $ops:expr, $reg:expr, $f:expr) => { Opdata {args: $fmt, ops: constify!([u8], $ops), reg: $reg, flags: Flags::make($f), features: Features::X64_IMPLICIT} }; - ($fmt:expr, $ops:expr, $reg:expr, $f:expr, $ft:expr) => { Opdata {args: $fmt, ops: constify!([u8], $ops), reg: $reg, flags: Flags::make($f), features: Features::make($ft)} }; - + ($fmt:expr, $ops:expr, $reg:expr) => { + Opdata { + args: $fmt, + ops: constify!([u8], $ops), + reg: $reg, + flags: Flags::DEFAULT, + features: Features::X64_IMPLICIT, + } + }; + ($fmt:expr, $ops:expr, $reg:expr, $f:expr) => { + Opdata { + args: $fmt, + ops: constify!([u8], $ops), + reg: $reg, + flags: Flags::make($f), + features: Features::X64_IMPLICIT, + } + }; + ($fmt:expr, $ops:expr, $reg:expr, $f:expr, $ft:expr) => { + Opdata { + args: $fmt, + ops: constify!([u8], $ops), + reg: $reg, + flags: Flags::make($f), + features: Features::make($ft), + } + }; } macro_rules! 
Ops { @@ -114,32 +140,32 @@ impl Features { pub fn from_str(name: &str) -> Option { match name { - "fpu" => Some(Features::FPU), - "mmx" => Some(Features::MMX), + "fpu" => Some(Features::FPU), + "mmx" => Some(Features::MMX), "tdnow" => Some(Features::TDNOW), - "sse" => Some(Features::SSE), - "sse2" => Some(Features::SSE2), - "sse3" => Some(Features::SSE3), - "vmx" => Some(Features::VMX), + "sse" => Some(Features::SSE), + "sse2" => Some(Features::SSE2), + "sse3" => Some(Features::SSE3), + "vmx" => Some(Features::VMX), "ssse3" => Some(Features::SSSE3), "sse4a" => Some(Features::SSE4A), "sse41" => Some(Features::SSE41), "sse42" => Some(Features::SSE42), - "sse5" => Some(Features::SSE5), - "avx" => Some(Features::AVX), - "avx2" => Some(Features::AVX2), - "fma" => Some(Features::FMA), - "bmi1" => Some(Features::BMI1), - "bmi2" => Some(Features::BMI2), - "tbm" => Some(Features::TBM), - "rtm" => Some(Features::RTM), + "sse5" => Some(Features::SSE5), + "avx" => Some(Features::AVX), + "avx2" => Some(Features::AVX2), + "fma" => Some(Features::FMA), + "bmi1" => Some(Features::BMI1), + "bmi2" => Some(Features::BMI2), + "tbm" => Some(Features::TBM), + "rtm" => Some(Features::RTM), "invpcid" => Some(Features::INVPCID), - "mpx" => Some(Features::MPX), - "sha" => Some(Features::SHA), + "mpx" => Some(Features::MPX), + "sha" => Some(Features::SHA), "prefetchwt1" => Some(Features::PREFETCHWT1), "cyrix" => Some(Features::CYRIX), - "amd" => Some(Features::AMD), - _ => None + "amd" => Some(Features::AMD), + _ => None, } } } @@ -147,31 +173,81 @@ impl Features { impl Display for Features { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut keys = Vec::new(); - if self.contains(Features::FPU) { keys.push("fpu"); } - if self.contains(Features::MMX) { keys.push("mmx"); } - if self.contains(Features::TDNOW) { keys.push("tdnow"); } - if self.contains(Features::SSE) { keys.push("sse"); } - if self.contains(Features::SSE2) { keys.push("sse2"); } - if self.contains(Features::SSE3) { keys.push("sse3"); } - if self.contains(Features::VMX) { keys.push("vmx"); } - if self.contains(Features::SSSE3) { keys.push("ssse3"); } - if self.contains(Features::SSE4A) { keys.push("sse4a"); } - if self.contains(Features::SSE41) { keys.push("sse41"); } - if self.contains(Features::SSE42) { keys.push("sse42"); } - if self.contains(Features::SSE5) { keys.push("sse5"); } - if self.contains(Features::AVX) { keys.push("avx"); } - if self.contains(Features::AVX2) { keys.push("avx2"); } - if self.contains(Features::FMA) { keys.push("fma"); } - if self.contains(Features::BMI1) { keys.push("bmi1"); } - if self.contains(Features::BMI2) { keys.push("bmi2"); } - if self.contains(Features::TBM) { keys.push("tbm"); } - if self.contains(Features::RTM) { keys.push("rtm"); } - if self.contains(Features::INVPCID) { keys.push("invpcid"); } - if self.contains(Features::MPX) { keys.push("mpx"); } - if self.contains(Features::SHA) { keys.push("sha"); } - if self.contains(Features::PREFETCHWT1) { keys.push("prefetchwt1"); } - if self.contains(Features::CYRIX) { keys.push("cyrix"); } - if self.contains(Features::AMD) { keys.push("amd"); } + if self.contains(Features::FPU) { + keys.push("fpu"); + } + if self.contains(Features::MMX) { + keys.push("mmx"); + } + if self.contains(Features::TDNOW) { + keys.push("tdnow"); + } + if self.contains(Features::SSE) { + keys.push("sse"); + } + if self.contains(Features::SSE2) { + keys.push("sse2"); + } + if self.contains(Features::SSE3) { + keys.push("sse3"); + } + if self.contains(Features::VMX) { + 
keys.push("vmx"); + } + if self.contains(Features::SSSE3) { + keys.push("ssse3"); + } + if self.contains(Features::SSE4A) { + keys.push("sse4a"); + } + if self.contains(Features::SSE41) { + keys.push("sse41"); + } + if self.contains(Features::SSE42) { + keys.push("sse42"); + } + if self.contains(Features::SSE5) { + keys.push("sse5"); + } + if self.contains(Features::AVX) { + keys.push("avx"); + } + if self.contains(Features::AVX2) { + keys.push("avx2"); + } + if self.contains(Features::FMA) { + keys.push("fma"); + } + if self.contains(Features::BMI1) { + keys.push("bmi1"); + } + if self.contains(Features::BMI2) { + keys.push("bmi2"); + } + if self.contains(Features::TBM) { + keys.push("tbm"); + } + if self.contains(Features::RTM) { + keys.push("rtm"); + } + if self.contains(Features::INVPCID) { + keys.push("invpcid"); + } + if self.contains(Features::MPX) { + keys.push("mpx"); + } + if self.contains(Features::SHA) { + keys.push("sha"); + } + if self.contains(Features::PREFETCHWT1) { + keys.push("prefetchwt1"); + } + if self.contains(Features::CYRIX) { + keys.push("cyrix"); + } + if self.contains(Features::AMD) { + keys.push("amd"); + } for (i, k) in keys.into_iter().enumerate() { if i != 0 { f.write_str(", ")?; @@ -188,60 +264,59 @@ pub fn mnemnonics() -> hash_map::Keys<'static, &'static str, &'static [Opdata]> } // workaround until bitflags can be used in const -const DEFAULT : u32 = Flags::DEFAULT.bits; -const VEX_OP : u32 = Flags::VEX_OP.bits; -const XOP_OP : u32 = Flags::XOP_OP.bits; -const IMM_OP : u32 = Flags::IMM_OP.bits; -const SHORT_ARG : u32 = Flags::SHORT_ARG.bits; -const AUTO_SIZE : u32 = Flags::AUTO_SIZE.bits; -const AUTO_NO32 : u32 = Flags::AUTO_NO32.bits; -const AUTO_REXW : u32 = Flags::AUTO_REXW.bits; -const AUTO_VEXL : u32 = Flags::AUTO_VEXL.bits; -const WORD_SIZE : u32 = Flags::WORD_SIZE.bits; -const WITH_REXW : u32 = Flags::WITH_REXW.bits; -const WITH_VEXL : u32 = Flags::WITH_VEXL.bits; -const EXACT_SIZE : u32 = Flags::EXACT_SIZE.bits; -const PREF_66 : u32 = Flags::PREF_66.bits; -const PREF_67 : u32 = Flags::PREF_67.bits; -const PREF_F0 : u32 = Flags::PREF_F0.bits; -const PREF_F2 : u32 = Flags::PREF_F2.bits; -const PREF_F3 : u32 = Flags::PREF_F3.bits; -const LOCK : u32 = Flags::LOCK.bits; -const REP : u32 = Flags::REP.bits; -const REPE : u32 = Flags::REPE.bits; -const ENC_MR : u32 = Flags::ENC_MR.bits; -const ENC_VM : u32 = Flags::ENC_VM.bits; -const ENC_MIB : u32 = Flags::ENC_MIB.bits; -const X86_ONLY : u32 = Flags::X86_ONLY.bits; +const DEFAULT: u32 = Flags::DEFAULT.bits; +const VEX_OP: u32 = Flags::VEX_OP.bits; +const XOP_OP: u32 = Flags::XOP_OP.bits; +const IMM_OP: u32 = Flags::IMM_OP.bits; +const SHORT_ARG: u32 = Flags::SHORT_ARG.bits; +const AUTO_SIZE: u32 = Flags::AUTO_SIZE.bits; +const AUTO_NO32: u32 = Flags::AUTO_NO32.bits; +const AUTO_REXW: u32 = Flags::AUTO_REXW.bits; +const AUTO_VEXL: u32 = Flags::AUTO_VEXL.bits; +const WORD_SIZE: u32 = Flags::WORD_SIZE.bits; +const WITH_REXW: u32 = Flags::WITH_REXW.bits; +const WITH_VEXL: u32 = Flags::WITH_VEXL.bits; +const EXACT_SIZE: u32 = Flags::EXACT_SIZE.bits; +const PREF_66: u32 = Flags::PREF_66.bits; +const PREF_67: u32 = Flags::PREF_67.bits; +const PREF_F0: u32 = Flags::PREF_F0.bits; +const PREF_F2: u32 = Flags::PREF_F2.bits; +const PREF_F3: u32 = Flags::PREF_F3.bits; +const LOCK: u32 = Flags::LOCK.bits; +const REP: u32 = Flags::REP.bits; +const REPE: u32 = Flags::REPE.bits; +const ENC_MR: u32 = Flags::ENC_MR.bits; +const ENC_VM: u32 = Flags::ENC_VM.bits; +const ENC_MIB: u32 = Flags::ENC_MIB.bits; +const X86_ONLY: 
u32 = Flags::X86_ONLY.bits; #[allow(dead_code)] -const X64_IMPLICIT : u32 = Features::X64_IMPLICIT.bits; -const FPU : u32 = Features::FPU.bits; -const MMX : u32 = Features::MMX.bits; -const TDNOW : u32 = Features::TDNOW.bits; -const SSE : u32 = Features::SSE.bits; -const SSE2 : u32 = Features::SSE2.bits; -const SSE3 : u32 = Features::SSE3.bits; -const VMX : u32 = Features::VMX.bits; -const SSSE3 : u32 = Features::SSSE3.bits; -const SSE4A : u32 = Features::SSE4A.bits; -const SSE41 : u32 = Features::SSE41.bits; -const SSE42 : u32 = Features::SSE42.bits; -const SSE5 : u32 = Features::SSE5.bits; -const AVX : u32 = Features::AVX.bits; -const AVX2 : u32 = Features::AVX2.bits; -const FMA : u32 = Features::FMA.bits; -const BMI1 : u32 = Features::BMI1.bits; -const BMI2 : u32 = Features::BMI2.bits; -const TBM : u32 = Features::TBM.bits; -const RTM : u32 = Features::RTM.bits; -const INVPCID : u32 = Features::INVPCID.bits; -const MPX : u32 = Features::MPX.bits; -const SHA : u32 = Features::SHA.bits; -const PREFETCHWT1 : u32 = Features::PREFETCHWT1.bits; -const CYRIX : u32 = Features::CYRIX.bits; -const AMD : u32 = Features::AMD.bits; - +const X64_IMPLICIT: u32 = Features::X64_IMPLICIT.bits; +const FPU: u32 = Features::FPU.bits; +const MMX: u32 = Features::MMX.bits; +const TDNOW: u32 = Features::TDNOW.bits; +const SSE: u32 = Features::SSE.bits; +const SSE2: u32 = Features::SSE2.bits; +const SSE3: u32 = Features::SSE3.bits; +const VMX: u32 = Features::VMX.bits; +const SSSE3: u32 = Features::SSSE3.bits; +const SSE4A: u32 = Features::SSE4A.bits; +const SSE41: u32 = Features::SSE41.bits; +const SSE42: u32 = Features::SSE42.bits; +const SSE5: u32 = Features::SSE5.bits; +const AVX: u32 = Features::AVX.bits; +const AVX2: u32 = Features::AVX2.bits; +const FMA: u32 = Features::FMA.bits; +const BMI1: u32 = Features::BMI1.bits; +const BMI2: u32 = Features::BMI2.bits; +const TBM: u32 = Features::TBM.bits; +const RTM: u32 = Features::RTM.bits; +const INVPCID: u32 = Features::INVPCID.bits; +const MPX: u32 = Features::MPX.bits; +const SHA: u32 = Features::SHA.bits; +const PREFETCHWT1: u32 = Features::PREFETCHWT1.bits; +const CYRIX: u32 = Features::CYRIX.bits; +const AMD: u32 = Features::AMD.bits; lazy_static! { static ref OPMAP: HashMap<&'static str, &'static [Opdata]> = { diff --git a/plugin/src/common.rs b/plugin/src/common.rs index be92d77118..9f4e454824 100644 --- a/plugin/src/common.rs +++ b/plugin/src/common.rs @@ -1,20 +1,20 @@ //! This module contains various infrastructure that is common across all assembler backends use proc_macro2::{Span, TokenTree}; -use quote::ToTokens; use quote::quote; -use syn::spanned::Spanned; +use quote::ToTokens; use syn::parse; +use syn::spanned::Spanned; use syn::Token; -use crate::parse_helpers::{ParseOpt, eat_pseudo_keyword}; +use crate::parse_helpers::{eat_pseudo_keyword, ParseOpt}; use crate::serialize; /// Enum representing the result size of a value/expression/register/etc in bytes. 
/// Uses the NASM syntax for sizes (a word is 16 bits) #[derive(Debug, PartialOrd, PartialEq, Ord, Eq, Hash, Clone, Copy)] pub enum Size { - BYTE = 1, - WORD = 2, + BYTE = 1, + WORD = 2, DWORD = 4, FWORD = 6, QWORD = 8, @@ -29,27 +29,29 @@ impl Size { } pub fn as_literal(self) -> syn::Ident { - syn::Ident::new(match self { - Size::BYTE => "i8", - Size::WORD => "i16", - Size::DWORD => "i32", - Size::FWORD => "i48", - Size::QWORD => "i64", - Size::PWORD => "i80", - Size::OWORD => "i128", - Size::HWORD => "i256" - }, Span::mixed_site()) + syn::Ident::new( + match self { + Size::BYTE => "i8", + Size::WORD => "i16", + Size::DWORD => "i32", + Size::FWORD => "i48", + Size::QWORD => "i64", + Size::PWORD => "i80", + Size::OWORD => "i128", + Size::HWORD => "i256", + }, + Span::mixed_site(), + ) } } - /** * Jump types */ #[derive(Debug, Clone)] pub struct Jump { pub kind: JumpKind, - pub offset: Option + pub offset: Option, } #[derive(Debug, Clone)] @@ -61,7 +63,7 @@ pub enum JumpKind { Backward(syn::Ident), // > label (["+" "-"] offset)? Forward(syn::Ident), // < label (["+" "-"] offset)? Dynamic(syn::Expr), // =>expr | => (expr) (["+" "-"] offset)? - Bare(syn::Expr) // jump to this address + Bare(syn::Expr), // jump to this address } impl ParseOpt for Jump { @@ -70,7 +72,10 @@ impl ParseOpt for Jump { if eat_pseudo_keyword(input, "extern") { let expr: syn::Expr = input.parse()?; - return Ok(Some(Jump { kind: JumpKind::Bare(expr), offset: None })); + return Ok(Some(Jump { + kind: JumpKind::Bare(expr), + offset: None, + })); } // -> global_label @@ -123,7 +128,6 @@ impl ParseOpt for Jump { let expr: syn::Expr = input.parse()?; Some(expr) - } else { None }; @@ -134,10 +138,7 @@ impl ParseOpt for Jump { impl Jump { pub fn new(kind: JumpKind, offset: Option) -> Jump { - Jump { - kind, - offset - } + Jump { kind, offset } } /// Takes a jump and encodes it as a relocation starting `start_offset` bytes ago, relative to `ref_offset`. @@ -156,7 +157,7 @@ impl Jump { target_offset, field_offset, ref_offset, - kind: serialize::expr_tuple_of_u8s(span, data) + kind: serialize::expr_tuple_of_u8s(span, data), }; match self.kind { JumpKind::Global(ident) => Stmt::GlobalJumpTarget(ident, relocation), @@ -178,7 +179,6 @@ impl Jump { } } - /// A relocation entry description #[derive(Debug, Clone)] pub struct Relocation { @@ -188,7 +188,6 @@ pub struct Relocation { pub kind: TokenTree, } - /// An abstract representation of a dynasm runtime statement to be emitted #[derive(Debug, Clone)] pub enum Stmt { @@ -223,7 +222,7 @@ pub enum Stmt { PrefixStmt(TokenTree), // a random statement that has to be inserted between assembly hunks - Stmt(TokenTree) + Stmt(TokenTree), } // convenience methods @@ -247,28 +246,26 @@ impl Stmt { } } - // Makes a None-delimited TokenTree item out of anything that can be converted to tokens. // This is a useful shortcut to escape issues around not-properly delimited tokenstreams // because it is guaranteed to be parsed back properly to its source ast at type-level. 
pub fn delimited(expr: T) -> TokenTree { let span = expr.span(); - let mut group = proc_macro2::Group::new( - proc_macro2::Delimiter::None, expr.into_token_stream() - ); + let mut group = proc_macro2::Group::new(proc_macro2::Delimiter::None, expr.into_token_stream()); group.set_span(span); proc_macro2::TokenTree::Group(group) } - /// Create a bitmask with `scale` bits set pub fn bitmask(scale: u8) -> u32 { - 1u32.checked_shl(u32::from(scale)).unwrap_or(0).wrapping_sub(1) + 1u32.checked_shl(u32::from(scale)) + .unwrap_or(0) + .wrapping_sub(1) } - /// Create a bitmask with `scale` bits set pub fn bitmask64(scale: u8) -> u64 { - 1u64.checked_shl(u32::from(scale)).unwrap_or(0).wrapping_sub(1) + 1u64.checked_shl(u32::from(scale)) + .unwrap_or(0) + .wrapping_sub(1) } - diff --git a/plugin/src/directive.rs b/plugin/src/directive.rs index b3388a239f..d6a3191297 100644 --- a/plugin/src/directive.rs +++ b/plugin/src/directive.rs @@ -1,21 +1,24 @@ use std::collections::hash_map::Entry; +use proc_macro_error::emit_error; +use quote::quote; use syn::parse; use syn::Token; -use quote::quote; -use proc_macro_error::emit_error; -use crate::common::{Stmt, Size, delimited}; use crate::arch; -use crate::DynasmContext; +use crate::common::{delimited, Size, Stmt}; use crate::parse_helpers::ParseOptExt; +use crate::DynasmContext; -pub(crate) fn evaluate_directive(invocation_context: &mut DynasmContext, stmts: &mut Vec, input: parse::ParseStream) -> parse::Result<()> { +pub(crate) fn evaluate_directive( + invocation_context: &mut DynasmContext, + stmts: &mut Vec, + input: parse::ParseStream, +) -> parse::Result<()> { let directive: syn::Ident = input.parse()?; match directive.to_string().as_str() { // TODO: oword, qword, float, double, long double - "arch" => { // ; .arch ident let arch: syn::Ident = input.parse()?; @@ -24,7 +27,7 @@ pub(crate) fn evaluate_directive(invocation_context: &mut DynasmContext, stmts: } else { emit_error!(arch, "Unknown architecture '{}'", arch); } - }, + } "feature" => { // ; .feature ident ("," ident) * let mut features = Vec::new(); @@ -42,17 +45,17 @@ pub(crate) fn evaluate_directive(invocation_context: &mut DynasmContext, stmts: features.pop(); } invocation_context.current_arch.set_features(&features); - }, + } // ; .byte (expr ("," expr)*)? 
- "byte" => directive_const(invocation_context, stmts, input, Size::BYTE)?, - "word" => directive_const(invocation_context, stmts, input, Size::WORD)?, + "byte" => directive_const(invocation_context, stmts, input, Size::BYTE)?, + "word" => directive_const(invocation_context, stmts, input, Size::WORD)?, "dword" => directive_const(invocation_context, stmts, input, Size::DWORD)?, "qword" => directive_const(invocation_context, stmts, input, Size::QWORD)?, "bytes" => { // ; .bytes expr let iterator: syn::Expr = input.parse()?; stmts.push(Stmt::ExprExtend(delimited(iterator))); - }, + } "align" => { // ; .align expr ("," expr) // this might need to be architecture dependent @@ -68,7 +71,7 @@ pub(crate) fn evaluate_directive(invocation_context: &mut DynasmContext, stmts: }; stmts.push(Stmt::Align(delimited(value), with)); - }, + } "alias" => { // ; .alias ident, ident // consider changing this to ; .alias ident = ident next breaking change @@ -80,13 +83,17 @@ pub(crate) fn evaluate_directive(invocation_context: &mut DynasmContext, stmts: match invocation_context.aliases.entry(alias_name) { Entry::Occupied(_) => { - emit_error!(alias, "Duplicate alias definition, alias '{}' was already defined", alias); - }, + emit_error!( + alias, + "Duplicate alias definition, alias '{}' was already defined", + alias + ); + } Entry::Vacant(v) => { v.insert(reg.to_string()); } } - }, + } d => { // unknown directive. skip ahead until we hit a ; so the parser can recover emit_error!(directive, "unknown directive '{}'", d); @@ -97,27 +104,35 @@ pub(crate) fn evaluate_directive(invocation_context: &mut DynasmContext, stmts: Ok(()) } -fn directive_const(invocation_context: &mut DynasmContext, stmts: &mut Vec, input: parse::ParseStream, size: Size) -> parse::Result<()> { +fn directive_const( + invocation_context: &mut DynasmContext, + stmts: &mut Vec, + input: parse::ParseStream, + size: Size, +) -> parse::Result<()> { // FIXME: this could be replaced by a Punctuated parser? // parse (expr (, expr)*)? if input.is_empty() || input.peek(Token![;]) { - return Ok(()) + return Ok(()); } if let Some(jump) = input.parse_opt()? { - invocation_context.current_arch.handle_static_reloc(stmts, jump, size); + invocation_context + .current_arch + .handle_static_reloc(stmts, jump, size); } else { let expr: syn::Expr = input.parse()?; stmts.push(Stmt::ExprSigned(delimited(expr), size)); } - while input.peek(Token![,]) { let _: Token![,] = input.parse()?; if let Some(jump) = input.parse_opt()? { - invocation_context.current_arch.handle_static_reloc(stmts, jump, size); + invocation_context + .current_arch + .handle_static_reloc(stmts, jump, size); } else { let expr: syn::Expr = input.parse()?; stmts.push(Stmt::ExprSigned(delimited(expr), size)); diff --git a/plugin/src/lib.rs b/plugin/src/lib.rs index 2ae7029a6a..5a8a22ae09 100644 --- a/plugin/src/lib.rs +++ b/plugin/src/lib.rs @@ -1,7 +1,7 @@ #![cfg_attr(feature = "filelocal", feature(proc_macro_span))] //! The dynasm crate contains the procedural macros that power the magic of dynasm-rs. It seamlessly integrates //! a full dynamic assembler for several assembly dialects with Rust code. -//! +//! //! As this is a proc-macro crate, it only exports the `dynasm!` and `dynasm_backwards!` macros. //! Any directives used in these macro invocations are normally local to the invocation itself, unless //! the `filelocal` crate feature is used. This feature requires a nightly compiler. 
@@ -11,31 +11,35 @@ extern crate proc_macro; -use syn::parse; -use syn::{Token, parse_macro_input}; -use proc_macro2::{TokenTree, TokenStream}; -use quote::quote; +use proc_macro2::{TokenStream, TokenTree}; use proc_macro_error::proc_macro_error; +use quote::quote; +use syn::parse; +use syn::{parse_macro_input, Token}; use std::collections::HashMap; -#[cfg(feature = "filelocal")] -use std::sync::{MutexGuard, Mutex}; +#[cfg(any( + feature = "filelocal", + feature = "dynasm_opmap", + feature = "dynasm_extract" +))] +use proc_macro2::Span; #[cfg(feature = "filelocal")] use std::path::PathBuf; -#[cfg(any(feature = "filelocal", feature = "dynasm_opmap", feature = "dynasm_extract"))] -use proc_macro2::Span; +#[cfg(feature = "filelocal")] +use std::sync::{Mutex, MutexGuard}; -/// Module with common infrastructure across assemblers -mod common; /// Module with architecture-specific assembler implementations mod arch; +/// Module with common infrastructure across assemblers +mod common; /// Module contaning the implementation of directives mod directive; -/// Module containing utility functions for creating TokenTrees from assembler / directive output -mod serialize; /// Module containing utility functions for parsing mod parse_helpers; +/// Module containing utility functions for creating TokenTrees from assembler / directive output +mod serialize; /// The whole point. This macro compiles given assembly/Rust templates down to `DynasmApi` and `DynasmLabelApi` /// compliant calls to an assembler. @@ -73,7 +77,7 @@ pub fn dynasm_backwards(tokens: proc_macro::TokenStream) -> proc_macro::TokenStr /// invocation. struct Dynasm { target: TokenTree, - stmts: Vec + stmts: Vec, } /// top-level parsing. Handles common prefix symbols and diverts to the selected architecture @@ -81,7 +85,6 @@ struct Dynasm { /// non-parsing errors happen err() will be called, but this function returns Ok(). impl parse::Parse for Dynasm { fn parse(input: parse::ParseStream) -> parse::Result { - // parse the assembler target declaration let target: syn::Expr = input.parse()?; // and just convert it back to a tokentree since that's how we'll always be using it. @@ -108,7 +111,7 @@ impl parse::Parse for Dynasm { buffer.extend(std::iter::once(input.parse::()?)); } // glue an extra ; on there - buffer.extend(quote! { ; } ); + buffer.extend(quote! { ; }); if !buffer.is_empty() { // ensure that the statement is actually a proper statement and then emit it for serialization @@ -141,7 +144,6 @@ impl parse::Parse for Dynasm { // ; label : if input.peek(syn::Ident) && input.peek2(Token![:]) { - let name: syn::Ident = input.parse()?; let _: Token![:] = input.parse()?; @@ -149,7 +151,6 @@ impl parse::Parse for Dynasm { continue; } - // ; . directive if input.peek(Token![.]) { let _: Token![.] 
= input.parse()?; @@ -162,15 +163,13 @@ impl parse::Parse for Dynasm { stmts: &mut stmts, invocation_context: &*invocation_context, }; - invocation_context.current_arch.compile_instruction(&mut state, input)?; + invocation_context + .current_arch + .compile_instruction(&mut state, input)?; } - } - Ok(Dynasm { - target, - stmts - }) + Ok(Dynasm { target, stmts }) } } @@ -179,7 +178,6 @@ impl parse::Parse for Dynasm { #[cfg(feature = "dynasm_opmap")] #[proc_macro] pub fn dynasm_opmap(tokens: proc_macro::TokenStream) -> proc_macro::TokenStream { - // parse to ensure that no macro arguments were provided let opmap = parse_macro_input!(tokens as DynasmOpmap); @@ -189,7 +187,7 @@ pub fn dynasm_opmap(tokens: proc_macro::TokenStream) -> proc_macro::TokenStream s.push_str(&match opmap.arch.as_str() { "x64" | "x86" => arch::x64::create_opmap(), "aarch64" => arch::aarch64::create_opmap(), - x => panic!("Unknown architecture {}", x) + x => panic!("Unknown architecture {}", x), }); let token = quote::quote_spanned! { Span::mixed_site()=> @@ -203,14 +201,13 @@ pub fn dynasm_opmap(tokens: proc_macro::TokenStream) -> proc_macro::TokenStream #[cfg(feature = "dynasm_extract")] #[proc_macro] pub fn dynasm_extract(tokens: proc_macro::TokenStream) -> proc_macro::TokenStream { - // parse to ensure that no macro arguments were provided let opmap = parse_macro_input!(tokens as DynasmOpmap); let s = match opmap.arch.as_str() { "x64" | "x86" => "UNIMPLEMENTED".into(), "aarch64" => arch::aarch64::extract_opmap(), - x => panic!("Unknown architecture {}", x) + x => panic!("Unknown architecture {}", x), }; let token = quote::quote_spanned! { Span::mixed_site()=> @@ -221,20 +218,20 @@ pub fn dynasm_extract(tokens: proc_macro::TokenStream) -> proc_macro::TokenStrea /// As dynasm_opmap takes no args it doesn't parse to anything -#[cfg(any(feature="dynasm_extract", feature="dynasm_opmap"))] +#[cfg(any(feature = "dynasm_extract", feature = "dynasm_opmap"))] struct DynasmOpmap { - pub arch: String + pub arch: String, } /// As dynasm_opmap takes no args it doesn't parse to anything. /// This just exists so syn will give an error when no args are present. 
-#[cfg(any(feature="dynasm_extract", feature="dynasm_opmap"))] +#[cfg(any(feature = "dynasm_extract", feature = "dynasm_opmap"))] impl parse::Parse for DynasmOpmap { fn parse(input: parse::ParseStream) -> parse::Result { let arch: syn::Ident = input.parse()?; Ok(DynasmOpmap { - arch: arch.to_string() + arch: arch.to_string(), }) } } @@ -257,7 +254,7 @@ impl DynasmContext { fn new() -> DynasmContext { DynasmContext { current_arch: arch::from_str(arch::CURRENT_ARCH).expect("Invalid default architecture"), - aliases: HashMap::new() + aliases: HashMap::new(), } } } @@ -265,14 +262,14 @@ impl DynasmContext { // Oneshot context provider #[cfg(not(feature = "filelocal"))] struct ContextProvider { - context: DynasmContext + context: DynasmContext, } #[cfg(not(feature = "filelocal"))] impl ContextProvider { pub fn new() -> ContextProvider { ContextProvider { - context: DynasmContext::new() + context: DynasmContext::new(), } } @@ -284,14 +281,14 @@ impl ContextProvider { /// Filelocal context provider #[cfg(feature = "filelocal")] struct ContextProvider { - guard: MutexGuard<'static, HashMap> + guard: MutexGuard<'static, HashMap>, } #[cfg(feature = "filelocal")] impl ContextProvider { pub fn new() -> ContextProvider { ContextProvider { - guard: CONTEXT_STORAGE.lock().unwrap() + guard: CONTEXT_STORAGE.lock().unwrap(), } } diff --git a/plugin/src/parse_helpers.rs b/plugin/src/parse_helpers.rs index 7e749ed290..c85ada2681 100644 --- a/plugin/src/parse_helpers.rs +++ b/plugin/src/parse_helpers.rs @@ -1,6 +1,6 @@ //! This file contains parsing helpers used by multiple parsing backends -use syn::parse; use std::convert::TryInto; +use syn::parse; /** * Jump types @@ -24,14 +24,16 @@ impl<'a> ParseOptExt for parse::ParseBuffer<'a> { /// Tries to parse an ident that has a specific name as a keyword. Returns true if it worked. pub fn eat_pseudo_keyword(input: parse::ParseStream, kw: &str) -> bool { - input.step(|cursor| { - if let Some((ident, rest)) = cursor.ident() { - if ident == kw { - return Ok(((), rest)); + input + .step(|cursor| { + if let Some((ident, rest)) = cursor.ident() { + if ident == kw { + return Ok(((), rest)); + } } - } - Err(cursor.error("expected identifier")) - }).is_ok() + Err(cursor.error("expected identifier")) + }) + .is_ok() } /// parses an ident, but instead of syn's Parse impl it does also parse keywords as idents @@ -47,8 +49,12 @@ pub fn parse_ident_or_rust_keyword(input: parse::ParseStream) -> parse::Result Option<&syn::Ident> { let path = match *expr { - syn::Expr::Path(syn::ExprPath {ref path, qself: None, ..}) => path, - _ => return None + syn::Expr::Path(syn::ExprPath { + ref path, + qself: None, + .. + }) => path, + _ => return None, }; if path.leading_colon.is_some() || path.segments.len() != 1 { @@ -72,8 +78,8 @@ pub fn as_lit(expr: &syn::Expr) -> Option<&syn::Lit> { } match inner { - syn::Expr::Lit(syn::ExprLit { ref lit, .. } ) => Some(lit), - _ => None + syn::Expr::Lit(syn::ExprLit { ref lit, .. }) => Some(lit), + _ => None, } } @@ -86,22 +92,24 @@ pub fn as_lit_with_negation(expr: &syn::Expr) -> Option<(&syn::Lit, bool)> { } match inner { - syn::Expr::Lit(syn::ExprLit { ref lit, .. } ) => Some((lit, false)), - syn::Expr::Unary(syn::ExprUnary { op: syn::UnOp::Neg(_), ref expr, .. } ) => { - match &**expr { - syn::Expr::Lit(syn::ExprLit { ref lit, .. } ) => Some((lit, true)), - _ => None - } - } - _ => None + syn::Expr::Lit(syn::ExprLit { ref lit, .. }) => Some((lit, false)), + syn::Expr::Unary(syn::ExprUnary { + op: syn::UnOp::Neg(_), + ref expr, + .. 
+ }) => match &**expr { + syn::Expr::Lit(syn::ExprLit { ref lit, .. }) => Some((lit, true)), + _ => None, + }, + _ => None, } } /// checks if an expression is a constant number literal pub fn as_number(expr: &syn::Expr) -> Option { - match as_lit(expr)? { + match as_lit(expr)? { syn::Lit::Int(i) => i.base10_parse().ok(), - _ => None + _ => None, } } @@ -109,13 +117,15 @@ pub fn as_number(expr: &syn::Expr) -> Option { pub fn as_signed_number(expr: &syn::Expr) -> Option { let (expr, negated) = as_lit_with_negation(expr)?; match expr { - syn::Lit::Int(i) => if let Ok(value) = i.base10_parse::() { - let value: i64 = value.try_into().ok()?; - Some (if negated {-value} else {value}) - } else { - None - }, - _ => None + syn::Lit::Int(i) => { + if let Ok(value) = i.base10_parse::() { + let value: i64 = value.try_into().ok()?; + Some(if negated { -value } else { value }) + } else { + None + } + } + _ => None, } } @@ -123,7 +133,10 @@ pub fn as_signed_number(expr: &syn::Expr) -> Option { pub fn as_float(expr: &syn::Expr) -> Option { let (expr, negated) = as_lit_with_negation(expr)?; match expr { - syn::Lit::Float(i) => i.base10_parse::().ok().map(|i| if negated { -i } else { i } ), - _ => None + syn::Lit::Float(i) => i + .base10_parse::() + .ok() + .map(|i| if negated { -i } else { i }), + _ => None, } } diff --git a/plugin/src/serialize.rs b/plugin/src/serialize.rs index 4ecd4ae399..7df0dc942d 100644 --- a/plugin/src/serialize.rs +++ b/plugin/src/serialize.rs @@ -1,16 +1,15 @@ +use proc_macro2::{Literal, Span, TokenStream, TokenTree}; +use quote::{quote, quote_spanned, ToTokens}; use syn; use syn::parse; use syn::spanned::Spanned; -use proc_macro2::{Span, TokenStream, TokenTree, Literal}; -use quote::{quote, quote_spanned, ToTokens}; use byteorder::{ByteOrder, LittleEndian}; -use crate::common::{Size, Stmt, delimited, Relocation}; +use crate::common::{delimited, Relocation, Size, Stmt}; use std::convert::TryInto; - /// Converts a sequence of abstract Statements to actual tokens pub fn serialize(name: &TokenTree, stmts: Vec) -> TokenStream { // first, try to fold constants into a byte stream @@ -18,30 +17,28 @@ pub fn serialize(name: &TokenTree, stmts: Vec) -> TokenStream { let mut const_buffer = Vec::new(); for stmt in stmts { match stmt { - Stmt::Const(value, size) => { - match size { - Size::BYTE => const_buffer.push(value as u8), - Size::WORD => { - let mut buffer = [0u8; 2]; - LittleEndian::write_u16(&mut buffer, value as u16); - const_buffer.extend(&buffer); - }, - Size::DWORD => { - let mut buffer = [0u8; 4]; - LittleEndian::write_u32(&mut buffer, value as u32); - const_buffer.extend(&buffer); - }, - Size::QWORD => { - let mut buffer = [0u8; 8]; - LittleEndian::write_u64(&mut buffer, value as u64); - const_buffer.extend(&buffer); - }, - _ => unimplemented!() + Stmt::Const(value, size) => match size { + Size::BYTE => const_buffer.push(value as u8), + Size::WORD => { + let mut buffer = [0u8; 2]; + LittleEndian::write_u16(&mut buffer, value as u16); + const_buffer.extend(&buffer); + } + Size::DWORD => { + let mut buffer = [0u8; 4]; + LittleEndian::write_u32(&mut buffer, value as u32); + const_buffer.extend(&buffer); } + Size::QWORD => { + let mut buffer = [0u8; 8]; + LittleEndian::write_u64(&mut buffer, value as u64); + const_buffer.extend(&buffer); + } + _ => unimplemented!(), }, Stmt::Extend(data) => { const_buffer.extend(data); - }, + } s => { // empty the const buffer if !const_buffer.is_empty() { @@ -67,34 +64,112 @@ pub fn serialize(name: &TokenTree, stmts: Vec) -> TokenStream { for stmt 
in folded_stmts { let (method, args) = match stmt { Stmt::Const(_, _) => unreachable!(), - Stmt::ExprUnsigned(expr, Size::BYTE) => ("push", vec![expr]), - Stmt::ExprUnsigned(expr, Size::WORD) => ("push_u16", vec![expr]), + Stmt::ExprUnsigned(expr, Size::BYTE) => ("push", vec![expr]), + Stmt::ExprUnsigned(expr, Size::WORD) => ("push_u16", vec![expr]), Stmt::ExprUnsigned(expr, Size::DWORD) => ("push_u32", vec![expr]), Stmt::ExprUnsigned(expr, Size::QWORD) => ("push_u64", vec![expr]), Stmt::ExprUnsigned(_, _) => unimplemented!(), - Stmt::ExprSigned( expr, Size::BYTE) => ("push_i8", vec![expr]), - Stmt::ExprSigned( expr, Size::WORD) => ("push_i16", vec![expr]), - Stmt::ExprSigned( expr, Size::DWORD) => ("push_i32", vec![expr]), - Stmt::ExprSigned( expr, Size::QWORD) => ("push_i64", vec![expr]), + Stmt::ExprSigned(expr, Size::BYTE) => ("push_i8", vec![expr]), + Stmt::ExprSigned(expr, Size::WORD) => ("push_i16", vec![expr]), + Stmt::ExprSigned(expr, Size::DWORD) => ("push_i32", vec![expr]), + Stmt::ExprSigned(expr, Size::QWORD) => ("push_i64", vec![expr]), Stmt::ExprSigned(_, _) => unimplemented!(), - Stmt::Extend(data) => ("extend", vec![Literal::byte_string(&data).into()]), + Stmt::Extend(data) => ("extend", vec![Literal::byte_string(&data).into()]), Stmt::ExprExtend(expr) => ("extend", vec![expr]), - Stmt::Align(expr, with) => ("align", vec![expr, with]), + Stmt::Align(expr, with) => ("align", vec![expr, with]), Stmt::GlobalLabel(n) => ("global_label", vec![expr_string_from_ident(&n)]), - Stmt::LocalLabel(n) => ("local_label", vec![expr_string_from_ident(&n)]), + Stmt::LocalLabel(n) => ("local_label", vec![expr_string_from_ident(&n)]), Stmt::DynamicLabel(expr) => ("dynamic_label", vec![expr]), - Stmt::GlobalJumpTarget(n, Relocation { target_offset, field_offset, ref_offset, kind }) => - ("global_reloc" , vec![expr_string_from_ident(&n), target_offset, Literal::u8_suffixed(field_offset).into(), Literal::u8_suffixed(ref_offset).into(), kind]), - Stmt::ForwardJumpTarget(n, Relocation { target_offset, field_offset, ref_offset, kind }) => - ("forward_reloc" , vec![expr_string_from_ident(&n), target_offset, Literal::u8_suffixed(field_offset).into(), Literal::u8_suffixed(ref_offset).into(), kind]), - Stmt::BackwardJumpTarget(n, Relocation { target_offset, field_offset, ref_offset, kind }) => - ("backward_reloc", vec![expr_string_from_ident(&n), target_offset, Literal::u8_suffixed(field_offset).into(), Literal::u8_suffixed(ref_offset).into(), kind]), - Stmt::DynamicJumpTarget(expr, Relocation { target_offset, field_offset, ref_offset, kind }) => - ("dynamic_reloc" , vec![expr, target_offset, Literal::u8_suffixed(field_offset).into(), Literal::u8_suffixed(ref_offset).into(), kind]), - Stmt::BareJumpTarget(expr, Relocation { field_offset, ref_offset, kind, .. 
}) => - ("bare_reloc" , vec![expr, Literal::u8_suffixed(field_offset).into(), Literal::u8_suffixed(ref_offset).into(), kind]), - Stmt::PrefixStmt(s) - | Stmt::Stmt(s) => { + Stmt::GlobalJumpTarget( + n, + Relocation { + target_offset, + field_offset, + ref_offset, + kind, + }, + ) => ( + "global_reloc", + vec![ + expr_string_from_ident(&n), + target_offset, + Literal::u8_suffixed(field_offset).into(), + Literal::u8_suffixed(ref_offset).into(), + kind, + ], + ), + Stmt::ForwardJumpTarget( + n, + Relocation { + target_offset, + field_offset, + ref_offset, + kind, + }, + ) => ( + "forward_reloc", + vec![ + expr_string_from_ident(&n), + target_offset, + Literal::u8_suffixed(field_offset).into(), + Literal::u8_suffixed(ref_offset).into(), + kind, + ], + ), + Stmt::BackwardJumpTarget( + n, + Relocation { + target_offset, + field_offset, + ref_offset, + kind, + }, + ) => ( + "backward_reloc", + vec![ + expr_string_from_ident(&n), + target_offset, + Literal::u8_suffixed(field_offset).into(), + Literal::u8_suffixed(ref_offset).into(), + kind, + ], + ), + Stmt::DynamicJumpTarget( + expr, + Relocation { + target_offset, + field_offset, + ref_offset, + kind, + }, + ) => ( + "dynamic_reloc", + vec![ + expr, + target_offset, + Literal::u8_suffixed(field_offset).into(), + Literal::u8_suffixed(ref_offset).into(), + kind, + ], + ), + Stmt::BareJumpTarget( + expr, + Relocation { + field_offset, + ref_offset, + kind, + .. + }, + ) => ( + "bare_reloc", + vec![ + expr, + Literal::u8_suffixed(field_offset).into(), + Literal::u8_suffixed(ref_offset).into(), + kind, + ], + ), + Stmt::PrefixStmt(s) | Stmt::Stmt(s) => { output.extend(quote! { #s ; }); @@ -113,7 +188,7 @@ pub fn serialize(name: &TokenTree, stmts: Vec) -> TokenStream { if output.is_empty() { output } else { - quote!{ + quote! { { #output } @@ -135,16 +210,51 @@ pub fn invert(stmts: Vec) -> Vec { while let Some(stmt) = iter.next() { // if we find a relocation, note it down together with the current counter value and the value at which it can be safely emitted match stmt { - Stmt::GlobalJumpTarget(_, Relocation { field_offset, ref_offset, .. } ) - | Stmt::ForwardJumpTarget(_, Relocation { field_offset, ref_offset, .. } ) - | Stmt::BackwardJumpTarget(_, Relocation { field_offset, ref_offset, .. } ) - | Stmt::DynamicJumpTarget(_, Relocation { field_offset, ref_offset, .. } ) - | Stmt::BareJumpTarget(_, Relocation { field_offset, ref_offset, .. } ) => { + Stmt::GlobalJumpTarget( + _, + Relocation { + field_offset, + ref_offset, + .. + }, + ) + | Stmt::ForwardJumpTarget( + _, + Relocation { + field_offset, + ref_offset, + .. + }, + ) + | Stmt::BackwardJumpTarget( + _, + Relocation { + field_offset, + ref_offset, + .. + }, + ) + | Stmt::DynamicJumpTarget( + _, + Relocation { + field_offset, + ref_offset, + .. + }, + ) + | Stmt::BareJumpTarget( + _, + Relocation { + field_offset, + ref_offset, + .. 
+ }, + ) => { let trigger = counter + std::cmp::max(field_offset, ref_offset) as usize; relocation_buf.push((trigger, counter, stmt)); continue; - }, - _ => () + } + _ => (), }; while let Some(Stmt::PrefixStmt(_)) = iter.peek() { @@ -156,15 +266,17 @@ pub fn invert(stmts: Vec) -> Vec { // otherwise, calculate the size of the current statement and add that to the counter let size = match &stmt { - Stmt::Const(_, size) - | Stmt::ExprUnsigned(_, size) - | Stmt::ExprSigned(_, size) => size.in_bytes() as usize, + Stmt::Const(_, size) | Stmt::ExprUnsigned(_, size) | Stmt::ExprSigned(_, size) => { + size.in_bytes() as usize + } Stmt::Extend(buf) => buf.len(), - Stmt::ExprExtend(_) - | Stmt::Align(_, _) => { - assert!(relocation_buf.is_empty(), "Tried to hoist relocation over unknown size"); + Stmt::ExprExtend(_) | Stmt::Align(_, _) => { + assert!( + relocation_buf.is_empty(), + "Tried to hoist relocation over unknown size" + ); 0 - }, + } Stmt::GlobalLabel(_) | Stmt::LocalLabel(_) | Stmt::DynamicLabel(_) @@ -189,17 +301,54 @@ pub fn invert(stmts: Vec) -> Vec { } // apply the fixups and emit - let change: u8 = (counter - orig_counter).try_into().expect("Tried to hoist a relocation by over 255 bytes"); + let change: u8 = (counter - orig_counter) + .try_into() + .expect("Tried to hoist a relocation by over 255 bytes"); match &mut stmt { - Stmt::GlobalJumpTarget(_, Relocation { field_offset, ref_offset, .. } ) - | Stmt::ForwardJumpTarget(_, Relocation { field_offset, ref_offset, .. } ) - | Stmt::BackwardJumpTarget(_, Relocation { field_offset, ref_offset, .. } ) - | Stmt::DynamicJumpTarget(_, Relocation { field_offset, ref_offset, .. } ) - | Stmt::BareJumpTarget(_, Relocation { field_offset, ref_offset, .. } ) => { + Stmt::GlobalJumpTarget( + _, + Relocation { + field_offset, + ref_offset, + .. + }, + ) + | Stmt::ForwardJumpTarget( + _, + Relocation { + field_offset, + ref_offset, + .. + }, + ) + | Stmt::BackwardJumpTarget( + _, + Relocation { + field_offset, + ref_offset, + .. + }, + ) + | Stmt::DynamicJumpTarget( + _, + Relocation { + field_offset, + ref_offset, + .. + }, + ) + | Stmt::BareJumpTarget( + _, + Relocation { + field_offset, + ref_offset, + .. + }, + ) => { *field_offset = change - *field_offset; *ref_offset = change - *ref_offset; - }, - _ => unreachable!() + } + _ => unreachable!(), } reversed.push(stmt); } @@ -213,7 +362,6 @@ pub fn invert(stmts: Vec) -> Vec { // this collection is arbitrary and purely based on what special things are needed for assembler // codegen implementations - // expression of value 0. sometimes needed. pub fn expr_zero() -> TokenTree { proc_macro2::Literal::u8_unsuffixed(0).into() @@ -228,11 +376,14 @@ pub fn expr_string_from_ident(i: &syn::Ident) -> TokenTree { // Makes a dynamic scale expression. Useful for x64 generic addressing mode pub fn expr_dynscale(scale: &TokenTree, rest: &TokenTree) -> (TokenTree, TokenTree) { let tempval = expr_encode_x64_sib_scale(&scale); - (delimited(quote_spanned! { Span::mixed_site()=> - let temp = #tempval - }), delimited(quote_spanned! { Span::mixed_site()=> - #rest | ((temp & 3) << 6) - })) + ( + delimited(quote_spanned! { Span::mixed_site()=> + let temp = #tempval + }), + delimited(quote_spanned! 
{ Span::mixed_site()=> + #rest | ((temp & 3) << 6) + }), + ) } // makes (a, b) @@ -250,10 +401,10 @@ pub fn expr_tuple_of_u8s(span: Span, data: &[u8]) -> TokenTree { } // makes sum(exprs) -pub fn expr_add_many>(span: Span, mut exprs: T) -> Option { +pub fn expr_add_many>(span: Span, mut exprs: T) -> Option { let first_expr = exprs.next()?; - let tokens = quote_spanned!{ span=> + let tokens = quote_spanned! { span=> #first_expr #( + #exprs )* }; @@ -289,9 +440,13 @@ pub fn expr_mask_shift_or(orig: &TokenTree, expr: &TokenTree, mask: u64, shift: }) } - /// returns orig & !((expr & mask) << shift) -pub fn expr_mask_shift_inverted_and(orig: &TokenTree, expr: &TokenTree, mask: u64, shift: i8) -> TokenTree { +pub fn expr_mask_shift_inverted_and( + orig: &TokenTree, + expr: &TokenTree, + mask: u64, + shift: i8, +) -> TokenTree { let span = expr.span(); let mask: TokenTree = proc_macro2::Literal::u64_unsuffixed(mask).into(); diff --git a/runtime/src/aarch64.rs b/runtime/src/aarch64.rs index 82aab20020..8e72d3a0ed 100644 --- a/runtime/src/aarch64.rs +++ b/runtime/src/aarch64.rs @@ -12,7 +12,7 @@ //! //! ## Enums //! -//! There are enumerator of every logically distinct register family usable in aarch64. +//! There are enumerator of every logically distinct register family usable in aarch64. //! These enums implement the [`Register`] trait and their discriminant values match their numeric encoding in dynamic register literals. //! //! *Note: The presence of some registers listed here is purely what is encodable. Check the relevant architecture documentation to find what is architecturally valid.* @@ -22,8 +22,10 @@ //! The aarch64 architecture allows encoding several special types of immediates. The encoding implementations for these immediate types have been exposed to assist the user //! in correctly using these instructions. They will return `Some(encoding)` only if the given value can be encoded losslessly in that immediate type. 
+use crate::relocations::{ + fits_signed_bitfield, ImpossibleRelocation, Relocation, RelocationKind, RelocationSize, +}; use crate::Register; -use crate::relocations::{Relocation, RelocationSize, RelocationKind, ImpossibleRelocation, fits_signed_bitfield}; use byteorder::{ByteOrder, LittleEndian}; use std::convert::TryFrom; @@ -53,52 +55,52 @@ impl Aarch64Relocation { Self::ADR => 0x9F00_001F, Self::ADRP => 0x9F00_001F, Self::TBZ => 0xFFF8_001F, - Self::Plain(_) => 0 + Self::Plain(_) => 0, } } fn encode(&self, value: isize) -> Result { - let value = i64::try_from(value).map_err(|_| ImpossibleRelocation { } )?; + let value = i64::try_from(value).map_err(|_| ImpossibleRelocation {})?; Ok(match self { Self::B => { if value & 3 != 0 || !fits_signed_bitfield(value >> 2, 26) { - return Err(ImpossibleRelocation { } ); + return Err(ImpossibleRelocation {}); } let value = (value >> 2) as u32; value & 0x3FF_FFFF - }, + } Self::BCOND => { if value & 3 != 0 || !fits_signed_bitfield(value >> 2, 19) { - return Err(ImpossibleRelocation { } ); + return Err(ImpossibleRelocation {}); } let value = (value >> 2) as u32; (value & 0x7FFFF) << 5 - }, + } Self::ADR => { if !fits_signed_bitfield(value, 21) { - return Err(ImpossibleRelocation { } ); + return Err(ImpossibleRelocation {}); } let low = (value) as u32; let high = (value >> 2) as u32; ((high & 0x7FFFF) << 5) | ((low & 3) << 29) - }, + } Self::ADRP => { let value = value + 0xFFF; if !fits_signed_bitfield(value >> 12, 21) { - return Err(ImpossibleRelocation { } ); + return Err(ImpossibleRelocation {}); } let low = (value >> 12) as u32; let high = (value >> 14) as u32; ((high & 0x7FFFF) << 5) | ((low & 3) << 29) - }, + } Self::TBZ => { if value & 3 != 0 || !fits_signed_bitfield(value >> 2, 14) { - return Err(ImpossibleRelocation { } ); + return Err(ImpossibleRelocation {}); } let value = (value >> 2) as u32; (value & 0x3FFF) << 5 - }, - Self::Plain(_) => return Err(ImpossibleRelocation { } ) + } + Self::Plain(_) => return Err(ImpossibleRelocation {}), }) } } @@ -112,7 +114,7 @@ impl Relocation for Aarch64Relocation { 2 => Self::ADR, 3 => Self::ADRP, 4 => Self::TBZ, - x => Self::Plain(RelocationSize::from_encoding(x - 4)) + x => Self::Plain(RelocationSize::from_encoding(x - 4)), } } fn from_size(size: RelocationSize) -> Self { @@ -145,24 +147,12 @@ impl Relocation for Aarch64Relocation { let mask = !self.op_mask(); let value = LittleEndian::read_u32(buf); let unpacked = match self { - Self::B => u64::from( - value & mask - ) << 2, - Self::BCOND => u64::from( - (value & mask) >> 5 - ) << 2, - Self::ADR => u64::from( - (((value >> 5 ) & 0x7FFFF) << 2) | - ((value >> 29) & 3 ) - ), - Self::ADRP => u64::from( - (((value >> 5 ) & 0x7FFFF) << 2) | - ((value >> 29) & 3 ) - ) << 12, - Self::TBZ => u64::from( - (value & mask) >> 5 - ) << 2, - Self::Plain(_) => unreachable!() + Self::B => u64::from(value & mask) << 2, + Self::BCOND => u64::from((value & mask) >> 5) << 2, + Self::ADR => u64::from((((value >> 5) & 0x7FFFF) << 2) | ((value >> 29) & 3)), + Self::ADRP => u64::from((((value >> 5) & 0x7FFFF) << 2) | ((value >> 29) & 3)) << 12, + Self::TBZ => u64::from((value & mask) >> 5) << 2, + Self::Plain(_) => unreachable!(), }; // Sign extend. 
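For reference, a minimal usage sketch of the immediate-encoding helpers this file exposes (not part of the patch; it assumes the runtime crate is consumed under its published name `dynasmrt` and relies only on the `Option` return contract described in the module docs above — the helpers return `Some(..)` only when the value is losslessly encodable):

use dynasmrt::aarch64::{encode_floating_point_immediate, encode_logical_immediate_32bit};

fn main() {
    // 0x0000_FFFF is a single contiguous run of ones, so it is a valid
    // AArch64 logical immediate and encodes successfully...
    assert!(encode_logical_immediate_32bit(0x0000_FFFF).is_some());
    // ...while 0x1234_5678 is not a rotated run of ones and cannot be encoded.
    assert!(encode_logical_immediate_32bit(0x1234_5678).is_none());

    // 0.5 = 1.0 * 2^-1 fits the 8-bit floating point immediate format
    // (sign, 3-bit exponent, 4-bit mantissa); 0.3 is not exactly representable.
    assert!(encode_floating_point_immediate(0.5).is_some());
    assert!(encode_floating_point_immediate(0.3).is_none());
}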
@@ -172,7 +162,7 @@ impl Relocation for Aarch64Relocation { Self::ADR => 21, Self::ADRP => 33, Self::TBZ => 14, - Self::Plain(_) => unreachable!() + Self::Plain(_) => unreachable!(), }; let offset = 1u64 << (bits - 1); let value: u64 = (unpacked ^ offset) - offset; @@ -194,7 +184,6 @@ pub type AssemblyModifier<'a> = crate::Modifier<'a, Aarch64Relocation>; /// An aarch64 UncommittedModifier. This is aliased here for backwards compatability. pub type UncommittedModifier<'a> = crate::UncommittedModifier<'a>; - /// Helper function for validating that a given value can be encoded as a 32-bit logical immediate pub fn encode_logical_immediate_32bit(value: u32) -> Option { let transitions = value ^ value.rotate_right(1); @@ -261,19 +250,42 @@ pub fn encode_floating_point_immediate(value: f32) -> Option { } } - /// 4 or 8-byte general purpopse registers, where X31 is the zero register. #[allow(missing_docs)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum RX { - X0 = 0x00, X1 = 0x01, X2 = 0x02, X3 = 0x03, - X4 = 0x04, X5 = 0x05, X6 = 0x06, X7 = 0x07, - X8 = 0x08, X9 = 0x09, X10= 0x0A, X11= 0x0B, - X12= 0x0C, X13= 0x0D, X14= 0x0E, X15= 0x0F, - X16= 0x10, X17= 0x11, X18= 0x12, X19= 0x13, - X20= 0x14, X21= 0x15, X22= 0x16, X23= 0x17, - X24= 0x18, X25= 0x19, X26= 0x1A, X27= 0x1B, - X28= 0x1C, X29= 0x1D, X30= 0x1E, XZR= 0x1F, + X0 = 0x00, + X1 = 0x01, + X2 = 0x02, + X3 = 0x03, + X4 = 0x04, + X5 = 0x05, + X6 = 0x06, + X7 = 0x07, + X8 = 0x08, + X9 = 0x09, + X10 = 0x0A, + X11 = 0x0B, + X12 = 0x0C, + X13 = 0x0D, + X14 = 0x0E, + X15 = 0x0F, + X16 = 0x10, + X17 = 0x11, + X18 = 0x12, + X19 = 0x13, + X20 = 0x14, + X21 = 0x15, + X22 = 0x16, + X23 = 0x17, + X24 = 0x18, + X25 = 0x19, + X26 = 0x1A, + X27 = 0x1B, + X28 = 0x1C, + X29 = 0x1D, + X30 = 0x1E, + XZR = 0x1F, } reg_impls!(RX); @@ -282,29 +294,77 @@ reg_impls!(RX); #[allow(missing_docs)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum RXSP { - X0 = 0x00, X1 = 0x01, X2 = 0x02, X3 = 0x03, - X4 = 0x04, X5 = 0x05, X6 = 0x06, X7 = 0x07, - X8 = 0x08, X9 = 0x09, X10= 0x0A, X11= 0x0B, - X12= 0x0C, X13= 0x0D, X14= 0x0E, X15= 0x0F, - X16= 0x10, X17= 0x11, X18= 0x12, X19= 0x13, - X20= 0x14, X21= 0x15, X22= 0x16, X23= 0x17, - X24= 0x18, X25= 0x19, X26= 0x1A, X27= 0x1B, - X28= 0x1C, X29= 0x1D, X30= 0x1E, SP = 0x1F, + X0 = 0x00, + X1 = 0x01, + X2 = 0x02, + X3 = 0x03, + X4 = 0x04, + X5 = 0x05, + X6 = 0x06, + X7 = 0x07, + X8 = 0x08, + X9 = 0x09, + X10 = 0x0A, + X11 = 0x0B, + X12 = 0x0C, + X13 = 0x0D, + X14 = 0x0E, + X15 = 0x0F, + X16 = 0x10, + X17 = 0x11, + X18 = 0x12, + X19 = 0x13, + X20 = 0x14, + X21 = 0x15, + X22 = 0x16, + X23 = 0x17, + X24 = 0x18, + X25 = 0x19, + X26 = 0x1A, + X27 = 0x1B, + X28 = 0x1C, + X29 = 0x1D, + X30 = 0x1E, + SP = 0x1F, } reg_impls!(RXSP); -/// 1, 2, 4, 8 or 16-bytes scalar FP / vector SIMD registers. +/// 1, 2, 4, 8 or 16-bytes scalar FP / vector SIMD registers. 
#[allow(missing_docs)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum RV { - V0 = 0x00, V1 = 0x01, V2 = 0x02, V3 = 0x03, - V4 = 0x04, V5 = 0x05, V6 = 0x06, V7 = 0x07, - V8 = 0x08, V9 = 0x09, V10= 0x0A, V11= 0x0B, - V12= 0x0C, V13= 0x0D, V14= 0x0E, V15= 0x0F, - V16= 0x10, V17= 0x11, V18= 0x12, V19= 0x13, - V20= 0x14, V21= 0x15, V22= 0x16, V23= 0x17, - V24= 0x18, V25= 0x19, V26= 0x1A, V27= 0x1B, - V28= 0x1C, V29= 0x1D, V30= 0x1E, V31= 0x1F, + V0 = 0x00, + V1 = 0x01, + V2 = 0x02, + V3 = 0x03, + V4 = 0x04, + V5 = 0x05, + V6 = 0x06, + V7 = 0x07, + V8 = 0x08, + V9 = 0x09, + V10 = 0x0A, + V11 = 0x0B, + V12 = 0x0C, + V13 = 0x0D, + V14 = 0x0E, + V15 = 0x0F, + V16 = 0x10, + V17 = 0x11, + V18 = 0x12, + V19 = 0x13, + V20 = 0x14, + V21 = 0x15, + V22 = 0x16, + V23 = 0x17, + V24 = 0x18, + V25 = 0x19, + V26 = 0x1A, + V27 = 0x1B, + V28 = 0x1C, + V29 = 0x1D, + V30 = 0x1E, + V31 = 0x1F, } reg_impls!(RV); diff --git a/runtime/src/components.rs b/runtime/src/components.rs index b7de56775c..4bac50d9b0 100644 --- a/runtime/src/components.rs +++ b/runtime/src/components.rs @@ -1,16 +1,16 @@ //! This module provides several reusable compoments for implementing assemblers -use std::io; use std::collections::hash_map::Entry; use std::collections::BTreeMap; -use std::sync::{Arc, RwLock, RwLockWriteGuard}; +use std::io; use std::mem; +use std::sync::{Arc, RwLock, RwLockWriteGuard}; use fnv::FnvHashMap; -use crate::{DynamicLabel, AssemblyOffset, DynasmError, LabelKind, DynasmLabelApi}; use crate::mmap::{ExecutableBuffer, MutableBuffer}; -use crate::relocations::{Relocation, RelocationKind, RelocationSize, ImpossibleRelocation}; +use crate::relocations::{ImpossibleRelocation, Relocation, RelocationKind, RelocationSize}; +use crate::{AssemblyOffset, DynamicLabel, DynasmError, DynasmLabelApi, LabelKind}; /// A static label represents either a local label or a global label reference. /// Global labels are unique names, which can be referenced multiple times, but only defined once. @@ -27,18 +27,12 @@ pub struct StaticLabel { impl StaticLabel { /// Create a new static label for a global label pub fn global(name: &'static str) -> StaticLabel { - StaticLabel { - name, - version: 0 - } + StaticLabel { name, version: 0 } } /// Create a new static label for a local label, with the given version id to distinguish it. pub fn local(name: &'static str, version: usize) -> StaticLabel { - StaticLabel { - name, - version - } + StaticLabel { name, version } } /// Returns if this static label represents a global label @@ -60,10 +54,7 @@ impl StaticLabel { /// Returns the representation of the first local label used with the given name. 
pub fn first(name: &'static str) -> StaticLabel { - StaticLabel { - name, - version: 1 - } + StaticLabel { name, version: 1 } } /// Returns the name of this static label @@ -84,7 +75,7 @@ pub struct MemoryManager { asmoffset: usize, // the address that the current execbuffer starts at - execbuffer_addr: usize + execbuffer_addr: usize, } impl MemoryManager { @@ -97,7 +88,7 @@ impl MemoryManager { execbuffer: Arc::new(RwLock::new(execbuffer)), execbuffer_size: initial_mmap_size, asmoffset: 0, - execbuffer_addr + execbuffer_addr, }) } @@ -113,7 +104,10 @@ impl MemoryManager { /// Commits the data from `new` into the managed memory, calling `f` when the buffer is moved to fix anything /// that relies on the address of the buffer - pub fn commit(&mut self, new: &mut Vec, f: F) where F: FnOnce(&mut [u8], usize, usize) { + pub fn commit(&mut self, new: &mut Vec, f: F) + where + F: FnOnce(&mut [u8], usize, usize), + { let old_asmoffset = self.asmoffset; let new_asmoffset = self.asmoffset + new.len(); @@ -128,11 +122,12 @@ impl MemoryManager { } // create a larger writable buffer - let mut new_buffer = MutableBuffer::new(self.execbuffer_size).expect("Could not allocate a larger buffer"); + let mut new_buffer = MutableBuffer::new(self.execbuffer_size) + .expect("Could not allocate a larger buffer"); new_buffer.set_len(new_asmoffset); // copy over the data - new_buffer[.. old_asmoffset].copy_from_slice(&self.execbuffer.read().unwrap()); + new_buffer[..old_asmoffset].copy_from_slice(&self.execbuffer.read().unwrap()); new_buffer[old_asmoffset..].copy_from_slice(&new); let new_buffer_addr = new_buffer.as_ptr() as usize; @@ -141,21 +136,25 @@ impl MemoryManager { // swap the buffers self.execbuffer_addr = new_buffer_addr; - *self.execbuffer.write().unwrap() = new_buffer.make_exec().expect("Could not swap buffer protection modes") - + *self.execbuffer.write().unwrap() = new_buffer + .make_exec() + .expect("Could not swap buffer protection modes") } else { - // temporarily change the buffer protection modes and copy in new data let mut lock = self.write(); let buffer = mem::replace(&mut *lock, ExecutableBuffer::default()); - let mut buffer = buffer.make_mut().expect("Could not swap buffer protection modes"); + let mut buffer = buffer + .make_mut() + .expect("Could not swap buffer protection modes"); // update buffer and length buffer.set_len(new_asmoffset); buffer[old_asmoffset..].copy_from_slice(&new); // repack the buffer - let buffer = buffer.make_exec().expect("Could not swap buffer protection modes"); + let buffer = buffer + .make_exec() + .expect("Could not swap buffer protection modes"); *lock = buffer; } @@ -175,7 +174,7 @@ impl MemoryManager { Err(arc) => Err(Self { execbuffer: arc, ..self - }) + }), } } @@ -185,7 +184,6 @@ impl MemoryManager { } } - /// A registry of labels. Contains all necessessities for keeping track of dynasm labels. /// This is useful when implementing your own assembler and can also be used to query /// assemblers for the offsets of labels. @@ -212,7 +210,10 @@ impl LabelRegistry { /// Create a new, empty label registry with `capacity` space for each different label type. 
pub fn with_capacity(locals: usize, globals: usize, dynamics: usize) -> LabelRegistry { LabelRegistry { - static_labels: FnvHashMap::with_capacity_and_hasher(locals + globals, Default::default()), + static_labels: FnvHashMap::with_capacity_and_hasher( + locals + globals, + Default::default(), + ), dynamic_labels: Vec::with_capacity(dynamics), local_versions: FnvHashMap::with_capacity_and_hasher(locals, Default::default()), } @@ -233,17 +234,25 @@ impl LabelRegistry { } /// Define a the dynamic label `id` to be located at `offset`. - pub fn define_dynamic(&mut self, id: DynamicLabel, offset: AssemblyOffset) -> Result<(), DynasmError> { + pub fn define_dynamic( + &mut self, + id: DynamicLabel, + offset: AssemblyOffset, + ) -> Result<(), DynasmError> { match self.dynamic_labels.get_mut(id.0) { Some(Some(_)) => return Err(DynasmError::DuplicateLabel(LabelKind::Dynamic(id))), - Some(e) => *e = Some(offset), - None => return Err(DynasmError::UnknownLabel(LabelKind::Dynamic(id))), + Some(e) => *e = Some(offset), + None => return Err(DynasmError::UnknownLabel(LabelKind::Dynamic(id))), } Ok(()) } /// Define a the global label `name` to be located at `offset`. - pub fn define_global(&mut self, name: &'static str, offset: AssemblyOffset) -> Result<(), DynasmError> { + pub fn define_global( + &mut self, + name: &'static str, + offset: AssemblyOffset, + ) -> Result<(), DynasmError> { match self.static_labels.entry(StaticLabel::global(name)) { Entry::Occupied(_) => Err(DynasmError::DuplicateLabel(LabelKind::Global(name))), Entry::Vacant(v) => { @@ -259,39 +268,44 @@ impl LabelRegistry { Entry::Occupied(mut o) => { *o.get_mut() += 1; *o.get() - }, + } Entry::Vacant(v) => { v.insert(1); 1 } }; - self.static_labels.insert(StaticLabel::local(name, generation), offset); + self.static_labels + .insert(StaticLabel::local(name, generation), offset); } /// Turns a local label into a static label, by adding some extra information to it /// so we know what local label it is even after another has been defined pub fn place_local_reference(&self, name: &'static str) -> Option { - self.local_versions.get(name).map(|&version| StaticLabel::local(name, version)) + self.local_versions + .get(name) + .map(|&version| StaticLabel::local(name, version)) } /// Returns the offset at which the dynamic label `id` was defined, if one was defined. pub fn resolve_dynamic(&self, id: DynamicLabel) -> Result { - self.dynamic_labels.get(id.0).and_then(|&e| e).ok_or_else(|| DynasmError::UnknownLabel(LabelKind::Dynamic(id))) + self.dynamic_labels + .get(id.0) + .and_then(|&e| e) + .ok_or_else(|| DynasmError::UnknownLabel(LabelKind::Dynamic(id))) } /// Returns the offset at which the global label `name` was defined, if one was defined. pub fn resolve_static(&self, label: &StaticLabel) -> Result { - self.static_labels.get(label).cloned().ok_or_else(|| DynasmError::UnknownLabel( - if label.is_global() { + self.static_labels.get(label).cloned().ok_or_else(|| { + DynasmError::UnknownLabel(if label.is_global() { LabelKind::Global(label.name) } else { LabelKind::Local(label.name) - } - )) + }) + }) } } - /// An abstraction of a relocation of type `R`, located at `location`. 
#[derive(Clone, Debug)] pub struct PatchLoc { @@ -309,13 +323,19 @@ pub struct PatchLoc { impl PatchLoc { /// create a new `PatchLoc` - pub fn new(location: AssemblyOffset, target_offset: isize, field_offset: u8, ref_offset: u8, relocation: R) -> PatchLoc { + pub fn new( + location: AssemblyOffset, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + relocation: R, + ) -> PatchLoc { PatchLoc { location, field_offset, ref_offset, relocation, - target_offset + target_offset, } } @@ -323,23 +343,33 @@ impl PatchLoc { /// `buf_offset` is a value that is subtracted from this range when the buffer you want to slice /// with this range is only a part of a bigger buffer. pub fn range(&self, buf_offset: usize) -> std::ops::Range { - let field_offset = self.location.0 - buf_offset - self.field_offset as usize; - field_offset .. field_offset + self.relocation.size() + let field_offset = self.location.0 - buf_offset - self.field_offset as usize; + field_offset..field_offset + self.relocation.size() } /// Returns the actual value that should be inserted at the relocation site. pub fn value(&self, target: usize, buf_addr: usize) -> isize { (match self.relocation.kind() { - RelocationKind::Relative => target.wrapping_sub(self.location.0 - self.ref_offset as usize), - RelocationKind::RelToAbs => target.wrapping_sub(self.location.0 - self.ref_offset as usize + buf_addr), - RelocationKind::AbsToRel => target + buf_addr - }) as isize + self.target_offset + RelocationKind::Relative => { + target.wrapping_sub(self.location.0 - self.ref_offset as usize) + } + RelocationKind::RelToAbs => { + target.wrapping_sub(self.location.0 - self.ref_offset as usize + buf_addr) + } + RelocationKind::AbsToRel => target + buf_addr, + }) as isize + + self.target_offset } /// Patch `buffer` so that this relocation patch will point to `target`. /// `buf_addr` is the address that the assembling buffer will come to reside at when it is assembled. /// `target` is the offset that this relocation will be targetting. - pub fn patch(&self, buffer: &mut [u8], buf_addr: usize, target: usize) -> Result<(), ImpossibleRelocation> { + pub fn patch( + &self, + buffer: &mut [u8], + buf_addr: usize, + target: usize, + ) -> Result<(), ImpossibleRelocation> { let value = self.value(target, buf_addr); self.relocation.write_value(buffer, value) } @@ -361,13 +391,11 @@ impl PatchLoc { pub fn needs_adjustment(&self) -> bool { match self.relocation.kind() { RelocationKind::Relative => false, - RelocationKind::RelToAbs - | RelocationKind::AbsToRel => true, + RelocationKind::RelToAbs | RelocationKind::AbsToRel => true, } } } - /// A registry of relocations and the respective labels they point towards. #[derive(Debug, Default)] pub struct RelocRegistry { @@ -404,36 +432,40 @@ impl RelocRegistry { /// Return an iterator through all defined relocations targeting global labels and the labels they target. /// These relocations are removed from the registry. - pub fn take_statics<'a>(&'a mut self) -> impl Iterator, StaticLabel)> + 'a { + pub fn take_statics<'a>(&'a mut self) -> impl Iterator, StaticLabel)> + 'a { self.static_targets.drain(..) } /// Return an iterator through all defined relocations targeting dynamic labels and the labels they target. /// These relocations are removed from the registry. - pub fn take_dynamics<'a>(&'a mut self) -> impl Iterator, DynamicLabel)> + 'a { + pub fn take_dynamics<'a>( + &'a mut self, + ) -> impl Iterator, DynamicLabel)> + 'a { self.dynamic_targets.drain(..) 
} } - /// A registry of relocations that have been encoded previously, but need to be adjusted when the address of the buffer they /// reside in changes. #[derive(Debug, Default)] pub struct ManagedRelocs { - managed: BTreeMap> + managed: BTreeMap>, } impl ManagedRelocs { /// Create a new, empty managed relocation registry. pub fn new() -> Self { Self { - managed: BTreeMap::new() + managed: BTreeMap::new(), } } /// Add a relocation to this registry. pub fn add(&mut self, patchloc: PatchLoc) { - self.managed.insert(patchloc.location.0 - patchloc.field_offset as usize, patchloc); + self.managed.insert( + patchloc.location.0 - patchloc.field_offset as usize, + patchloc, + ); } /// Take all items from another registry and add them to this registry @@ -450,19 +482,18 @@ impl ManagedRelocs { return; } - let keys: Vec<_> = self.managed.range(start .. end).map(|(&k, _)| k).collect(); + let keys: Vec<_> = self.managed.range(start..end).map(|(&k, _)| k).collect(); for k in keys { self.managed.remove(&k); } } /// Iterate through all defined managed relocations. - pub fn iter<'a>(&'a self) -> impl Iterator> + 'a { + pub fn iter<'a>(&'a self) -> impl Iterator> + 'a { self.managed.values() - } + } } - #[derive(Clone, Debug)] enum LitPoolEntry { U8(u8), @@ -589,20 +620,44 @@ impl LitPool { LitPoolEntry::U64(value) => assembler.push_u64(value), LitPoolEntry::Dynamic(size, id) => { Self::pad_sized(size, assembler); - assembler.dynamic_relocation(id, 0, size as u8, size as u8, D::Relocation::from_size(size)); - }, + assembler.dynamic_relocation( + id, + 0, + size as u8, + size as u8, + D::Relocation::from_size(size), + ); + } LitPoolEntry::Global(size, name) => { Self::pad_sized(size, assembler); - assembler.global_relocation(name, 0, size as u8, size as u8, D::Relocation::from_size(size)); - }, + assembler.global_relocation( + name, + 0, + size as u8, + size as u8, + D::Relocation::from_size(size), + ); + } LitPoolEntry::Forward(size, name) => { Self::pad_sized(size, assembler); - assembler.forward_relocation(name, 0, size as u8, size as u8, D::Relocation::from_size(size)); - }, + assembler.forward_relocation( + name, + 0, + size as u8, + size as u8, + D::Relocation::from_size(size), + ); + } LitPoolEntry::Backward(size, name) => { Self::pad_sized(size, assembler); - assembler.backward_relocation(name, 0, size as u8, size as u8, D::Relocation::from_size(size)); - }, + assembler.backward_relocation( + name, + 0, + size as u8, + size as u8, + D::Relocation::from_size(size), + ); + } LitPoolEntry::Align(with, alignment) => assembler.align(alignment, with), } } @@ -612,8 +667,8 @@ impl LitPool { #[cfg(test)] mod tests { use crate::*; - use std::fmt::Debug; use relocations::{Relocation, RelocationSize}; + use std::fmt::Debug; #[test] fn test_litpool_size() { @@ -674,13 +729,14 @@ mod tests { assert_eq!(ops.commit(), Ok(())); let buf = ops.finalize().unwrap(); - assert_eq!(&*buf, &[ - 0x12, 0x34, 0x56, 0x00, 0x9A, 0x78, 0x00, 0x00, - 0x12, 0xF0, 0xDE, 0xBC, 0x00, 0x00, 0x00, 0x00, - 0x12, 0xF0, 0xDE, 0xBC, 0x9A, 0x78, 0x56, 0x34, - 24 , 0xCC, 0xCC, 0xCC, 20 , 0 , 0x00, 0x00, - 16 , 0 , 0 , 0 , 0x00, 0x00, 0x00, 0x00, - 0xD8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFFu8, - ] as &[u8]); + assert_eq!( + &*buf, + &[ + 0x12, 0x34, 0x56, 0x00, 0x9A, 0x78, 0x00, 0x00, 0x12, 0xF0, 0xDE, 0xBC, 0x00, 0x00, + 0x00, 0x00, 0x12, 0xF0, 0xDE, 0xBC, 0x9A, 0x78, 0x56, 0x34, 24, 0xCC, 0xCC, 0xCC, + 20, 0, 0x00, 0x00, 16, 0, 0, 0, 0x00, 0x00, 0x00, 0x00, 0xD8, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFFu8, + ] as &[u8] + ); } } diff 
--git a/runtime/src/lib.rs b/runtime/src/lib.rs index 31953c868c..2c62d20f1a 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -4,8 +4,8 @@ //! Assemblers that implement these traits, and relocation models for the various supported architectures. Additionally, it also provides the tools //! to write your own Assemblers using these components. -pub mod mmap; pub mod components; +pub mod mmap; pub mod relocations; /// Helper to implement common traits on register enums. @@ -22,41 +22,46 @@ macro_rules! reg_impls { rq.code() } } - } + }; } +pub mod aarch64; pub mod x64; pub mod x86; -pub mod aarch64; pub use crate::mmap::ExecutableBuffer; pub use dynasm::{dynasm, dynasm_backwards}; -use crate::components::{MemoryManager, LabelRegistry, RelocRegistry, ManagedRelocs, PatchLoc, StaticLabel}; +use crate::components::{ + LabelRegistry, ManagedRelocs, MemoryManager, PatchLoc, RelocRegistry, StaticLabel, +}; use crate::relocations::Relocation; -use std::hash::Hash; -use std::iter::Extend; -use std::sync::{Arc, RwLock, RwLockReadGuard}; -use std::io; use std::error; use std::fmt::{self, Debug}; +use std::hash::Hash; +use std::io; +use std::iter::Extend; use std::mem; +use std::sync::{Arc, RwLock, RwLockReadGuard}; /// This macro takes a *const pointer from the source operand, and then casts it to the desired return type. /// this allows it to be used as an easy shorthand for passing pointers as dynasm immediate arguments. #[macro_export] macro_rules! Pointer { - ($e:expr) => {$e as *const _ as _}; + ($e:expr) => { + $e as *const _ as _ + }; } /// Preforms the same action as the `Pointer!` macro, but casts to a *mut pointer. #[macro_export] macro_rules! MutPointer { - ($e:expr) => {$e as *mut _ as _}; + ($e:expr) => { + $e as *mut _ as _ + }; } - /// A struct representing an offset into the assembling buffer of a `DynasmLabelApi` struct. /// The wrapped `usize` is the offset from the start of the assembling buffer in bytes. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] @@ -66,7 +71,6 @@ pub struct AssemblyOffset(pub usize); #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct DynamicLabel(usize); - impl DynamicLabel { /// Get the internal ID of this dynamic label. This is only useful for debugging purposes. pub fn get_id(self) -> usize { @@ -74,12 +78,11 @@ impl DynamicLabel { } } - /// A read-only shared reference to the executable buffer inside an `Assembler`. By /// locking it the internal `ExecutableBuffer` can be accessed and executed. #[derive(Debug, Clone)] pub struct Executor { - execbuffer: Arc> + execbuffer: Arc>, } /// A read-only lockable reference to the internal `ExecutableBuffer` of an `Assembler`. @@ -95,7 +98,6 @@ impl Executor { } } - /// A description of a label. Used for error reporting. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum LabelKind { @@ -104,7 +106,7 @@ pub enum LabelKind { /// A global label, like `->label:` Global(&'static str), /// A dynamic label, like `=>value:` - Dynamic(DynamicLabel) + Dynamic(DynamicLabel), } impl fmt::Display for LabelKind { @@ -112,12 +114,11 @@ impl fmt::Display for LabelKind { match self { Self::Local(s) => write!(f, "label {}", s), Self::Global(s) => write!(f, "label ->{}", s), - Self::Dynamic(id) => write!(f, "label =>{}", id.get_id()) + Self::Dynamic(id) => write!(f, "label =>{}", id.get_id()), } } } - /// A description of a relocation target. Used for error reporting. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum TargetKind { @@ -145,7 +146,6 @@ impl fmt::Display for TargetKind { } } - /// The various error types generated by dynasm functions. #[derive(Debug, Clone, PartialEq, Eq)] pub enum DynasmError { @@ -181,7 +181,6 @@ impl error::Error for DynasmError { } } - /// This trait represents the interface that must be implemented to allow /// the dynasm preprocessor to assemble into a datastructure. pub trait DynasmApi: Extend + for<'a> Extend<&'a u8> { @@ -235,65 +234,159 @@ pub trait DynasmApi: Extend + for<'a> Extend<&'a u8> { } /// This trait extends DynasmApi to not only allow assembling, but also labels and various directives -pub trait DynasmLabelApi : DynasmApi { - /// The relocation info type this assembler uses. +pub trait DynasmLabelApi: DynasmApi { + /// The relocation info type this assembler uses. type Relocation: Relocation; /// Record the definition of a local label - fn local_label( &mut self, name: &'static str); + fn local_label(&mut self, name: &'static str); /// Record the definition of a global label - fn global_label( &mut self, name: &'static str); + fn global_label(&mut self, name: &'static str); /// Record the definition of a dynamic label fn dynamic_label(&mut self, id: DynamicLabel); /// Record a relocation spot for a forward reference to a local label - fn forward_reloc( &mut self, name: &'static str, target_offset: isize, field_offset: u8, ref_offset: u8, kind: ::Encoding) { - self.forward_relocation(name, target_offset, field_offset, ref_offset, Self::Relocation::from_encoding(kind)) + fn forward_reloc( + &mut self, + name: &'static str, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: ::Encoding, + ) { + self.forward_relocation( + name, + target_offset, + field_offset, + ref_offset, + Self::Relocation::from_encoding(kind), + ) } /// Record a relocation spot for a backward reference to a local label - fn backward_reloc(&mut self, name: &'static str, target_offset: isize, field_offset: u8, ref_offset: u8, kind: ::Encoding) { - self.backward_relocation(name, target_offset, field_offset, ref_offset, Self::Relocation::from_encoding(kind)) + fn backward_reloc( + &mut self, + name: &'static str, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: ::Encoding, + ) { + self.backward_relocation( + name, + target_offset, + field_offset, + ref_offset, + Self::Relocation::from_encoding(kind), + ) } /// Record a relocation spot for a reference to a global label - fn global_reloc( &mut self, name: &'static str, target_offset: isize, field_offset: u8, ref_offset: u8, kind: ::Encoding) { - self.global_relocation(name, target_offset, field_offset, ref_offset, Self::Relocation::from_encoding(kind)) + fn global_reloc( + &mut self, + name: &'static str, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: ::Encoding, + ) { + self.global_relocation( + name, + target_offset, + field_offset, + ref_offset, + Self::Relocation::from_encoding(kind), + ) } /// Record a relocation spot for a reference to a dynamic label - fn dynamic_reloc( &mut self, id: DynamicLabel, target_offset: isize, field_offset: u8, ref_offset: u8, kind: ::Encoding) { - self.dynamic_relocation(id, target_offset, field_offset, ref_offset, Self::Relocation::from_encoding(kind)) + fn dynamic_reloc( + &mut self, + id: DynamicLabel, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: ::Encoding, + ) { + self.dynamic_relocation( + id, + target_offset, + field_offset, + ref_offset, + 
Self::Relocation::from_encoding(kind), + ) } /// Record a relocation spot to an arbitrary target. - fn bare_reloc(&mut self, target: usize, field_offset: u8, ref_offset: u8, kind: ::Encoding) { - self.bare_relocation(target, field_offset, ref_offset, Self::Relocation::from_encoding(kind)) + fn bare_reloc( + &mut self, + target: usize, + field_offset: u8, + ref_offset: u8, + kind: ::Encoding, + ) { + self.bare_relocation( + target, + field_offset, + ref_offset, + Self::Relocation::from_encoding(kind), + ) } /// Equivalent of forward_reloc, but takes a non-encoded relocation - fn forward_relocation( &mut self, name: &'static str, target_offset: isize, field_offset: u8, ref_offset: u8, kind: Self::Relocation); + fn forward_relocation( + &mut self, + name: &'static str, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: Self::Relocation, + ); /// Equivalent of backward_reloc, but takes a non-encoded relocation - fn backward_relocation(&mut self, name: &'static str, target_offset: isize, field_offset: u8, ref_offset: u8, kind: Self::Relocation); + fn backward_relocation( + &mut self, + name: &'static str, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: Self::Relocation, + ); /// Equivalent of global_reloc, but takes a non-encoded relocation - fn global_relocation( &mut self, name: &'static str, target_offset: isize, field_offset: u8, ref_offset: u8, kind: Self::Relocation); + fn global_relocation( + &mut self, + name: &'static str, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: Self::Relocation, + ); /// Equivalent of dynamic_reloc, but takes a non-encoded relocation - fn dynamic_relocation( &mut self, id: DynamicLabel, target_offset: isize, field_offset: u8, ref_offset: u8, kind: Self::Relocation); + fn dynamic_relocation( + &mut self, + id: DynamicLabel, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: Self::Relocation, + ); /// Equivalent of bare_reloc, but takes a non-encoded relocation - fn bare_relocation(&mut self, target: usize, field_offset: u8, ref_offset: u8, kind: Self::Relocation); + fn bare_relocation( + &mut self, + target: usize, + field_offset: u8, + ref_offset: u8, + kind: Self::Relocation, + ); } - /// An assembler that is purely a `Vec`. It doesn't support labels or architecture-specific directives, /// but can be used to easily inspect generated code. It is intended to be used in testcases. #[derive(Debug, Clone)] pub struct SimpleAssembler { /// The assembling buffer. - pub ops: Vec + pub ops: Vec, } impl SimpleAssembler { /// Creates a new `SimpleAssembler`, containing an empty `Vec`. pub fn new() -> SimpleAssembler { - SimpleAssembler { - ops: Vec::new() - } + SimpleAssembler { ops: Vec::new() } } /// Use an `UncommittedModifier` to alter uncommitted code. @@ -308,13 +401,19 @@ impl SimpleAssembler { } impl Extend for SimpleAssembler { - fn extend(&mut self, iter: T) where T: IntoIterator { + fn extend(&mut self, iter: T) + where + T: IntoIterator, + { self.ops.extend(iter) } } impl<'a> Extend<&'a u8> for SimpleAssembler { - fn extend(&mut self, iter: T) where T: IntoIterator { + fn extend(&mut self, iter: T) + where + T: IntoIterator, + { self.ops.extend(iter) } } @@ -329,14 +428,13 @@ impl DynasmApi for SimpleAssembler { fn align(&mut self, alignment: usize, with: u8) { let offset = self.offset().0 % alignment; if offset != 0 { - for _ in offset .. 
alignment { + for _ in offset..alignment { self.push(with); } } } } - /// An assembler that assembles into a `Vec`, while supporting labels. To support the different types of relocations /// it requires a base address of the to be assembled code to be specified. #[derive(Debug)] @@ -356,7 +454,7 @@ impl VecAssembler { baseaddr, labels: LabelRegistry::new(), relocs: RelocRegistry::new(), - error: None + error: None, } } @@ -369,15 +467,21 @@ impl VecAssembler { /// `dynamic_labels` determines the preallocated space for dynamic labels definitions. /// `static_references` determines the preallocated space for references to local/global labels. /// `dynamic_references` determines the preallocated space for references to dynamic labels. - pub fn new_with_capacity(baseaddr: usize, ops_capacity: usize, - local_labels: usize, global_labels: usize, dynamic_labels: usize, - static_references: usize, dynamic_references: usize) -> VecAssembler { + pub fn new_with_capacity( + baseaddr: usize, + ops_capacity: usize, + local_labels: usize, + global_labels: usize, + dynamic_labels: usize, + static_references: usize, + dynamic_references: usize, + ) -> VecAssembler { VecAssembler { ops: Vec::with_capacity(ops_capacity), baseaddr, labels: LabelRegistry::with_capacity(local_labels, global_labels, dynamic_labels), relocs: RelocRegistry::with_capacity(static_references, dynamic_references), - error: None + error: None, } } @@ -404,13 +508,11 @@ impl VecAssembler { let target = self.labels.resolve_static(&label)?; let buf = &mut self.ops[loc.range(0)]; if loc.patch(buf, self.baseaddr, target.0).is_err() { - return Err(DynasmError::ImpossibleRelocation( - if label.is_global() { - TargetKind::Global(label.get_name()) - } else { - TargetKind::Local(label.get_name()) - } - )); + return Err(DynasmError::ImpossibleRelocation(if label.is_global() { + TargetKind::Global(label.get_name()) + } else { + TargetKind::Local(label.get_name()) + })); } } @@ -457,7 +559,7 @@ impl VecAssembler { } /// Equivalent of take, but instead of allocating a new vector it simply provides a draining iterator over the internal contents. - pub fn drain<'a>(&'a mut self) -> Result + 'a, DynasmError> { + pub fn drain<'a>(&'a mut self) -> Result + 'a, DynasmError> { self.commit()?; self.labels.clear(); Ok(self.ops.drain(..)) @@ -465,13 +567,19 @@ impl VecAssembler { } impl Extend for VecAssembler { - fn extend(&mut self, iter: T) where T: IntoIterator { + fn extend(&mut self, iter: T) + where + T: IntoIterator, + { self.ops.extend(iter) } } impl<'a, R: Relocation> Extend<&'a u8> for VecAssembler { - fn extend(&mut self, iter: T) where T: IntoIterator { + fn extend(&mut self, iter: T) + where + T: IntoIterator, + { self.ops.extend(iter) } } @@ -486,7 +594,7 @@ impl DynasmApi for VecAssembler { fn align(&mut self, alignment: usize, with: u8) { let offset = self.offset().0 % alignment; if offset != 0 { - for _ in offset .. 
alignment { + for _ in offset..alignment { self.push(with); } } @@ -500,7 +608,7 @@ impl DynasmLabelApi for VecAssembler { let offset = self.offset(); self.labels.define_local(name, offset); } - fn global_label( &mut self, name: &'static str) { + fn global_label(&mut self, name: &'static str) { let offset = self.offset(); if let Err(e) = self.labels.define_global(name, offset) { self.error = Some(e) @@ -512,24 +620,61 @@ impl DynasmLabelApi for VecAssembler { self.error = Some(e) } } - fn global_relocation(&mut self, name: &'static str, target_offset: isize, field_offset: u8, ref_offset: u8, kind: R) { + fn global_relocation( + &mut self, + name: &'static str, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: R, + ) { let location = self.offset(); let label = StaticLabel::global(name); - self.relocs.add_static(label, PatchLoc::new(location, target_offset, field_offset, ref_offset, kind)); - } - fn dynamic_relocation(&mut self, id: DynamicLabel, target_offset: isize, field_offset: u8, ref_offset: u8, kind: R) { + self.relocs.add_static( + label, + PatchLoc::new(location, target_offset, field_offset, ref_offset, kind), + ); + } + fn dynamic_relocation( + &mut self, + id: DynamicLabel, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: R, + ) { let location = self.offset(); - self.relocs.add_dynamic(id, PatchLoc::new(location, target_offset, field_offset, ref_offset, kind)); - } - fn forward_relocation(&mut self, name: &'static str, target_offset: isize, field_offset: u8, ref_offset: u8, kind: R) { + self.relocs.add_dynamic( + id, + PatchLoc::new(location, target_offset, field_offset, ref_offset, kind), + ); + } + fn forward_relocation( + &mut self, + name: &'static str, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: R, + ) { let location = self.offset(); let label = match self.labels.place_local_reference(name) { Some(label) => label.next(), None => StaticLabel::first(name), }; - self.relocs.add_static(label, PatchLoc::new(location, target_offset, field_offset, ref_offset, kind)); - } - fn backward_relocation(&mut self, name: &'static str, target_offset: isize, field_offset: u8, ref_offset: u8, kind: R) { + self.relocs.add_static( + label, + PatchLoc::new(location, target_offset, field_offset, ref_offset, kind), + ); + } + fn backward_relocation( + &mut self, + name: &'static str, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: R, + ) { let location = self.offset(); let label = match self.labels.place_local_reference(name) { Some(label) => label, @@ -538,19 +683,23 @@ impl DynasmLabelApi for VecAssembler { return; } }; - self.relocs.add_static(label, PatchLoc::new(location, target_offset, field_offset, ref_offset, kind)); + self.relocs.add_static( + label, + PatchLoc::new(location, target_offset, field_offset, ref_offset, kind), + ); } fn bare_relocation(&mut self, target: usize, field_offset: u8, ref_offset: u8, kind: R) { let location = self.offset(); let loc = PatchLoc::new(location, 0, field_offset, ref_offset, kind); let buf = &mut self.ops[loc.range(0)]; if loc.patch(buf, self.baseaddr, target).is_err() { - self.error = Some(DynasmError::ImpossibleRelocation(TargetKind::Extern(target))) + self.error = Some(DynasmError::ImpossibleRelocation(TargetKind::Extern( + target, + ))) } } } - /// A full assembler implementation. Supports labels, all types of relocations, /// incremental compilation and multithreaded execution with simultaneous compilation. 
/// Its implementation guarantees no memory is executable and writable at the same time. @@ -573,7 +722,7 @@ impl Assembler { labels: LabelRegistry::new(), relocs: RelocRegistry::new(), managed: ManagedRelocs::new(), - error: None + error: None, }) } @@ -593,13 +742,17 @@ impl Assembler { /// no code can be executed as the relevant pages are remapped as writable. /// This API supports defining new labels/relocations, and overwriting previously defined relocations. pub fn alter(&mut self, f: F) -> Result - where F: FnOnce(&mut Modifier) -> O { + where + F: FnOnce(&mut Modifier) -> O, + { self.commit()?; // swap out a buffer from base let mut lock = self.memory.write(); let buffer = mem::replace(&mut *lock, ExecutableBuffer::default()); - let mut buffer = buffer.make_mut().expect("Could not swap buffer protection modes"); + let mut buffer = buffer + .make_mut() + .expect("Could not swap buffer protection modes"); // construct the modifier let mut modifier = Modifier { @@ -612,7 +765,7 @@ impl Assembler { old_managed: &mut self.managed, new_managed: ManagedRelocs::new(), - error: None + error: None, }; // execute the user code @@ -622,7 +775,9 @@ impl Assembler { modifier.encode_relocs()?; // repack the buffer - let buffer = buffer.make_exec().expect("Could not swap buffer protection modes"); + let buffer = buffer + .make_exec() + .expect("Could not swap buffer protection modes"); *lock = buffer; // call it a day @@ -637,16 +792,17 @@ impl Assembler { let managed = &self.managed; let error = &mut self.error; - self.memory.commit(&mut self.ops, |buffer, old_addr, new_addr| { - let change = new_addr.wrapping_sub(old_addr) as isize; + self.memory + .commit(&mut self.ops, |buffer, old_addr, new_addr| { + let change = new_addr.wrapping_sub(old_addr) as isize; - for reloc in managed.iter() { - let buf = &mut buffer[reloc.range(0)]; - if reloc.adjust(buf, change).is_err() { - *error = Some(DynasmError::ImpossibleRelocation(TargetKind::Managed)) + for reloc in managed.iter() { + let buf = &mut buffer[reloc.range(0)]; + if reloc.adjust(buf, change).is_err() { + *error = Some(DynasmError::ImpossibleRelocation(TargetKind::Managed)) + } } - } - }); + }); if let Some(e) = self.error.take() { return Err(e); @@ -657,20 +813,18 @@ impl Assembler { /// Finalize this assembler, returning the internal `ExecutableBuffer` if no `Executor` instances exist. /// This panics if any uncommitted changes caused errors near the end. To handle these, call `commit()` explicitly beforehand. 
pub fn finalize(mut self) -> Result { - self.commit().expect("Errors were encountered when committing before finalization"); + self.commit() + .expect("Errors were encountered when committing before finalization"); match self.memory.finalize() { Ok(execbuffer) => Ok(execbuffer), - Err(memory) => Err(Self { - memory, - ..self - }) + Err(memory) => Err(Self { memory, ..self }), } } /// Create an executor which can be used to execute code while still assembling code pub fn reader(&self) -> Executor { Executor { - execbuffer: self.memory.reader() + execbuffer: self.memory.reader(), } } @@ -700,13 +854,11 @@ impl Assembler { let target = self.labels.resolve_static(&label)?; let buf = &mut buf[loc.range(buf_offset)]; if loc.patch(buf, buf_addr, target.0).is_err() { - return Err(DynasmError::ImpossibleRelocation( - if label.is_global() { - TargetKind::Global(label.get_name()) - } else { - TargetKind::Local(label.get_name()) - } - )); + return Err(DynasmError::ImpossibleRelocation(if label.is_global() { + TargetKind::Global(label.get_name()) + } else { + TargetKind::Local(label.get_name()) + })); } if loc.needs_adjustment() { self.managed.add(loc) @@ -730,13 +882,19 @@ impl Assembler { } impl Extend for Assembler { - fn extend(&mut self, iter: T) where T: IntoIterator { + fn extend(&mut self, iter: T) + where + T: IntoIterator, + { self.ops.extend(iter) } } impl<'a, R: Relocation> Extend<&'a u8> for Assembler { - fn extend(&mut self, iter: T) where T: IntoIterator { + fn extend(&mut self, iter: T) + where + T: IntoIterator, + { self.ops.extend(iter) } } @@ -753,7 +911,7 @@ impl DynasmApi for Assembler { fn align(&mut self, alignment: usize, with: u8) { let misalign = self.offset().0 % alignment; if misalign != 0 { - for _ in misalign .. alignment { + for _ in misalign..alignment { self.push(with); } } @@ -767,7 +925,7 @@ impl DynasmLabelApi for Assembler { let offset = self.offset(); self.labels.define_local(name, offset); } - fn global_label( &mut self, name: &'static str) { + fn global_label(&mut self, name: &'static str) { let offset = self.offset(); if let Err(e) = self.labels.define_global(name, offset) { self.error = Some(e) @@ -779,24 +937,61 @@ impl DynasmLabelApi for Assembler { self.error = Some(e) } } - fn global_relocation(&mut self, name: &'static str, target_offset: isize, field_offset: u8, ref_offset: u8, kind: R) { + fn global_relocation( + &mut self, + name: &'static str, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: R, + ) { let location = self.offset(); let label = StaticLabel::global(name); - self.relocs.add_static(label, PatchLoc::new(location, target_offset, field_offset, ref_offset, kind)); - } - fn dynamic_relocation(&mut self, id: DynamicLabel, target_offset: isize, field_offset: u8, ref_offset: u8, kind: R) { + self.relocs.add_static( + label, + PatchLoc::new(location, target_offset, field_offset, ref_offset, kind), + ); + } + fn dynamic_relocation( + &mut self, + id: DynamicLabel, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: R, + ) { let location = self.offset(); - self.relocs.add_dynamic(id, PatchLoc::new(location, target_offset, field_offset, ref_offset, kind)); - } - fn forward_relocation(&mut self, name: &'static str, target_offset: isize, field_offset: u8, ref_offset: u8, kind: R) { + self.relocs.add_dynamic( + id, + PatchLoc::new(location, target_offset, field_offset, ref_offset, kind), + ); + } + fn forward_relocation( + &mut self, + name: &'static str, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + 
kind: R, + ) { let location = self.offset(); let label = match self.labels.place_local_reference(name) { Some(label) => label.next(), None => StaticLabel::first(name), }; - self.relocs.add_static(label, PatchLoc::new(location, target_offset, field_offset, ref_offset, kind)); - } - fn backward_relocation(&mut self, name: &'static str, target_offset: isize, field_offset: u8, ref_offset: u8, kind: R) { + self.relocs.add_static( + label, + PatchLoc::new(location, target_offset, field_offset, ref_offset, kind), + ); + } + fn backward_relocation( + &mut self, + name: &'static str, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: R, + ) { let location = self.offset(); let label = match self.labels.place_local_reference(name) { Some(label) => label, @@ -805,21 +1000,28 @@ impl DynasmLabelApi for Assembler { return; } }; - self.relocs.add_static(label, PatchLoc::new(location, target_offset, field_offset, ref_offset, kind)); + self.relocs.add_static( + label, + PatchLoc::new(location, target_offset, field_offset, ref_offset, kind), + ); } fn bare_relocation(&mut self, target: usize, field_offset: u8, ref_offset: u8, kind: R) { let location = self.offset(); let loc = PatchLoc::new(location, 0, field_offset, ref_offset, kind); let buf = &mut self.ops[loc.range(self.memory.committed())]; - if loc.patch(buf, self.memory.execbuffer_addr(), target).is_err() { - self.error = Some(DynasmError::ImpossibleRelocation(TargetKind::Extern(target))) + if loc + .patch(buf, self.memory.execbuffer_addr(), target) + .is_err() + { + self.error = Some(DynasmError::ImpossibleRelocation(TargetKind::Extern( + target, + ))) } else if loc.needs_adjustment() { self.managed.add(loc) } } } - /// Allows modification of already committed assembly code. Contains an internal cursor /// into the emitted assembly, initialized to the start, that can be moved around either with the /// `goto` function, or just by assembling new code into this `Modifier`. @@ -834,13 +1036,14 @@ pub struct Modifier<'a, R: Relocation> { old_managed: &'a mut ManagedRelocs, new_managed: ManagedRelocs, - error: Option + error: Option, } impl<'a, R: Relocation> Modifier<'a, R> { /// Move the modifier cursor to the selected location. 
pub fn goto(&mut self, offset: AssemblyOffset) { - self.old_managed.remove_between(self.previous_asmoffset, self.asmoffset); + self.old_managed + .remove_between(self.previous_asmoffset, self.asmoffset); self.asmoffset = offset.0; self.previous_asmoffset = offset.0; } @@ -877,13 +1080,11 @@ impl<'a, R: Relocation> Modifier<'a, R> { let target = self.labels.resolve_static(&label)?; let buf = &mut self.buffer[loc.range(0)]; if loc.patch(buf, buf_addr, target.0).is_err() { - return Err(DynasmError::ImpossibleRelocation( - if label.is_global() { - TargetKind::Global(label.get_name()) - } else { - TargetKind::Local(label.get_name()) - } - )); + return Err(DynasmError::ImpossibleRelocation(if label.is_global() { + TargetKind::Global(label.get_name()) + } else { + TargetKind::Local(label.get_name()) + })); } if loc.needs_adjustment() { self.new_managed.add(loc); @@ -902,7 +1103,8 @@ impl<'a, R: Relocation> Modifier<'a, R> { } } - self.old_managed.remove_between(self.previous_asmoffset, self.asmoffset); + self.old_managed + .remove_between(self.previous_asmoffset, self.asmoffset); self.previous_asmoffset = self.asmoffset; self.old_managed.append(&mut self.new_managed); @@ -911,9 +1113,15 @@ impl<'a, R: Relocation> Modifier<'a, R> { } } -impl<'a, R: Relocation> Extend for Modifier<'a,R> { - fn extend(&mut self, iter: T) where T: IntoIterator { - for (src, dst) in iter.into_iter().zip(self.buffer[self.asmoffset ..].iter_mut()) { +impl<'a, R: Relocation> Extend for Modifier<'a, R> { + fn extend(&mut self, iter: T) + where + T: IntoIterator, + { + for (src, dst) in iter + .into_iter() + .zip(self.buffer[self.asmoffset..].iter_mut()) + { *dst = src; self.asmoffset += 1; } @@ -921,8 +1129,14 @@ impl<'a, R: Relocation> Extend for Modifier<'a,R> { } impl<'a, 'b, R: Relocation> Extend<&'b u8> for Modifier<'a, R> { - fn extend(&mut self, iter: T) where T: IntoIterator { - for (src, dst) in iter.into_iter().zip(self.buffer[self.asmoffset ..].iter_mut()) { + fn extend(&mut self, iter: T) + where + T: IntoIterator, + { + for (src, dst) in iter + .into_iter() + .zip(self.buffer[self.asmoffset..].iter_mut()) + { *dst = *src; self.asmoffset += 1; } @@ -942,7 +1156,7 @@ impl<'a, R: Relocation> DynasmApi for Modifier<'a, R> { fn align(&mut self, alignment: usize, with: u8) { let mismatch = self.asmoffset % alignment; if mismatch != 0 { - for _ in mismatch .. 
alignment { + for _ in mismatch..alignment { self.push(with) } } @@ -956,7 +1170,7 @@ impl<'a, R: Relocation> DynasmLabelApi for Modifier<'a, R> { let offset = self.offset(); self.labels.define_local(name, offset); } - fn global_label( &mut self, name: &'static str) { + fn global_label(&mut self, name: &'static str) { let offset = self.offset(); if let Err(e) = self.labels.define_global(name, offset) { self.error = Some(e); @@ -968,24 +1182,61 @@ impl<'a, R: Relocation> DynasmLabelApi for Modifier<'a, R> { self.error = Some(e); } } - fn global_relocation(&mut self, name: &'static str, target_offset: isize, field_offset: u8, ref_offset: u8, kind: R) { + fn global_relocation( + &mut self, + name: &'static str, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: R, + ) { let location = self.offset(); let label = StaticLabel::global(name); - self.relocs.add_static(label, PatchLoc::new(location, target_offset, field_offset, ref_offset, kind)); - } - fn dynamic_relocation(&mut self, id: DynamicLabel, target_offset: isize, field_offset: u8, ref_offset: u8, kind: R) { + self.relocs.add_static( + label, + PatchLoc::new(location, target_offset, field_offset, ref_offset, kind), + ); + } + fn dynamic_relocation( + &mut self, + id: DynamicLabel, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: R, + ) { let location = self.offset(); - self.relocs.add_dynamic(id, PatchLoc::new(location, target_offset, field_offset, ref_offset, kind)); - } - fn forward_relocation(&mut self, name: &'static str, target_offset: isize, field_offset: u8, ref_offset: u8, kind: R) { + self.relocs.add_dynamic( + id, + PatchLoc::new(location, target_offset, field_offset, ref_offset, kind), + ); + } + fn forward_relocation( + &mut self, + name: &'static str, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: R, + ) { let location = self.offset(); let label = match self.labels.place_local_reference(name) { Some(label) => label.next(), None => StaticLabel::first(name), }; - self.relocs.add_static(label, PatchLoc::new(location, target_offset, field_offset, ref_offset, kind)); - } - fn backward_relocation(&mut self, name: &'static str, target_offset: isize, field_offset: u8, ref_offset: u8, kind: R) { + self.relocs.add_static( + label, + PatchLoc::new(location, target_offset, field_offset, ref_offset, kind), + ); + } + fn backward_relocation( + &mut self, + name: &'static str, + target_offset: isize, + field_offset: u8, + ref_offset: u8, + kind: R, + ) { let location = self.offset(); let label = match self.labels.place_local_reference(name) { Some(label) => label, @@ -994,22 +1245,26 @@ impl<'a, R: Relocation> DynasmLabelApi for Modifier<'a, R> { return; } }; - self.relocs.add_static(label, PatchLoc::new(location, target_offset, field_offset, ref_offset, kind)); + self.relocs.add_static( + label, + PatchLoc::new(location, target_offset, field_offset, ref_offset, kind), + ); } fn bare_relocation(&mut self, target: usize, field_offset: u8, ref_offset: u8, kind: R) { let location = self.offset(); let loc = PatchLoc::new(location, 0, field_offset, ref_offset, kind); - let buf_addr = self.buffer.as_ptr() as usize; + let buf_addr = self.buffer.as_ptr() as usize; let buf = &mut self.buffer[loc.range(0)]; if loc.patch(buf, buf_addr, target).is_err() { - self.error = Some(DynasmError::ImpossibleRelocation(TargetKind::Extern(target))); + self.error = Some(DynasmError::ImpossibleRelocation(TargetKind::Extern( + target, + ))); } else if loc.needs_adjustment() { self.new_managed.add(loc) } } 
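
For orientation, here is a minimal sketch of how the `Assembler`/`Modifier` pair reformatted in these hunks is typically driven: assemble, `commit()`, patch the committed code in place through `alter`/`goto`, then `finalize()` and call the result. The function name and instruction choice are illustrative, not taken from this diff, and the patch assumes a same-length overwrite so nothing after it moves.

```rust
use dynasmrt::{dynasm, DynasmApi};

fn patch_committed_code() {
    let mut ops = dynasmrt::x64::Assembler::new().unwrap();
    let start = ops.offset();
    dynasm!(ops
        ; .arch x64
        ; mov rax, 1
        ; ret
    );
    let end = ops.offset();
    // `alter` operates on committed code, so commit explicitly first.
    ops.commit().unwrap();

    // `alter` hands out a `Modifier` whose cursor starts at the beginning of
    // the committed buffer; `goto` repositions it and assembling overwrites
    // bytes in place.
    ops.alter(|modifier| {
        modifier.goto(start);
        dynasm!(modifier
            ; .arch x64
            ; mov rax, 2 // same encoding length as `mov rax, 1`
            ; ret
        );
        // assert the patch ended exactly where the original code ended
        modifier.check(end).unwrap();
    })
    .unwrap();

    let buf = ops.finalize().unwrap();
    let func: extern "C" fn() -> i64 = unsafe { std::mem::transmute(buf.ptr(start)) };
    assert_eq!(func(), 2);
}
```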
} - /// This struct is a wrapper around an `Assembler` normally created using the /// `Assembler.alter_uncommitted` method. It allows the user to edit parts /// of the assembling buffer that cannot be determined easily or efficiently @@ -1019,7 +1274,7 @@ impl<'a, R: Relocation> DynasmLabelApi for Modifier<'a, R> { pub struct UncommittedModifier<'a> { buffer: &'a mut Vec, base_offset: usize, - offset: usize + offset: usize, } impl<'a> UncommittedModifier<'a> { @@ -1028,7 +1283,7 @@ impl<'a> UncommittedModifier<'a> { UncommittedModifier { buffer, base_offset: base_offset.0, - offset: base_offset.0 + offset: base_offset.0, } } @@ -1069,7 +1324,7 @@ impl<'a> DynasmApi for UncommittedModifier<'a> { fn align(&mut self, alignment: usize, with: u8) { let mismatch = self.offset % alignment; if mismatch != 0 { - for _ in mismatch .. alignment { + for _ in mismatch..alignment { self.push(with) } } @@ -1077,7 +1332,10 @@ impl<'a> DynasmApi for UncommittedModifier<'a> { } impl<'a> Extend for UncommittedModifier<'a> { - fn extend(&mut self, iter: T) where T: IntoIterator { + fn extend(&mut self, iter: T) + where + T: IntoIterator, + { for i in iter { self.push(i) } @@ -1085,7 +1343,10 @@ impl<'a> Extend for UncommittedModifier<'a> { } impl<'a, 'b> Extend<&'b u8> for UncommittedModifier<'a> { - fn extend(&mut self, iter: T) where T: IntoIterator { + fn extend(&mut self, iter: T) + where + T: IntoIterator, + { self.extend(iter.into_iter().cloned()) } } diff --git a/runtime/src/mmap.rs b/runtime/src/mmap.rs index 59e12456d3..9548e3869f 100644 --- a/runtime/src/mmap.rs +++ b/runtime/src/mmap.rs @@ -1,8 +1,8 @@ //! This module implements some wrappers around Mmap/MmapMut to also support a cheap "empty" variant. // Unfortunately Memmap itself doesn't support a cheap zero-length variant -use std::ops::{Deref, DerefMut}; use std::io; +use std::ops::{Deref, DerefMut}; use memmap2::{Mmap, MmapMut}; @@ -15,7 +15,7 @@ pub struct ExecutableBuffer { // length of the buffer that has actually been written to length: usize, // backing buffer - buffer: Option + buffer: Option, } /// ExecutableBuffer equivalent that holds a buffer of mutable memory instead of executable memory. It also derefs to a `&mut [u8]`. @@ -25,7 +25,7 @@ pub struct MutableBuffer { // length of the buffer that has actually been written to length: usize, // backing buffer - buffer: Option + buffer: Option, } impl ExecutableBuffer { @@ -50,10 +50,7 @@ impl ExecutableBuffer { Some(MmapMut::map_anon(size)?.make_exec()?) }; - Ok(ExecutableBuffer { - length: 0, - buffer - }) + Ok(ExecutableBuffer { length: 0, buffer }) } /// Query the backing size of this executable buffer @@ -71,7 +68,7 @@ impl ExecutableBuffer { Ok(MutableBuffer { length: self.length, - buffer + buffer, }) } } @@ -86,10 +83,7 @@ impl MutableBuffer { Some(MmapMut::map_anon(size)?) 
}; - Ok(MutableBuffer { - length: 0, - buffer - }) + Ok(MutableBuffer { length: 0, buffer }) } /// Query the backing size of this mutable buffer @@ -113,7 +107,7 @@ impl MutableBuffer { Ok(ExecutableBuffer { length: self.length, - buffer + buffer, }) } } @@ -122,7 +116,7 @@ impl Default for ExecutableBuffer { fn default() -> ExecutableBuffer { ExecutableBuffer { length: 0, - buffer: None + buffer: None, } } } @@ -131,7 +125,7 @@ impl Default for MutableBuffer { fn default() -> MutableBuffer { MutableBuffer { length: 0, - buffer: None + buffer: None, } } } diff --git a/runtime/src/relocations.rs b/runtime/src/relocations.rs index fb6faef9c9..42f9b1e586 100644 --- a/runtime/src/relocations.rs +++ b/runtime/src/relocations.rs @@ -6,8 +6,7 @@ use std::convert::TryFrom; /// Error returned when encoding a relocation failed #[derive(Debug)] -pub struct ImpossibleRelocation { } - +pub struct ImpossibleRelocation {} /// Used to inform assemblers on how to implement relocations for each architecture. /// When implementing a new architecture, one simply has to implement this trait for @@ -32,7 +31,6 @@ pub trait Relocation { fn page_size() -> usize; } - /// Specifies what kind of relocation a relocation is. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub enum RelocationKind { @@ -56,12 +54,11 @@ impl RelocationKind { 0 => Self::Relative, 1 => Self::AbsToRel, 2 => Self::RelToAbs, - x => panic!("Unsupported relocation kind {}", x) + x => panic!("Unsupported relocation kind {}", x), } } } - /// A descriptor for the size of a relocation. This also doubles as a relocation itself /// for relocations in data directives. Can be converted to relocations of any kind of architecture /// using `Relocation::from_size`. @@ -85,7 +82,7 @@ impl Relocation for RelocationSize { 2 => RelocationSize::Word, 4 => RelocationSize::DWord, 8 => RelocationSize::QWord, - x => panic!("Unsupported relocation size {}", x) + x => panic!("Unsupported relocation size {}", x), } } fn from_size(size: RelocationSize) -> Self { @@ -96,17 +93,20 @@ impl Relocation for RelocationSize { } fn write_value(&self, buf: &mut [u8], value: isize) -> Result<(), ImpossibleRelocation> { match self { - RelocationSize::Byte => buf[0] = - i8::try_from(value).map_err(|_| ImpossibleRelocation { } )? - as u8, - RelocationSize::Word => LittleEndian::write_i16(buf, - i16::try_from(value).map_err(|_| ImpossibleRelocation { } )? + RelocationSize::Byte => { + buf[0] = i8::try_from(value).map_err(|_| ImpossibleRelocation {})? as u8 + } + RelocationSize::Word => LittleEndian::write_i16( + buf, + i16::try_from(value).map_err(|_| ImpossibleRelocation {})?, ), - RelocationSize::DWord => LittleEndian::write_i32(buf, - i32::try_from(value).map_err(|_| ImpossibleRelocation { } )? + RelocationSize::DWord => LittleEndian::write_i32( + buf, + i32::try_from(value).map_err(|_| ImpossibleRelocation {})?, ), - RelocationSize::QWord => LittleEndian::write_i64(buf, - i64::try_from(value).map_err(|_| ImpossibleRelocation { } )? + RelocationSize::QWord => LittleEndian::write_i64( + buf, + i64::try_from(value).map_err(|_| ImpossibleRelocation {})?, ), } Ok(()) diff --git a/runtime/src/x64.rs b/runtime/src/x64.rs index 9a0fd73a04..1d86f69dcf 100644 --- a/runtime/src/x64.rs +++ b/runtime/src/x64.rs @@ -13,18 +13,17 @@ //! //! ## Enums //! -//! There are enumerator of every logically distinct register family usable in x64. +//! There are enumerator of every logically distinct register family usable in x64. //! 
These enums implement the [`Register`] trait and their discriminant values match their numeric encoding in dynamic register literals. //! Some of these are re-exported from the x86 architecture. //! //! *Note: The presence of some registers listed here is purely what is encodable. Check the relevant architecture documentation to find what is architecturally valid.* -use crate::relocations::{Relocation, RelocationSize, RelocationKind, ImpossibleRelocation}; +use crate::relocations::{ImpossibleRelocation, Relocation, RelocationKind, RelocationSize}; use crate::Register; use std::hash::Hash; - /// Relocation implementation for the x64 architecture. #[derive(Debug, Clone)] pub struct X64Relocation { @@ -39,9 +38,7 @@ impl Relocation for X64Relocation { } } fn from_size(size: RelocationSize) -> Self { - Self { - size, - } + Self { size } } fn size(&self) -> usize { self.size.size() @@ -67,17 +64,28 @@ pub type AssemblyModifier<'a> = crate::Modifier<'a, X64Relocation>; /// An x64 UncommittedModifier. This is aliased here for backwards compatability. pub type UncommittedModifier<'a> = crate::UncommittedModifier<'a>; - /// 1, 2, 4 or 8-byte general purpose "quad-word" registers. /// /// RIP does not appear here as it cannot be addressed dynamically. #[allow(missing_docs)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum Rq { - RAX = 0x0, RCX = 0x1, RDX = 0x2, RBX = 0x3, - RSP = 0x4, RBP = 0x5, RSI = 0x6, RDI = 0x7, - R8 = 0x8, R9 = 0x9, R10 = 0xA, R11 = 0xB, - R12 = 0xC, R13 = 0xD, R14 = 0xE, R15 = 0xF, + RAX = 0x0, + RCX = 0x1, + RDX = 0x2, + RBX = 0x3, + RSP = 0x4, + RBP = 0x5, + RSI = 0x6, + RDI = 0x7, + R8 = 0x8, + R9 = 0x9, + R10 = 0xA, + R11 = 0xB, + R12 = 0xC, + R13 = 0xD, + R14 = 0xE, + R15 = 0xF, } reg_impls!(Rq); @@ -85,10 +93,22 @@ reg_impls!(Rq); #[allow(missing_docs)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum Rx { - XMM0 = 0x0, XMM1 = 0x1, XMM2 = 0x2, XMM3 = 0x3, - XMM4 = 0x4, XMM5 = 0x5, XMM6 = 0x6, XMM7 = 0x7, - XMM8 = 0x8, XMM9 = 0x9, XMM10 = 0xA, XMM11 = 0xB, - XMM12 = 0xC, XMM13 = 0xD, XMM14 = 0xE, XMM15 = 0xF, + XMM0 = 0x0, + XMM1 = 0x1, + XMM2 = 0x2, + XMM3 = 0x3, + XMM4 = 0x4, + XMM5 = 0x5, + XMM6 = 0x6, + XMM7 = 0x7, + XMM8 = 0x8, + XMM9 = 0x9, + XMM10 = 0xA, + XMM11 = 0xB, + XMM12 = 0xC, + XMM13 = 0xD, + XMM14 = 0xE, + XMM15 = 0xF, } reg_impls!(Rx); @@ -96,15 +116,27 @@ reg_impls!(Rx); #[allow(missing_docs)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum RC { - CR0 = 0x0, CR1 = 0x1, CR2 = 0x2, CR3 = 0x3, - CR4 = 0x4, CR5 = 0x5, CR6 = 0x6, CR7 = 0x7, - CR8 = 0x8, CR9 = 0x9, CR10 = 0xA, CR11 = 0xB, - CR12 = 0xC, CR13 = 0xD, CR14 = 0xE, CR15 = 0xF, + CR0 = 0x0, + CR1 = 0x1, + CR2 = 0x2, + CR3 = 0x3, + CR4 = 0x4, + CR5 = 0x5, + CR6 = 0x6, + CR7 = 0x7, + CR8 = 0x8, + CR9 = 0x9, + CR10 = 0xA, + CR11 = 0xB, + CR12 = 0xC, + CR13 = 0xD, + CR14 = 0xE, + CR15 = 0xF, } reg_impls!(RC); // The other register families are the same as 32-bit X86. (Although access size for Debug regs is 8-byte) -pub use crate::x86::{Rh, Rf, Rm, Rs, RD, RB}; +pub use crate::x86::{Rf, Rh, Rm, Rs, RB, RD}; #[cfg(test)] mod tests { diff --git a/runtime/src/x86.rs b/runtime/src/x86.rs index b20e0bae6f..e2f75ddee2 100644 --- a/runtime/src/x86.rs +++ b/runtime/src/x86.rs @@ -13,15 +13,13 @@ //! //! ## Enums //! -//! There are enumerator of every logically distinct register family usable in x86. +//! There are enumerator of every logically distinct register family usable in x86. //! 
These enums implement the [`Register`] trait and their discriminant values match their numeric encoding in dynamic register literals. //! //! *Note: The presence of some registers listed here is purely what is encodable. Check the relevant architecture documentation to find what is architecturally valid.* - +use crate::relocations::{ImpossibleRelocation, Relocation, RelocationKind, RelocationSize}; use crate::Register; -use crate::relocations::{Relocation, RelocationSize, RelocationKind, ImpossibleRelocation}; - /// Relocation implementation for the x86 architecture. #[derive(Debug, Clone)] @@ -61,7 +59,6 @@ impl Relocation for X86Relocation { } } - /// An x86 Assembler. This is aliased here for backwards compatability. pub type Assembler = crate::Assembler; /// An x86 AssemblyModifier. This is aliased here for backwards compatability. @@ -69,15 +66,20 @@ pub type AssemblyModifier<'a> = crate::Modifier<'a, X86Relocation>; /// An x86 UncommittedModifier. This is aliased here for backwards compatability. pub type UncommittedModifier<'a> = crate::UncommittedModifier<'a>; - /// 1, 2 or 4-byte general purpose "double-word" registers. /// /// EIP does not appear here as it cannot be addressed dynamically. #[allow(missing_docs)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum Rd { - EAX = 0x00, ECX = 0x01, EDX = 0x02, EBX = 0x03, - ESP = 0x04, EBP = 0x05, ESI = 0x06, EDI = 0x07, + EAX = 0x00, + ECX = 0x01, + EDX = 0x02, + EBX = 0x03, + ESP = 0x04, + EBP = 0x05, + ESI = 0x06, + EDI = 0x07, } reg_impls!(Rd); @@ -85,7 +87,10 @@ reg_impls!(Rd); #[allow(missing_docs)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum Rh { - AH = 0x4, CH = 0x5, DH = 0x6, BH = 0x7, + AH = 0x4, + CH = 0x5, + DH = 0x6, + BH = 0x7, } reg_impls!(Rh); @@ -93,8 +98,14 @@ reg_impls!(Rh); #[allow(missing_docs)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum Rf { - ST0 = 0x0, ST1 = 0x1, ST2 = 0x2, ST3 = 0x3, - ST4 = 0x4, ST5 = 0x5, ST6 = 0x6, ST7 = 0x7, + ST0 = 0x0, + ST1 = 0x1, + ST2 = 0x2, + ST3 = 0x3, + ST4 = 0x4, + ST5 = 0x5, + ST6 = 0x6, + ST7 = 0x7, } reg_impls!(Rf); @@ -102,8 +113,14 @@ reg_impls!(Rf); #[allow(missing_docs)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum Rm { - MMX0 = 0x0, MMX1 = 0x1, MMX2 = 0x2, MMX3 = 0x3, - MMX4 = 0x4, MMX5 = 0x5, MMX6 = 0x6, MMX7 = 0x7, + MMX0 = 0x0, + MMX1 = 0x1, + MMX2 = 0x2, + MMX3 = 0x3, + MMX4 = 0x4, + MMX5 = 0x5, + MMX6 = 0x6, + MMX7 = 0x7, } reg_impls!(Rm); @@ -111,8 +128,14 @@ reg_impls!(Rm); #[allow(missing_docs)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum Rx { - XMM0 = 0x0, XMM1 = 0x1, XMM2 = 0x2, XMM3 = 0x3, - XMM4 = 0x4, XMM5 = 0x5, XMM6 = 0x6, XMM7 = 0x7, + XMM0 = 0x0, + XMM1 = 0x1, + XMM2 = 0x2, + XMM3 = 0x3, + XMM4 = 0x4, + XMM5 = 0x5, + XMM6 = 0x6, + XMM7 = 0x7, } reg_impls!(Rx); @@ -120,8 +143,12 @@ reg_impls!(Rx); #[allow(missing_docs)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum Rs { - ES = 0x0, CS = 0x1, SS = 0x2, DS = 0x3, - FS = 0x4, GS = 0x5, + ES = 0x0, + CS = 0x1, + SS = 0x2, + DS = 0x3, + FS = 0x4, + GS = 0x5, } reg_impls!(Rs); @@ -129,8 +156,14 @@ reg_impls!(Rs); #[allow(missing_docs)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum RC { - CR0 = 0x0, CR1 = 0x1, CR2 = 0x2, CR3 = 0x3, - CR4 = 0x4, CR5 = 0x5, CR6 = 0x6, CR7 = 0x7, + CR0 = 0x0, + CR1 = 0x1, + CR2 = 0x2, + CR3 = 0x3, + CR4 = 0x4, + CR5 = 0x5, + CR6 = 0x6, + CR7 = 0x7, } reg_impls!(RC); @@ -138,10 +171,22 @@ reg_impls!(RC); #[allow(missing_docs)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum 
RD { - DR0 = 0x0, DR1 = 0x1, DR2 = 0x2, DR3 = 0x3, - DR4 = 0x4, DR5 = 0x5, DR6 = 0x6, DR7 = 0x7, - DR8 = 0x8, DR9 = 0x9, DR10 = 0xA, DR11 = 0xB, - DR12 = 0xC, DR13 = 0xD, DR14 = 0xE, DR15 = 0xF, + DR0 = 0x0, + DR1 = 0x1, + DR2 = 0x2, + DR3 = 0x3, + DR4 = 0x4, + DR5 = 0x5, + DR6 = 0x6, + DR7 = 0x7, + DR8 = 0x8, + DR9 = 0x9, + DR10 = 0xA, + DR11 = 0xB, + DR12 = 0xC, + DR13 = 0xD, + DR14 = 0xE, + DR15 = 0xF, } reg_impls!(RD); @@ -149,7 +194,10 @@ reg_impls!(RD); #[allow(missing_docs)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum RB { - BND0 = 0x0, BND1 = 0x1, BND2 = 0x2, BND3 = 0x3 + BND0 = 0x0, + BND1 = 0x1, + BND2 = 0x2, + BND3 = 0x3, } reg_impls!(RB); diff --git a/testing/tests/aarch64_0.rs b/testing/tests/aarch64_0.rs index 4fbe3e5499..d612436fbb 100644 --- a/testing/tests/aarch64_0.rs +++ b/testing/tests/aarch64_0.rs @@ -4,4 +4,3 @@ use dynasmrt::dynasm; use dynasmrt::DynasmApi; include!("gen_aarch64/aarch64_tests_0.rs.gen"); - diff --git a/testing/tests/aarch64_2.rs b/testing/tests/aarch64_2.rs index ae0a93f1db..8bd3f8e913 100644 --- a/testing/tests/aarch64_2.rs +++ b/testing/tests/aarch64_2.rs @@ -4,4 +4,3 @@ use dynasmrt::dynasm; use dynasmrt::DynasmApi; include!("gen_aarch64/aarch64_tests_2.rs.gen"); - diff --git a/testing/tests/aarch64_3.rs b/testing/tests/aarch64_3.rs index 0fdcaf7b7d..e357b00779 100644 --- a/testing/tests/aarch64_3.rs +++ b/testing/tests/aarch64_3.rs @@ -4,4 +4,3 @@ use dynasmrt::dynasm; use dynasmrt::DynasmApi; include!("gen_aarch64/aarch64_tests_3.rs.gen"); - diff --git a/testing/tests/aarch64_4.rs b/testing/tests/aarch64_4.rs index ffc9415057..113524a446 100644 --- a/testing/tests/aarch64_4.rs +++ b/testing/tests/aarch64_4.rs @@ -4,4 +4,3 @@ use dynasmrt::dynasm; use dynasmrt::DynasmApi; include!("gen_aarch64/aarch64_tests_4.rs.gen"); - diff --git a/testing/tests/aarch64_5.rs b/testing/tests/aarch64_5.rs index ee62f2dbd7..54848eb8da 100644 --- a/testing/tests/aarch64_5.rs +++ b/testing/tests/aarch64_5.rs @@ -4,4 +4,3 @@ use dynasmrt::dynasm; use dynasmrt::DynasmApi; include!("gen_aarch64/aarch64_tests_5.rs.gen"); - diff --git a/testing/tests/aarch64_6.rs b/testing/tests/aarch64_6.rs index 42d60efc80..1c2321cbaa 100644 --- a/testing/tests/aarch64_6.rs +++ b/testing/tests/aarch64_6.rs @@ -4,4 +4,3 @@ use dynasmrt::dynasm; use dynasmrt::DynasmApi; include!("gen_aarch64/aarch64_tests_6.rs.gen"); - diff --git a/testing/tests/aarch64_7.rs b/testing/tests/aarch64_7.rs index a7b6574bca..57eaa83c63 100644 --- a/testing/tests/aarch64_7.rs +++ b/testing/tests/aarch64_7.rs @@ -4,4 +4,3 @@ use dynasmrt::dynasm; use dynasmrt::DynasmApi; include!("gen_aarch64/aarch64_tests_7.rs.gen"); - diff --git a/testing/tests/aarch64_8.rs b/testing/tests/aarch64_8.rs index 2a4699d4e7..07705f9296 100644 --- a/testing/tests/aarch64_8.rs +++ b/testing/tests/aarch64_8.rs @@ -4,4 +4,3 @@ use dynasmrt::dynasm; use dynasmrt::DynasmApi; include!("gen_aarch64/aarch64_tests_8.rs.gen"); - diff --git a/testing/tests/bugreports.rs b/testing/tests/bugreports.rs index 2ced2429ad..611fd13b72 100644 --- a/testing/tests/bugreports.rs +++ b/testing/tests/bugreports.rs @@ -18,7 +18,10 @@ fn bugreport_1() { let buf = ops.finalize().unwrap(); let hex: Vec = buf.iter().map(|x| format!("0x{:02X}", *x)).collect(); let hex: String = hex.join(", "); - assert_eq!(hex, "0xCD, 0x03, 0x49, 0x89, 0xF8, 0x49, 0x83, 0xC0, 0x01, 0x4C, 0x89, 0xC0, 0xC3", "bugreport_1"); + assert_eq!( + hex, "0xCD, 0x03, 0x49, 0x89, 0xF8, 0x49, 0x83, 0xC0, 0x01, 0x4C, 0x89, 0xC0, 0xC3", + "bugreport_1" + ); } // ensure RBP/RSP 
can be used as dynamic base register by always emitting the full SIB byte and a displacement @@ -66,7 +69,10 @@ fn bugreport_3() { let buf = ops.finalize().unwrap(); let hex: Vec = buf.iter().map(|x| format!("0x{:02X}", *x)).collect(); let hex: String = hex.join(", "); - assert_eq!(hex, "0xC4, 0xE1, 0x6B, 0x58, 0xCB, 0xC4, 0x41, 0x33, 0x58, 0xD3", "bugreport_3"); + assert_eq!( + hex, "0xC4, 0xE1, 0x6B, 0x58, 0xCB, 0xC4, 0x41, 0x33, 0x58, 0xD3", + "bugreport_3" + ); } // overflow in logical immediate encoding diff --git a/testing/tests/complex1.rs b/testing/tests/complex1.rs index 182d9db9ca..331fd823ca 100644 --- a/testing/tests/complex1.rs +++ b/testing/tests/complex1.rs @@ -145,16 +145,20 @@ fn complex1() { #[allow(dead_code)] struct Test { foo: i32, - bar: u32 + bar: u32, } #[allow(dead_code)] struct SmallTest { foo: i8, - bar: u8 + bar: u8, } - let mut test_array = [Test {foo: 1, bar: 2}, Test {foo: 3, bar: 4}, Test {foo: 5, bar: 6}]; + let mut test_array = [ + Test { foo: 1, bar: 2 }, + Test { foo: 3, bar: 4 }, + Test { foo: 5, bar: 6 }, + ]; let test_array = &mut test_array; - let mut test_single = Test {foo: 7, bar: 8}; + let mut test_single = Test { foo: 7, bar: 8 }; let test_single = &mut test_single; my_dynasm!(ops ; mov rax, AWORD MutPointer!(test_array) @@ -176,7 +180,7 @@ fn complex1() { // dynasm in expr position match 1 { 0 => (), - _ => dynasm!(ops; inc rax) + _ => dynasm!(ops; inc rax), } // fixups @@ -191,7 +195,8 @@ fn complex1() { ; inc r12 ); ops.check(end).unwrap(); - }).unwrap(); + }) + .unwrap(); let index = ops.offset(); my_dynasm!(ops @@ -208,7 +213,7 @@ fn complex1() { println!(""); let func: extern "C" fn() -> i64 = unsafe { std::mem::transmute(buf.ptr(index)) }; - println!("assembled function result: {}", func() ); + println!("assembled function result: {}", func()); } #[test] diff --git a/testing/tests/relocations.rs b/testing/tests/relocations.rs index 29fdefded3..a144b6f767 100644 --- a/testing/tests/relocations.rs +++ b/testing/tests/relocations.rs @@ -1,8 +1,7 @@ #![allow(unused_imports)] use dynasmrt::dynasm; -use dynasmrt::{DynasmApi, DynasmLabelApi, DynasmError, LabelKind, DynamicLabel}; - +use dynasmrt::{DynamicLabel, DynasmApi, DynasmError, DynasmLabelApi, LabelKind}; #[test] fn test_local_jumps() { @@ -59,31 +58,30 @@ fn test_local_jumps() { assert!(&output == expected); } - #[test] fn test_global_jumps() { let mut ops = dynasmrt::VecAssembler::::new(0); dynasm!(ops - ; jmp BYTE ->minusone - ; jmp BYTE ->plustwo - ; jmp BYTE ->minusone - ;->start: - ; inc rax - ;->plusone: - ; inc rbx - ;->plustwo: - ; jmp BYTE ->end - ; jmp BYTE ->start - ;->minustwo: - ; inc rcx - ;->minusone: - ; inc rdx - ;->end: - ; jmp BYTE ->plusone - ; jmp BYTE ->minustwo - ; jmp BYTE ->plusone - ); + ; jmp BYTE ->minusone + ; jmp BYTE ->plustwo + ; jmp BYTE ->minusone + ;->start: + ; inc rax + ;->plusone: + ; inc rbx + ;->plustwo: + ; jmp BYTE ->end + ; jmp BYTE ->start + ;->minustwo: + ; inc rcx + ;->minusone: + ; inc rdx + ;->end: + ; jmp BYTE ->plusone + ; jmp BYTE ->minustwo + ; jmp BYTE ->plusone + ); let output = ops.finalize().unwrap(); @@ -98,7 +96,6 @@ fn test_global_jumps() { assert!(&output == expected); } - #[test] fn test_dynamic_jumps() { let mut ops = dynasmrt::VecAssembler::::new(0); @@ -143,7 +140,6 @@ fn test_dynamic_jumps() { assert!(&output == expected); } - #[test] fn test_all_jumps() { let mut ops = dynasmrt::VecAssembler::::new(0); @@ -186,7 +182,6 @@ fn test_all_jumps() { assert!(&output == expected); } - #[test] fn test_bad_jumps() { // forward jump 
to a backwards label @@ -251,6 +246,6 @@ fn test_bad_jumps() { ); match ops.finalize() { Err(DynasmError::UnknownLabel(LabelKind::Dynamic(_))) => (), - _ => assert!(false) + _ => assert!(false), } }
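
The register enums in `x64.rs`/`x86.rs` above exist so that dynamic register operands can be built from their discriminants. A small sketch, assuming the `Rq(expr)` dynamic-register syntax of `dynasm!`; the chosen register is illustrative only.

```rust
use dynasmrt::x64::Rq;
use dynasmrt::{dynasm, DynasmApi};

fn dynamic_register_example() {
    let mut ops = dynasmrt::x64::Assembler::new().unwrap();
    let r = Rq::R9 as u8; // 0x9, matching R9's hardware encoding
    dynasm!(ops
        ; .arch x64
        ; xor Rq(r), Rq(r) // zero the dynamically selected register
        ; inc Rq(r)
        ; ret
    );
    let _code = ops.finalize().unwrap();
}
```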
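
The relocation tests above exercise global (`->name`), local (`name:`, referenced with `<` and `>`) and dynamic (`=>label`) labels. A compact sketch of the three kinds against a `VecAssembler`, mirroring those tests; label names and the instruction sequence are illustrative.

```rust
use dynasmrt::{dynasm, DynasmApi, DynasmLabelApi};

fn label_kinds_example() {
    let mut ops = dynasmrt::VecAssembler::<dynasmrt::x64::X64Relocation>::new(0);
    let exit = ops.new_dynamic_label();
    dynasm!(ops
        ; .arch x64
        ; jmp ->skip   // forward reference to a global label
        ; inc rax
        ; ->skip:
        ; retry:       // local label
        ; dec rcx
        ; jnz <retry   // backward reference to the nearest `retry`
        ; jmp =>exit   // dynamic label, resolved at finalization
        ; =>exit:
        ; ret
    );
    let _code: Vec<u8> = ops.finalize().expect("all labels should resolve");
}
```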