From 404cb6e81988ed84a75c89d67bf324409e22a390 Mon Sep 17 00:00:00 2001 From: iximeow Date: Sat, 3 Jul 2021 23:52:58 -0700 Subject: update protected_mode to match long_mode docs, apis --- src/long_mode/mod.rs | 29 ++----- src/protected_mode/mod.rs | 188 +++++++++++++++++++++++++++++++++++++--------- src/protected_mode/vex.rs | 134 ++++++++++++++++----------------- 3 files changed, 225 insertions(+), 126 deletions(-) (limited to 'src') diff --git a/src/long_mode/mod.rs b/src/long_mode/mod.rs index 886dd1c..600a81a 100644 --- a/src/long_mode/mod.rs +++ b/src/long_mode/mod.rs @@ -2838,7 +2838,7 @@ pub struct InstDecoder { // 2. monitor (intel-only?) // 3. vmx (some atom chips still lack it) // 4. fma3 (intel haswell/broadwell+, amd piledriver+) - // 5. cmpxchg16b (some amd are missingt this one) + // 5. cmpxchg16b (some amd are missing this one) // 6. sse4.1 // 7. sse4.2 // 8. movbe @@ -3705,14 +3705,12 @@ impl InstDecoder { Opcode::AESKEYGENASSIST => { // via Intel section 5.12. AESNI AND PCLMULQDQ if !self.aesni() { - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } } Opcode::PCLMULQDQ => { // via Intel section 5.12. AESNI AND PCLMULQDQ if !self.pclmulqdq() { - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } } @@ -3721,7 +3719,6 @@ impl InstDecoder { Opcode::XEND | Opcode::XTEST => { if !self.tsx() { - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } } @@ -3733,7 +3730,6 @@ impl InstDecoder { Opcode::SHA256MSG2 | Opcode::SHA256RNDS2 => { if !self.sha() { - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } } @@ -3741,7 +3737,6 @@ impl InstDecoder { Opcode::ENCLS | Opcode::ENCLU => { if !self.sgx() { - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } } @@ -4090,7 +4085,6 @@ impl InstDecoder { Opcode::VSTMXCSR => { // TODO: check a table for these if !self.avx() { - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } } @@ -4102,13 +4096,11 @@ impl InstDecoder { Opcode::VAESKEYGENASSIST => { // TODO: check a table for these if !self.avx() || !self.aesni() { - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } } Opcode::MOVBE => { if !self.movbe() { - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } } @@ -4131,7 +4123,6 @@ impl InstDecoder { * the less quirky default, so `intel_quirks` is considered the outlier, and * before this default. * */ - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } } @@ -4150,17 +4141,14 @@ impl InstDecoder { * so that's considered the less-quirky (default) case here. * */ if self.amd_quirks() && !self.abm() { - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } else if !self.lzcnt() { - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } } Opcode::ADCX | Opcode::ADOX => { if !self.adx() { - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } } @@ -4171,21 +4159,18 @@ impl InstDecoder { Opcode::VMMCALL | Opcode::INVLPGA => { if !self.svm() { - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } } Opcode::STGI | Opcode::SKINIT => { if !self.svm() || !self.skinit() { - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } } Opcode::LAHF | Opcode::SAHF => { if !self.lahfsahf() { - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } } @@ -4207,19 +4192,16 @@ impl InstDecoder { * EVEX.512-coded. 
*/ if !self.avx() || !self.f16c() { - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } } Opcode::RDRAND => { if !self.rdrand() { - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } } Opcode::RDSEED => { if !self.rdseed() { - inst.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } } @@ -4587,19 +4569,19 @@ impl Prefixes { #[inline] pub fn rep(&self) -> bool { self.bits & 0x30 == 0x10 } #[inline] - fn set_rep(&mut self) { self.bits = (self.bits & 0xcf) | 0x10; } + fn set_rep(&mut self) { self.bits = (self.bits & 0xcf) | 0x10 } #[inline] pub fn repnz(&self) -> bool { self.bits & 0x30 == 0x30 } #[inline] - fn set_repnz(&mut self) { self.bits = (self.bits & 0xcf) | 0x30; } + fn set_repnz(&mut self) { self.bits = (self.bits & 0xcf) | 0x30 } #[inline] pub fn rep_any(&self) -> bool { self.bits & 0x30 != 0x00 } #[inline] fn operand_size(&self) -> bool { self.bits & 0x1 == 1 } #[inline] - fn set_operand_size(&mut self) { self.bits = self.bits | 0x1; } + fn set_operand_size(&mut self) { self.bits = self.bits | 0x1 } #[inline] - fn unset_operand_size(&mut self) { self.bits = self.bits & !0x1; } + fn unset_operand_size(&mut self) { self.bits = self.bits & !0x1 } #[inline] fn address_size(&self) -> bool { self.bits & 0x2 == 2 } #[inline] @@ -7271,7 +7253,6 @@ fn read_instr::Address, u8 { self.num } + /// the class of register this register is in. + /// + /// this corresponds to the register's size, but is by the register's usage in the instruction + /// set; `rax` and `mm0` are the same size, but different classes (`Q`(word) and `MM` (mmx) + /// respectively). pub fn class(&self) -> RegisterClass { RegisterClass { kind: self.bank } } @@ -78,6 +106,7 @@ impl RegSpec { display::regspec_label(self) } + /// construct a `RegSpec` for x87 register `st(num)` #[inline] pub fn st(num: u8) -> RegSpec { if num >= 8 { @@ -379,6 +408,10 @@ enum SizeCode { vd, } +/// an operand for an `x86` instruction. +/// +/// `Operand::Nothing` should be unreachable in practice; any such instructions should have an +/// operand count of 0 (or at least one fewer than the `Nothing` operand's position). #[derive(Clone, Debug, PartialEq)] #[non_exhaustive] pub enum Operand { @@ -485,7 +518,7 @@ impl OperandSpec { o => o, } } - pub fn is_memory(&self) -> bool { + fn is_memory(&self) -> bool { match self { OperandSpec::DispU16 | OperandSpec::DispU32 | @@ -529,6 +562,11 @@ impl OperandSpec { } } } +/// an `avx512` merging mode. +/// +/// the behavior for non-`avx512` instructions is equivalent to `merge`. `zero` is only useful in +/// conjunction with a mask register, where bits specified in the mask register correspond to +/// unmodified items in the instruction's desination. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum MergeMode { Merge, @@ -543,6 +581,7 @@ impl From for MergeMode { } } } +/// an `avx512` custom rounding mode. 
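A quick sketch of the `Operand::is_memory` note above about `lea` (an illustration only, not part of this commit; it assumes `decode_slice` is available on the protected-mode decoder the same way the long_mode doctests in this patch use it):

use yaxpeax_x86::protected_mode::InstDecoder;

fn main() {
    let decoder = InstDecoder::default();
    // 8d 41 04 is `lea eax, [ecx + 0x4]`: operand 1 is memory-shaped even
    // though `lea` never actually dereferences it.
    let inst = decoder.decode_slice(&[0x8d, 0x41, 0x04]).unwrap();
    assert!(!inst.operand(0).is_memory());
    assert!(inst.operand(1).is_memory());
}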
#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum SaeMode { RoundNearest, @@ -557,6 +596,16 @@ const SAE_MODES: [SaeMode; 4] = [ SaeMode::RoundZero, ]; impl SaeMode { + /// a human-friendly label for this `SaeMode`: + /// + /// ``` + /// use yaxpeax_x86::long_mode::SaeMode; + /// + /// assert_eq!(SaeMode::RoundNearest.label(), "{rne-sae}"); + /// assert_eq!(SaeMode::RoundDown.label(), "{rd-sae}"); + /// assert_eq!(SaeMode::RoundUp.label(), "{ru-sae}"); + /// assert_eq!(SaeMode::RoundZero.label(), "{rz-sae}"); + /// ``` pub fn label(&self) -> &'static str { match self { SaeMode::RoundNearest => "{rne-sae}", @@ -722,6 +771,10 @@ impl Operand { } } } + /// returns `true` if this operand implies a memory access, `false` otherwise. + /// + /// notably, the `lea` instruction uses a memory operand without actually ever accessing + /// memory. pub fn is_memory(&self) -> bool { match self { Operand::DisplacementU16(_) | @@ -799,6 +852,10 @@ fn operand_size() { // assert_eq!(core::mem::size_of::(), 40); } +/// an `x86` register class - `qword`, `dword`, `xmmword`, `segment`, and so on. +/// +/// this is mostly useful for comparing a `RegSpec`'s [`RegSpec::class()`] with a constant out of +/// [`register_class`]. #[cfg_attr(feature="use-serde", derive(Serialize, Deserialize))] #[derive(Copy, Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Hash)] pub struct RegisterClass { @@ -970,6 +1027,11 @@ enum RegisterBank { K = 20, // AVX512 mask registers } +/// the segment register used by the corresponding instruction. +/// +/// typically this will be `ds` but can be overridden. some instructions have specific segment +/// registers used regardless of segment prefixes, and in these cases `yaxpeax-x86` will report the +/// actual segment register a physical processor would use. #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum Segment { DS = 0, CS, ES, FS, GS, SS @@ -2533,7 +2595,12 @@ impl PartialEq for Instruction { } } -#[derive(Debug, Clone, Eq)] +/// an `x86` instruction. +/// +/// typically an opcode will be inspected by [`Instruction::opcode()`], and an instruction has +/// [`Instruction::operand_count()`] many operands. operands are provided by +/// [`Instruction::operand()`]. +#[derive(Debug, Clone, Copy, Eq)] pub struct Instruction { pub prefixes: Prefixes, /* @@ -2649,8 +2716,10 @@ enum OperandSpec { // Foo for T == x86. This is only to access associated types // which themselves are bounded, but their #[derive] require T to // implement these traits. +/// a trivial struct for `yaxpeax_arch::Arch` to be implemented on. it's only interesting for the +/// associated type parameters. #[cfg_attr(feature="use-serde", derive(Serialize, Deserialize))] -#[derive(Hash, Eq, PartialEq, Debug)] +#[derive(Hash, Eq, PartialEq, Debug, Copy, Clone)] #[allow(non_camel_case_types)] pub struct Arch; @@ -2675,7 +2744,15 @@ impl LengthedInstruction for Instruction { } } -#[derive(PartialEq)] +/// an `x86` instruction decoder. +/// +/// fundamentally this is one or two primitives with no additional state kept during decoding. it +/// can be copied cheaply, hashed cheaply, compared cheaply. if you really want to share an +/// `InstDecoder` between threads, you could - but you might want to clone it instead. +/// +/// unless you're using an `Arc>`, which is _fine_ but i'd be very curious about +/// the design requiring that. +#[derive(PartialEq, Copy, Clone, Eq, Hash, PartialOrd, Ord)] pub struct InstDecoder { // extensions tracked here: // 0. 
SSE3 @@ -3171,6 +3248,7 @@ impl InstDecoder { self } + /// returns `true` if this `InstDecoder` has **all** `avx512` features enabled. pub fn avx512(&self) -> bool { let avx512_mask = (1 << 19) | @@ -3191,6 +3269,9 @@ impl InstDecoder { (self.flags & avx512_mask) == avx512_mask } + /// enable all `avx512` features on this `InstDecoder`. no real CPU, at time of writing, + /// actually has such a feature comination, but this is a useful overestimate for `avx512` + /// generally. pub fn with_avx512(mut self) -> Self { let avx512_mask = (1 << 19) | @@ -4114,6 +4195,8 @@ impl Decoder for InstDecoder { } impl Opcode { + /// get the [`ConditionCode`] for this instruction, if it is in fact conditional. x86's + /// conditional instructions are `Jcc`, `CMOVcc`, andd `SETcc`. pub fn condition(&self) -> Option { match self { Opcode::JO | @@ -4176,6 +4259,7 @@ impl Default for Instruction { } impl Instruction { + /// get the `Opcode` of this instruction. pub fn opcode(&self) -> Opcode { self.opcode } @@ -4209,6 +4293,11 @@ impl Instruction { } } + /// get the memory access information for this instruction, if it accesses memory. + /// + /// the corresponding `MemoryAccessSize` may report that the size of accessed memory is + /// indeterminate; this is the case for `xsave/xrestor`-style instructions whose operation size + /// varies based on physical processor. pub fn mem_size(&self) -> Option { if self.mem_size != 0 { Some(MemoryAccessSize { size: self.mem_size }) @@ -4222,7 +4311,7 @@ impl Instruction { pub fn invalid() -> Instruction { Instruction { prefixes: Prefixes::new(0), - opcode: Opcode::Invalid, + opcode: Opcode::NOP, mem_size: 0, regs: [RegSpec::eax(); 4], scale: 0, @@ -4234,13 +4323,10 @@ impl Instruction { } } - pub fn is_invalid(&self) -> bool { - match self.opcode { - Opcode::Invalid => true, - _ => false - } - } - + /// get the `Segment` that will *actually* be used for accessing the operand at index `i`. + /// + /// `stos`, `lods`, `movs`, and `cmps` specifically name some segments for use regardless of + /// prefixes. pub fn segment_override_for_op(&self, op: u8) -> Option { match self.opcode { Opcode::STOS => { @@ -4288,6 +4374,18 @@ impl Instruction { } #[cfg(feature = "fmt")] + /// wrap a reference to this instruction with a `DisplayStyle` to format the instruction with + /// later. see the documentation on [`display::DisplayStyle`] for more. + /// + /// ``` + /// use yaxpeax_x86::long_mode::{InstDecoder, DisplayStyle}; + /// + /// let decoder = InstDecoder::default(); + /// let inst = decoder.decode_slice(&[0x33, 0xc1]).unwrap(); + /// + /// assert_eq!("eax ^= ecx", inst.display_with(DisplayStyle::C).to_string()); + /// assert_eq!("xor eax, ecx", inst.display_with(DisplayStyle::Intel).to_string()); + /// ``` pub fn display_with<'a>(&'a self, style: display::DisplayStyle) -> display::InstructionDisplayer<'a> { display::InstructionDisplayer { style, @@ -4297,11 +4395,15 @@ impl Instruction { } #[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub struct EvexData { +struct EvexData { // data: present, z, b, Lp, Rp. aaa bits: u8, } +/// the prefixes on an instruction. +/// +/// `rep`, `repnz`, `lock`, and segment override prefixes are directly accessible here. `vex` and +/// `evex` prefixes are available through their associated helpers. 
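A short sketch of those prefix helpers (an illustration only, not part of this commit; it assumes `decode_slice` exists on the protected-mode decoder as in the long_mode doctest above, and that `InstDecoder::default()` enables `avx`):

use yaxpeax_x86::protected_mode::InstDecoder;

fn main() {
    let decoder = InstDecoder::default();

    // 33 c1 is a plain `xor eax, ecx`: no vex or evex prefix is present.
    let plain = decoder.decode_slice(&[0x33, 0xc1]).unwrap();
    assert!(plain.prefixes.vex().is_none());
    assert!(plain.prefixes.evex().is_none());

    // c5 f0 57 c9 is `vxorps xmm1, xmm1, xmm1`, carrying a two-byte vex
    // prefix; `vex()` returns `Some` and exposes the prefix's `L` bit.
    let vexed = decoder.decode_slice(&[0xc5, 0xf0, 0x57, 0xc9]).unwrap();
    let vex = vexed.prefixes.vex().expect("vex prefix present");
    assert!(!vex.l()); // 128-bit operation
}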
#[derive(Debug, Copy, Clone, Eq, PartialEq)] pub struct Prefixes { bits: u8, @@ -4320,26 +4422,32 @@ impl PrefixEvex { fn present(&self) -> bool { self.evex_data.present() } - fn vex(&self) -> &PrefixVex { + /// the `evex` prefix's parts that overlap with `vex` definitions - `L`, `W`, `R`, `X`, and `B` + /// bits. + pub fn vex(&self) -> &PrefixVex { &self.vex } - fn mask_reg(&self) -> u8 { + /// the `avx512` mask register in use. `0` indicates "no mask register". + pub fn mask_reg(&self) -> u8 { self.evex_data.aaa() } - fn broadcast(&self) -> bool { + pub fn broadcast(&self) -> bool { self.evex_data.b() } - fn merge(&self) -> bool { + pub fn merge(&self) -> bool { self.evex_data.z() } - fn lp(&self) -> bool { + /// the `evex` `L'` bit. + pub fn lp(&self) -> bool { self.evex_data.lp() } - fn rp(&self) -> bool { + /// the `evex` `R'` bit. + pub fn rp(&self) -> bool { self.evex_data.rp() } } +/// bits specified in an avx/avx2 [`vex`](https://en.wikipedia.org/wiki/VEX_prefix) prefix, `L`, `W`, `R`, `X`, and `B`. #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub struct PrefixVex { bits: u8, @@ -4348,15 +4456,15 @@ pub struct PrefixVex { #[allow(dead_code)] impl PrefixVex { #[inline] - fn b(&self) -> bool { (self.bits & 0x01) == 0x01 } + pub fn b(&self) -> bool { (self.bits & 0x01) == 0x01 } #[inline] - fn x(&self) -> bool { (self.bits & 0x02) == 0x02 } + pub fn x(&self) -> bool { (self.bits & 0x02) == 0x02 } #[inline] - fn r(&self) -> bool { (self.bits & 0x04) == 0x04 } + pub fn r(&self) -> bool { (self.bits & 0x04) == 0x04 } #[inline] - fn w(&self) -> bool { (self.bits & 0x08) == 0x08 } + pub fn w(&self) -> bool { (self.bits & 0x08) == 0x08 } #[inline] - fn l(&self) -> bool { (self.bits & 0x10) == 0x10 } + pub fn l(&self) -> bool { (self.bits & 0x10) == 0x10 } #[inline] fn present(&self) -> bool { (self.bits & 0x80) == 0x80 } #[inline] @@ -4383,10 +4491,6 @@ impl Prefixes { #[inline] fn set_rep(&mut self) { self.bits = (self.bits & 0xcf) | 0x10 } #[inline] - pub fn repz(&self) -> bool { self.bits & 0x30 == 0x20 } - #[inline] - fn set_repz(&mut self) { self.bits = (self.bits & 0xcf) | 0x20 } - #[inline] pub fn repnz(&self) -> bool { self.bits & 0x30 == 0x30 } #[inline] fn set_repnz(&mut self) { self.bits = (self.bits & 0xcf) | 0x30 } @@ -4431,11 +4535,20 @@ impl Prefixes { #[inline] fn set_ss(&mut self) { self.segment = Segment::SS } #[inline] - fn vex(&self) -> PrefixVex { PrefixVex { bits: self.vex.bits } } + fn vex_unchecked(&self) -> PrefixVex { PrefixVex { bits: self.vex.bits } } + #[inline] + pub fn vex(&self) -> Option { + let vex = self.vex_unchecked(); + if vex.present() { + Some(vex) + } else { + None + } + } #[inline] fn evex_unchecked(&self) -> PrefixEvex { PrefixEvex { vex: PrefixVex { bits: self.vex.bits }, evex_data: self.evex_data } } #[inline] - fn evex(&self) -> Option { + pub fn evex(&self) -> Option { let evex = self.evex_unchecked(); if evex.present() { Some(evex) @@ -5484,19 +5597,24 @@ fn read_modrm_reg(instr: &mut Instruction, modrm: u8, reg_bank: RegisterBank) -> fn read_sib::Address, ::Word>>(words: &mut T, instr: &mut Instruction, modrm: u8) -> Result { let modbits = modrm >> 6; let sibbyte = words.next().ok().ok_or(DecodeError::ExhaustedInput)?; + instr.regs[1].num |= sibbyte & 7; + instr.regs[2].num |= (sibbyte >> 3) & 7; let disp = if modbits == 0b00 { if (sibbyte & 7) == 0b101 { - read_num(words, 4)? as i32 as u32 + read_num(words, 4)? as i32 } else { 0 } } else if modbits == 0b01 { - read_num(words, 1)? as i8 as i32 as u32 + read_num(words, 1)? 
as i8 as i32 } else { - read_num(words, 4)? as i32 as u32 + read_num(words, 4)? as i32 }; - instr.disp = disp; + instr.disp = disp as u32; + + let scale = 1u8 << (sibbyte >> 6); + instr.scale = scale; let op_spec = if (sibbyte & 7) == 0b101 { if ((sibbyte >> 3) & 7) == 0b100 { diff --git a/src/protected_mode/vex.rs b/src/protected_mode/vex.rs index 36ccc66..053d1aa 100644 --- a/src/protected_mode/vex.rs +++ b/src/protected_mode/vex.rs @@ -1326,10 +1326,10 @@ fn read_vex_instruction::Address, ::Address, if instruction.prefixes.vex().w() { + 0x6E => if instruction.prefixes.vex_unchecked().w() { (Opcode::VMOVD, if L { instruction.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); @@ -1761,7 +1761,7 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0x7E => if instruction.prefixes.vex_unchecked().w() { (Opcode::VMOVD, if L { instruction.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); @@ -2050,17 +2050,17 @@ fn read_vex_instruction::Address, (Opcode::VCVTSI2SD, if instruction.prefixes.vex().w() { + 0x2a => (Opcode::VCVTSI2SD, if instruction.prefixes.vex_unchecked().w() { VEXOperandCode::G_V_xmm_Ed // 32-bit last operand } else { VEXOperandCode::G_V_xmm_Ed // 32-bit last operand }), - 0x2c => (Opcode::VCVTTSD2SI, if instruction.prefixes.vex().w() { + 0x2c => (Opcode::VCVTTSD2SI, if instruction.prefixes.vex_unchecked().w() { VEXOperandCode::VCVT_Gd_Eq_xmm } else { VEXOperandCode::VCVT_Gd_Eq_xmm }), - 0x2d => (Opcode::VCVTSD2SI, if instruction.prefixes.vex().w() { + 0x2d => (Opcode::VCVTSD2SI, if instruction.prefixes.vex_unchecked().w() { VEXOperandCode::VCVT_Gd_Eq_xmm } else { VEXOperandCode::VCVT_Gd_Eq_xmm @@ -2116,17 +2116,17 @@ fn read_vex_instruction::Address, (Opcode::VMOVSS, VEXOperandCode::VMOVSS_11), 0x12 => (Opcode::VMOVSLDUP, if L { VEXOperandCode::G_E_ymm } else { VEXOperandCode::G_E_xmm }), 0x16 => (Opcode::VMOVSHDUP, if L { VEXOperandCode::G_E_ymm } else { VEXOperandCode::G_E_xmm }), - 0x2a => (Opcode::VCVTSI2SS, if instruction.prefixes.vex().w() { + 0x2a => (Opcode::VCVTSI2SS, if instruction.prefixes.vex_unchecked().w() { VEXOperandCode::G_V_xmm_Ed } else { VEXOperandCode::G_V_xmm_Ed }), - 0x2c => (Opcode::VCVTTSS2SI, if instruction.prefixes.vex().w() { + 0x2c => (Opcode::VCVTTSS2SI, if instruction.prefixes.vex_unchecked().w() { VEXOperandCode::VCVT_Gd_Ed_xmm // 32-bit } else { VEXOperandCode::VCVT_Gd_Ed_xmm // 32-bit }), - 0x2d => (Opcode::VCVTSS2SI, if instruction.prefixes.vex().w() { + 0x2d => (Opcode::VCVTSS2SI, if instruction.prefixes.vex_unchecked().w() { VEXOperandCode::VCVT_Gd_Ed_xmm // 32-bit } else { VEXOperandCode::VCVT_Gd_Ed_xmm // 32-bit @@ -2251,7 +2251,7 @@ fn read_vex_instruction::Address, (Opcode::VPERMPS, if L { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { return Err(DecodeError::InvalidOpcode); } VEXOperandCode::G_V_E_ymm @@ -2264,7 +2264,7 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0x18 => if instruction.prefixes.vex_unchecked().w() { instruction.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } else { @@ -2274,7 +2274,7 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0x19 => if instruction.prefixes.vex_unchecked().w() { instruction.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } else { @@ -2467,7 +2467,7 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0x45 => if instruction.prefixes.vex_unchecked().w() { (Opcode::VPSRLVQ, if L { 
VEXOperandCode::G_V_E_ymm } else { @@ -2481,17 +2481,17 @@ fn read_vex_instruction::Address, (Opcode::VPSRAVD, if L { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { return Err(DecodeError::InvalidOpcode); } VEXOperandCode::G_V_E_ymm } else { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { return Err(DecodeError::InvalidOpcode); } VEXOperandCode::G_V_E_xmm }), - 0x47 => if instruction.prefixes.vex().w() { + 0x47 => if instruction.prefixes.vex_unchecked().w() { (Opcode::VPSLLVQ, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2515,7 +2515,7 @@ fn read_vex_instruction::Address, (Opcode::VBROADCASTI128, if L { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { return Err(DecodeError::InvalidOpcode); } VEXOperandCode::G_ymm_M_xmm @@ -2534,7 +2534,7 @@ fn read_vex_instruction::Address, { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VPMASKMOVQ, if L { VEXOperandCode::G_V_M_ymm } else { @@ -2549,7 +2549,7 @@ fn read_vex_instruction::Address, { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VPMASKMOVQ, if L { VEXOperandCode::M_V_G_ymm } else { @@ -2564,7 +2564,7 @@ fn read_vex_instruction::Address, { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VPGATHERDQ, if L { VEXOperandCode::G_Ey_V_ymm } else { @@ -2579,7 +2579,7 @@ fn read_vex_instruction::Address, { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VPGATHERQQ, if L { VEXOperandCode::G_Ey_V_ymm } else { @@ -2594,7 +2594,7 @@ fn read_vex_instruction::Address, { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VGATHERDPD, if L { VEXOperandCode::G_Ey_V_ymm } else { @@ -2609,7 +2609,7 @@ fn read_vex_instruction::Address, { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VGATHERQPD, if L { VEXOperandCode::G_Ey_V_ymm } else { @@ -2624,7 +2624,7 @@ fn read_vex_instruction::Address, { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMADDSUB132PD, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2639,7 +2639,7 @@ fn read_vex_instruction::Address, { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMSUBADD132PD, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2654,7 +2654,7 @@ fn read_vex_instruction::Address, { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMADD132PD, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2668,13 +2668,13 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0x99 => if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMADD132SD, VEXOperandCode::G_V_E_xmm /* 64bit */) } else { (Opcode::VFMADD132SS, VEXOperandCode::G_V_E_xmm /* 64bit */) }, 0x9A => { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMSUB132PD, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2688,13 +2688,13 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0x9B => if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMSUB132SD, VEXOperandCode::G_V_E_xmm /* 64bit */) } else { (Opcode::VFMSUB132SS, VEXOperandCode::G_V_E_xmm /* 64bit */) }, 0x9C => { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { 
(Opcode::VFNMADD132PD, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2708,13 +2708,13 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0x9D => if instruction.prefixes.vex_unchecked().w() { (Opcode::VFNMADD132SD, VEXOperandCode::G_V_E_xmm /* 64bit */) } else { (Opcode::VFNMADD132SS, VEXOperandCode::G_V_E_xmm /* 64bit */) }, 0x9E => { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VFNMSUB132PD, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2728,13 +2728,13 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0x9F => if instruction.prefixes.vex_unchecked().w() { (Opcode::VFNMSUB132SD, VEXOperandCode::G_V_E_xmm /* 64bit */) } else { (Opcode::VFNMSUB132SS, VEXOperandCode::G_V_E_xmm /* 64bit */) }, 0xA6 => { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMADDSUB213PD, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2749,7 +2749,7 @@ fn read_vex_instruction::Address, { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMSUBADD213PD, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2764,7 +2764,7 @@ fn read_vex_instruction::Address, { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMADD213PD, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2778,13 +2778,13 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0xA9 => if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMADD231SD, VEXOperandCode::G_V_E_xmm /* 64bit */) } else { (Opcode::VFMADD231SS, VEXOperandCode::G_V_E_xmm /* 64bit */) }, 0xAA => { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMSUB213PD, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2798,13 +2798,13 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0xAB => if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMSUB231SD, VEXOperandCode::G_V_E_xmm /* 64bit */) } else { (Opcode::VFMSUB231SS, VEXOperandCode::G_V_E_xmm /* 64bit */) }, 0xAC => { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VFNMADD213PD, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2818,13 +2818,13 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0xAD => if instruction.prefixes.vex_unchecked().w() { (Opcode::VFNMADD213SD, VEXOperandCode::G_V_E_xmm /* 64bit */) } else { (Opcode::VFNMADD213SS, VEXOperandCode::G_V_E_xmm /* 64bit */) }, 0xAE => { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VFNMSUB213PD, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2838,13 +2838,13 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0xAF => if instruction.prefixes.vex_unchecked().w() { (Opcode::VFNMSUB213SD, VEXOperandCode::G_V_E_xmm /* 64bit */) } else { (Opcode::VFNMSUB213SS, VEXOperandCode::G_V_E_xmm /* 64bit */) }, 0xB6 => { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMADDSUB231PD, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2859,7 +2859,7 @@ fn read_vex_instruction::Address, { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMSUBADD231PD, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2874,7 +2874,7 @@ fn read_vex_instruction::Address, { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMADD231PD, if L { 
VEXOperandCode::G_V_E_ymm } else { @@ -2888,13 +2888,13 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0xB9 => if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMADD231SD, VEXOperandCode::G_V_E_xmm /* 64bit */) } else { (Opcode::VFMADD231SS, VEXOperandCode::G_V_E_xmm /* 64bit */) }, 0xBA => { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMSUB231PD, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2908,13 +2908,13 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0xBB => if instruction.prefixes.vex_unchecked().w() { (Opcode::VFMSUB231SD, VEXOperandCode::G_V_E_xmm /* 64bit */) } else { (Opcode::VFMSUB231SS, VEXOperandCode::G_V_E_xmm /* 64bit */) }, 0xBC => { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VFNMADD231PD, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2928,13 +2928,13 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0xBD => if instruction.prefixes.vex_unchecked().w() { (Opcode::VFNMADD231SD, VEXOperandCode::G_V_E_xmm /* 64bit */) } else { (Opcode::VFNMADD231SS, VEXOperandCode::G_V_E_xmm /* 64bit */) }, 0xBE => { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { (Opcode::VFNMSUB231PD, if L { VEXOperandCode::G_V_E_ymm } else { @@ -2948,7 +2948,7 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0xBF => if instruction.prefixes.vex_unchecked().w() { (Opcode::VFNMSUB231SD, VEXOperandCode::G_V_E_xmm /* 64bit */) } else { (Opcode::VFNMSUB231SS, VEXOperandCode::G_V_E_xmm /* 64bit */) @@ -3072,7 +3072,7 @@ fn read_vex_instruction::Address, (Opcode::VPERMQ, if L { - if !instruction.prefixes.vex().w() { + if !instruction.prefixes.vex_unchecked().w() { return Err(DecodeError::InvalidOpcode); } VEXOperandCode::G_E_ymm_imm8 @@ -3081,7 +3081,7 @@ fn read_vex_instruction::Address, (Opcode::VPERMPD, if L { - if !instruction.prefixes.vex().w() { + if !instruction.prefixes.vex_unchecked().w() { return Err(DecodeError::InvalidOpcode); } VEXOperandCode::G_E_ymm_imm8 @@ -3090,12 +3090,12 @@ fn read_vex_instruction::Address, (Opcode::VPBLENDD, if L { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { return Err(DecodeError::InvalidOpcode); } VEXOperandCode::G_V_E_ymm_imm8 } else { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { return Err(DecodeError::InvalidOpcode); } VEXOperandCode::G_V_E_xmm_imm8 @@ -3111,7 +3111,7 @@ fn read_vex_instruction::Address, (Opcode::VPERM2F128, if L { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { return Err(DecodeError::InvalidOpcode); } VEXOperandCode::G_V_E_ymm_imm8 @@ -3159,19 +3159,19 @@ fn read_vex_instruction::Address, (Opcode::VPEXTRB, if L || instruction.prefixes.vex().w() { + 0x14 => (Opcode::VPEXTRB, if L || instruction.prefixes.vex_unchecked().w() { instruction.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } else { VEXOperandCode::Ev_G_xmm_imm8 }), - 0x15 => (Opcode::VPEXTRW, if L || instruction.prefixes.vex().w() { + 0x15 => (Opcode::VPEXTRW, if L || instruction.prefixes.vex_unchecked().w() { instruction.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } else { VEXOperandCode::Ev_G_xmm_imm8 }), - 0x16 => if instruction.prefixes.vex().w() { + 0x16 => if instruction.prefixes.vex_unchecked().w() { (Opcode::VPEXTRQ, if L { instruction.opcode = Opcode::Invalid; return 
Err(DecodeError::InvalidOpcode); @@ -3193,7 +3193,7 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0x18 => if instruction.prefixes.vex_unchecked().w() { instruction.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } else { @@ -3204,7 +3204,7 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0x19 => if instruction.prefixes.vex_unchecked().w() { instruction.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } else { @@ -3232,7 +3232,7 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0x22 => if instruction.prefixes.vex_unchecked().w() { (Opcode::VPINSRQ, if L { instruction.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); @@ -3282,7 +3282,7 @@ fn read_vex_instruction::Address, (Opcode::VPERM2I128, if L { - if instruction.prefixes.vex().w() { + if instruction.prefixes.vex_unchecked().w() { return Err(DecodeError::InvalidOpcode); } VEXOperandCode::G_V_E_ymm_imm8 @@ -3300,7 +3300,7 @@ fn read_vex_instruction::Address, if instruction.prefixes.vex().w() { + 0x4C => if instruction.prefixes.vex_unchecked().w() { instruction.opcode = Opcode::Invalid; return Err(DecodeError::InvalidOpcode); } else { -- cgit v1.1
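As an end-to-end sketch of the protected_mode API after this change (an illustration only, assuming `DisplayStyle` and `decode_slice` are exported from `protected_mode` the same way the long_mode doctests above use them):

use yaxpeax_x86::protected_mode::{DisplayStyle, InstDecoder, Opcode};

fn main() {
    let decoder = InstDecoder::default();
    // 33 c1 decodes as `xor eax, ecx` in 32-bit mode as well.
    let inst = decoder.decode_slice(&[0x33, 0xc1]).unwrap();
    assert_eq!(inst.opcode(), Opcode::XOR);
    assert_eq!(inst.operand_count(), 2);
    // the two display styles shown in the new `display_with` docs:
    assert_eq!(inst.display_with(DisplayStyle::C).to_string(), "eax ^= ecx");
    assert_eq!(inst.display_with(DisplayStyle::Intel).to_string(), "xor eax, ecx");
}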