forked from OSchip/llvm-project
parent 44be217af1
commit cd9da22f68
@@ -1,31 +1,31 @@
@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s

@ CHECK: vabs.s8 d16, d16 @ encoding: [0x20,0x03,0xf1,0xf3]
vabs.s8 d16, d16
@ CHECK: vabs.s16 d16, d16 @ encoding: [0x20,0x03,0xf5,0xf3]
vabs.s16 d16, d16
@ CHECK: vabs.s32 d16, d16 @ encoding: [0x20,0x03,0xf9,0xf3]
vabs.s32 d16, d16
@ CHECK: vabs.f32 d16, d16 @ encoding: [0x20,0x07,0xf9,0xf3]
vabs.f32 d16, d16
@ CHECK: vabs.s8 q8, q8 @ encoding: [0x60,0x03,0xf1,0xf3]
vabs.s8 q8, q8
@ CHECK: vabs.s16 q8, q8 @ encoding: [0x60,0x03,0xf5,0xf3]
vabs.s16 q8, q8
@ CHECK: vabs.s32 q8, q8 @ encoding: [0x60,0x03,0xf9,0xf3]
vabs.s32 q8, q8
@ CHECK: vabs.f32 q8, q8 @ encoding: [0x60,0x07,0xf9,0xf3]
vabs.f32 q8, q8

@ CHECK: vqabs.s8 d16, d16 @ encoding: [0x20,0x07,0xf0,0xf3]
vqabs.s8 d16, d16
@ CHECK: vqabs.s16 d16, d16 @ encoding: [0x20,0x07,0xf4,0xf3]
vqabs.s16 d16, d16
@ CHECK: vqabs.s32 d16, d16 @ encoding: [0x20,0x07,0xf8,0xf3]
vqabs.s32 d16, d16
@ CHECK: vqabs.s8 q8, q8 @ encoding: [0x60,0x07,0xf0,0xf3]
vqabs.s8 q8, q8
@ CHECK: vqabs.s16 q8, q8 @ encoding: [0x60,0x07,0xf4,0xf3]
vqabs.s16 q8, q8
@ CHECK: vqabs.s32 q8, q8 @ encoding: [0x60,0x07,0xf8,0xf3]
vqabs.s32 q8, q8

@@ -1,84 +1,84 @@
@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
@ XFAIL: *
@ NOTE: This currently fails because the ASM parser doesn't parse vabal.

@ CHECK: vabd.s8 d16, d16, d17 @ encoding: [0xa1,0x07,0x40,0xf2]
vabd.s8 d16, d16, d17
@ CHECK: vabd.s16 d16, d16, d17 @ encoding: [0xa1,0x07,0x50,0xf2]
vabd.s16 d16, d16, d17
@ CHECK: vabd.s32 d16, d16, d17 @ encoding: [0xa1,0x07,0x60,0xf2]
vabd.s32 d16, d16, d17
@ CHECK: vabd.u8 d16, d16, d17 @ encoding: [0xa1,0x07,0x40,0xf3]
vabd.u8 d16, d16, d17
@ CHECK: vabd.u16 d16, d16, d17 @ encoding: [0xa1,0x07,0x50,0xf3]
vabd.u16 d16, d16, d17
@ CHECK: vabd.u32 d16, d16, d17 @ encoding: [0xa1,0x07,0x60,0xf3]
vabd.u32 d16, d16, d17
@ CHECK: vabd.f32 d16, d16, d17 @ encoding: [0xa1,0x0d,0x60,0xf3]
vabd.f32 d16, d16, d17
@ CHECK: vabd.s8 q8, q8, q9 @ encoding: [0xe2,0x07,0x40,0xf2]
vabd.s8 q8, q8, q9
@ CHECK: vabd.s16 q8, q8, q9 @ encoding: [0xe2,0x07,0x50,0xf2]
vabd.s16 q8, q8, q9
@ CHECK: vabd.s32 q8, q8, q9 @ encoding: [0xe2,0x07,0x60,0xf2]
vabd.s32 q8, q8, q9
@ CHECK: vabd.u8 q8, q8, q9 @ encoding: [0xe2,0x07,0x40,0xf3]
vabd.u8 q8, q8, q9
@ CHECK: vabd.u16 q8, q8, q9 @ encoding: [0xe2,0x07,0x50,0xf3]
vabd.u16 q8, q8, q9
@ CHECK: vabd.u32 q8, q8, q9 @ encoding: [0xe2,0x07,0x60,0xf3]
vabd.u32 q8, q8, q9
@ CHECK: vabd.f32 q8, q8, q9 @ encoding: [0xe2,0x0d,0x60,0xf3]
vabd.f32 q8, q8, q9

@ CHECK: vabdl.s8 q8, d16, d17 @ encoding: [0xa1,0x07,0xc0,0xf2]
vabdl.s8 q8, d16, d17
@ CHECK: vabdl.s16 q8, d16, d17 @ encoding: [0xa1,0x07,0xd0,0xf2]
vabdl.s16 q8, d16, d17
@ CHECK: vabdl.s32 q8, d16, d17 @ encoding: [0xa1,0x07,0xe0,0xf2]
vabdl.s32 q8, d16, d17
@ CHECK: vabdl.u8 q8, d16, d17 @ encoding: [0xa1,0x07,0xc0,0xf3]
vabdl.u8 q8, d16, d17
@ CHECK: vabdl.u16 q8, d16, d17 @ encoding: [0xa1,0x07,0xd0,0xf3]
vabdl.u16 q8, d16, d17
@ CHECK: vabdl.u32 q8, d16, d17 @ encoding: [0xa1,0x07,0xe0,0xf3]
vabdl.u32 q8, d16, d17

@ CHECK: vaba.s8 d16, d18, d17 @ encoding: [0xb1,0x07,0x42,0xf2]
vaba.s8 d16, d18, d17
@ CHECK: vaba.s16 d16, d18, d17 @ encoding: [0xb1,0x07,0x52,0xf2]
vaba.s16 d16, d18, d17
@ CHECK: vaba.s32 d16, d18, d17 @ encoding: [0xb1,0x07,0x62,0xf2]
vaba.s32 d16, d18, d17
@ CHECK: vaba.u8 d16, d18, d17 @ encoding: [0xb1,0x07,0x42,0xf3]
vaba.u8 d16, d18, d17
@ CHECK: vaba.u16 d16, d18, d17 @ encoding: [0xb1,0x07,0x52,0xf3]
vaba.u16 d16, d18, d17
@ CHECK: vaba.u32 d16, d18, d17 @ encoding: [0xb1,0x07,0x62,0xf3]
vaba.u32 d16, d18, d17
@ CHECK: vaba.s8 q9, q8, q10 @ encoding: [0xf4,0x27,0x40,0xf2]
vaba.s8 q9, q8, q10
@ CHECK: vaba.s16 q9, q8, q10 @ encoding: [0xf4,0x27,0x50,0xf2]
vaba.s16 q9, q8, q10
@ CHECK: vaba.s32 q9, q8, q10 @ encoding: [0xf4,0x27,0x60,0xf2]
vaba.s32 q9, q8, q10
@ CHECK: vaba.u8 q9, q8, q10 @ encoding: [0xf4,0x27,0x40,0xf3]
vaba.u8 q9, q8, q10
@ CHECK: vaba.u16 q9, q8, q10 @ encoding: [0xf4,0x27,0x50,0xf3]
vaba.u16 q9, q8, q10
@ CHECK: vaba.u32 q9, q8, q10 @ encoding: [0xf4,0x27,0x60,0xf3]
vaba.u32 q9, q8, q10

@ CHECK: vabal.s8 q8, d19, d18 @ encoding: [0xa2,0x05,0xc3,0xf2]
vabal.s8 q8, d19, d18
@ CHECK: vabal.s16 q8, d19, d18 @ encoding: [0xa2,0x05,0xd3,0xf2]
vabal.s16 q8, d19, d18
@ CHECK: vabal.s32 q8, d19, d18 @ encoding: [0xa2,0x05,0xe3,0xf2]
vabal.s32 q8, d19, d18
@ CHECK: vabal.u8 q8, d19, d18 @ encoding: [0xa2,0x05,0xc3,0xf3]
vabal.u8 q8, d19, d18
@ CHECK: vabal.u16 q8, d19, d18 @ encoding: [0xa2,0x05,0xd3,0xf3]
vabal.u16 q8, d19, d18
@ CHECK: vabal.u32 q8, d19, d18 @ encoding: [0xa2,0x05,0xe3,0xf3]
vabal.u32 q8, d19, d18

@@ -1,137 +1,137 @@
@ RUN: llvm-mc -mcpu=cortex-a8 -triple armv7-apple-darwin -show-encoding < %s | FileCheck %s


@ CHECK: vadd.i8 d16, d17, d16 @ encoding: [0xa0,0x08,0x41,0xf2]
vadd.i8 d16, d17, d16
@ CHECK: vadd.i16 d16, d17, d16 @ encoding: [0xa0,0x08,0x51,0xf2]
vadd.i16 d16, d17, d16
@ CHECK: vadd.i64 d16, d17, d16 @ encoding: [0xa0,0x08,0x71,0xf2]
vadd.i64 d16, d17, d16
@ CHECK: vadd.i32 d16, d17, d16 @ encoding: [0xa0,0x08,0x61,0xf2]
vadd.i32 d16, d17, d16
@ CHECK: vadd.f32 d16, d16, d17 @ encoding: [0xa1,0x0d,0x40,0xf2]
vadd.f32 d16, d16, d17
@ CHECK: vadd.f32 q8, q8, q9 @ encoding: [0xe2,0x0d,0x40,0xf2]
vadd.f32 q8, q8, q9

@ CHECK: vaddl.s8 q8, d17, d16 @ encoding: [0xa0,0x00,0xc1,0xf2]
vaddl.s8 q8, d17, d16
@ CHECK: vaddl.s16 q8, d17, d16 @ encoding: [0xa0,0x00,0xd1,0xf2]
vaddl.s16 q8, d17, d16
@ CHECK: vaddl.s32 q8, d17, d16 @ encoding: [0xa0,0x00,0xe1,0xf2]
vaddl.s32 q8, d17, d16
@ CHECK: vaddl.u8 q8, d17, d16 @ encoding: [0xa0,0x00,0xc1,0xf3]
vaddl.u8 q8, d17, d16
@ CHECK: vaddl.u16 q8, d17, d16 @ encoding: [0xa0,0x00,0xd1,0xf3]
vaddl.u16 q8, d17, d16
@ CHECK: vaddl.u32 q8, d17, d16 @ encoding: [0xa0,0x00,0xe1,0xf3]
vaddl.u32 q8, d17, d16

@ CHECK: vaddw.s8 q8, q8, d18 @ encoding: [0xa2,0x01,0xc0,0xf2]
vaddw.s8 q8, q8, d18
@ CHECK: vaddw.s16 q8, q8, d18 @ encoding: [0xa2,0x01,0xd0,0xf2]
vaddw.s16 q8, q8, d18
@ CHECK: vaddw.s32 q8, q8, d18 @ encoding: [0xa2,0x01,0xe0,0xf2]
vaddw.s32 q8, q8, d18
@ CHECK: vaddw.u8 q8, q8, d18 @ encoding: [0xa2,0x01,0xc0,0xf3]
vaddw.u8 q8, q8, d18
@ CHECK: vaddw.u16 q8, q8, d18 @ encoding: [0xa2,0x01,0xd0,0xf3]
vaddw.u16 q8, q8, d18
@ CHECK: vaddw.u32 q8, q8, d18 @ encoding: [0xa2,0x01,0xe0,0xf3]
vaddw.u32 q8, q8, d18

@ CHECK: vhadd.s8 d16, d16, d17 @ encoding: [0xa1,0x00,0x40,0xf2]
vhadd.s8 d16, d16, d17
@ CHECK: vhadd.s16 d16, d16, d17 @ encoding: [0xa1,0x00,0x50,0xf2]
vhadd.s16 d16, d16, d17
@ CHECK: vhadd.s32 d16, d16, d17 @ encoding: [0xa1,0x00,0x60,0xf2]
vhadd.s32 d16, d16, d17
@ CHECK: vhadd.u8 d16, d16, d17 @ encoding: [0xa1,0x00,0x40,0xf3]
vhadd.u8 d16, d16, d17
@ CHECK: vhadd.u16 d16, d16, d17 @ encoding: [0xa1,0x00,0x50,0xf3]
vhadd.u16 d16, d16, d17
@ CHECK: vhadd.u32 d16, d16, d17 @ encoding: [0xa1,0x00,0x60,0xf3]
vhadd.u32 d16, d16, d17
@ CHECK: vhadd.s8 q8, q8, q9 @ encoding: [0xe2,0x00,0x40,0xf2]
vhadd.s8 q8, q8, q9
@ CHECK: vhadd.s16 q8, q8, q9 @ encoding: [0xe2,0x00,0x50,0xf2]
vhadd.s16 q8, q8, q9
@ CHECK: vhadd.s32 q8, q8, q9 @ encoding: [0xe2,0x00,0x60,0xf2]
vhadd.s32 q8, q8, q9
@ CHECK: vhadd.u8 q8, q8, q9 @ encoding: [0xe2,0x00,0x40,0xf3]
vhadd.u8 q8, q8, q9
@ CHECK: vhadd.u16 q8, q8, q9 @ encoding: [0xe2,0x00,0x50,0xf3]
vhadd.u16 q8, q8, q9
@ CHECK: vhadd.u32 q8, q8, q9 @ encoding: [0xe2,0x00,0x60,0xf3]
vhadd.u32 q8, q8, q9

@ CHECK: vrhadd.s8 d16, d16, d17 @ encoding: [0xa1,0x01,0x40,0xf2]
vrhadd.s8 d16, d16, d17
@ CHECK: vrhadd.s16 d16, d16, d17 @ encoding: [0xa1,0x01,0x50,0xf2]
vrhadd.s16 d16, d16, d17
@ CHECK: vrhadd.s32 d16, d16, d17 @ encoding: [0xa1,0x01,0x60,0xf2]
vrhadd.s32 d16, d16, d17
@ CHECK: vrhadd.u8 d16, d16, d17 @ encoding: [0xa1,0x01,0x40,0xf3]
vrhadd.u8 d16, d16, d17
@ CHECK: vrhadd.u16 d16, d16, d17 @ encoding: [0xa1,0x01,0x50,0xf3]
vrhadd.u16 d16, d16, d17
@ CHECK: vrhadd.u32 d16, d16, d17 @ encoding: [0xa1,0x01,0x60,0xf3]
vrhadd.u32 d16, d16, d17
@ CHECK: vrhadd.s8 q8, q8, q9 @ encoding: [0xe2,0x01,0x40,0xf2]
vrhadd.s8 q8, q8, q9
@ CHECK: vrhadd.s16 q8, q8, q9 @ encoding: [0xe2,0x01,0x50,0xf2]
vrhadd.s16 q8, q8, q9
@ CHECK: vrhadd.s32 q8, q8, q9 @ encoding: [0xe2,0x01,0x60,0xf2]
vrhadd.s32 q8, q8, q9
@ CHECK: vrhadd.u8 q8, q8, q9 @ encoding: [0xe2,0x01,0x40,0xf3]
vrhadd.u8 q8, q8, q9
@ CHECK: vrhadd.u16 q8, q8, q9 @ encoding: [0xe2,0x01,0x50,0xf3]
vrhadd.u16 q8, q8, q9
@ CHECK: vrhadd.u32 q8, q8, q9 @ encoding: [0xe2,0x01,0x60,0xf3]
vrhadd.u32 q8, q8, q9

@ CHECK: vqadd.s8 d16, d16, d17 @ encoding: [0xb1,0x00,0x40,0xf2]
vqadd.s8 d16, d16, d17
@ CHECK: vqadd.s16 d16, d16, d17 @ encoding: [0xb1,0x00,0x50,0xf2]
vqadd.s16 d16, d16, d17
@ CHECK: vqadd.s32 d16, d16, d17 @ encoding: [0xb1,0x00,0x60,0xf2]
vqadd.s32 d16, d16, d17
@ CHECK: vqadd.s64 d16, d16, d17 @ encoding: [0xb1,0x00,0x70,0xf2]
vqadd.s64 d16, d16, d17
@ CHECK: vqadd.u8 d16, d16, d17 @ encoding: [0xb1,0x00,0x40,0xf3]
vqadd.u8 d16, d16, d17
@ CHECK: vqadd.u16 d16, d16, d17 @ encoding: [0xb1,0x00,0x50,0xf3]
vqadd.u16 d16, d16, d17
@ CHECK: vqadd.u32 d16, d16, d17 @ encoding: [0xb1,0x00,0x60,0xf3]
vqadd.u32 d16, d16, d17
@ CHECK: vqadd.u64 d16, d16, d17 @ encoding: [0xb1,0x00,0x70,0xf3]
vqadd.u64 d16, d16, d17
@ CHECK: vqadd.s8 q8, q8, q9 @ encoding: [0xf2,0x00,0x40,0xf2]
vqadd.s8 q8, q8, q9
@ CHECK: vqadd.s16 q8, q8, q9 @ encoding: [0xf2,0x00,0x50,0xf2]
vqadd.s16 q8, q8, q9
@ CHECK: vqadd.s32 q8, q8, q9 @ encoding: [0xf2,0x00,0x60,0xf2]
vqadd.s32 q8, q8, q9
@ CHECK: vqadd.s64 q8, q8, q9 @ encoding: [0xf2,0x00,0x70,0xf2]
vqadd.s64 q8, q8, q9
@ CHECK: vqadd.u8 q8, q8, q9 @ encoding: [0xf2,0x00,0x40,0xf3]
vqadd.u8 q8, q8, q9
@ CHECK: vqadd.u16 q8, q8, q9 @ encoding: [0xf2,0x00,0x50,0xf3]
vqadd.u16 q8, q8, q9
@ CHECK: vqadd.u32 q8, q8, q9 @ encoding: [0xf2,0x00,0x60,0xf3]
vqadd.u32 q8, q8, q9
@ CHECK: vqadd.u64 q8, q8, q9 @ encoding: [0xf2,0x00,0x70,0xf3]
vqadd.u64 q8, q8, q9

@ CHECK: vaddhn.i16 d16, q8, q9 @ encoding: [0xa2,0x04,0xc0,0xf2]
vaddhn.i16 d16, q8, q9
@ CHECK: vaddhn.i32 d16, q8, q9 @ encoding: [0xa2,0x04,0xd0,0xf2]
vaddhn.i32 d16, q8, q9
@ CHECK: vaddhn.i64 d16, q8, q9 @ encoding: [0xa2,0x04,0xe0,0xf2]
vaddhn.i64 d16, q8, q9
@ CHECK: vraddhn.i16 d16, q8, q9 @ encoding: [0xa2,0x04,0xc0,0xf3]
vraddhn.i16 d16, q8, q9
@ CHECK: vraddhn.i32 d16, q8, q9 @ encoding: [0xa2,0x04,0xd0,0xf3]
vraddhn.i32 d16, q8, q9
@ CHECK: vraddhn.i64 d16, q8, q9 @ encoding: [0xa2,0x04,0xe0,0xf3]
vraddhn.i64 d16, q8, q9

@@ -1,32 +1,32 @@
@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
@ XFAIL: *

@ CHECK: vcnt.8 d16, d16 @ encoding: [0x20,0x05,0xf0,0xf3]
vcnt.8 d16, d16
@ CHECK: vcnt.8 q8, q8 @ encoding: [0x60,0x05,0xf0,0xf3]
vcnt.8 q8, q8
@ CHECK: vclz.i8 d16, d16 @ encoding: [0xa0,0x04,0xf0,0xf3]
vclz.i8 d16, d16
@ CHECK: vclz.i16 d16, d16 @ encoding: [0xa0,0x04,0xf4,0xf3]
vclz.i16 d16, d16
@ CHECK: vclz.i32 d16, d16 @ encoding: [0xa0,0x04,0xf8,0xf3]
vclz.i32 d16, d16
@ CHECK: vclz.i8 q8, q8 @ encoding: [0xe0,0x04,0xf0,0xf3]
vclz.i8 q8, q8
@ CHECK: vclz.i16 q8, q8 @ encoding: [0xe0,0x04,0xf4,0xf3]
vclz.i16 q8, q8
@ CHECK: vclz.i32 q8, q8 @ encoding: [0xe0,0x04,0xf8,0xf3]
vclz.i32 q8, q8
@ CHECK: vcls.s8 d16, d16 @ encoding: [0x20,0x04,0xf0,0xf3]
vcls.s8 d16, d16
@ CHECK: vcls.s16 d16, d16 @ encoding: [0x20,0x04,0xf4,0xf3]
vcls.s16 d16, d16
@ CHECK: vcls.s32 d16, d16 @ encoding: [0x20,0x04,0xf8,0xf3]
vcls.s32 d16, d16
@ CHECK: vcls.s8 q8, q8 @ encoding: [0x60,0x04,0xf0,0xf3]
vcls.s8 q8, q8
@ CHECK: vcls.s16 q8, q8 @ encoding: [0x60,0x04,0xf4,0xf3]
vcls.s16 q8, q8
@ CHECK: vcls.s32 q8, q8 @ encoding: [0x60,0x04,0xf8,0xf3]
vcls.s32 q8, q8

@@ -1,36 +1,36 @@
@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s

@ CHECK: vand d16, d17, d16 @ encoding: [0xb0,0x01,0x41,0xf2]
vand d16, d17, d16
@ CHECK: vand q8, q8, q9 @ encoding: [0xf2,0x01,0x40,0xf2]
vand q8, q8, q9

@ CHECK: veor d16, d17, d16 @ encoding: [0xb0,0x01,0x41,0xf3]
veor d16, d17, d16
@ CHECK: veor q8, q8, q9 @ encoding: [0xf2,0x01,0x40,0xf3]
veor q8, q8, q9

@ CHECK: vorr d16, d17, d16 @ encoding: [0xb0,0x01,0x61,0xf2]
vorr d16, d17, d16
@ CHECK: vorr q8, q8, q9 @ encoding: [0xf2,0x01,0x60,0xf2]
vorr q8, q8, q9

@ CHECK: vbic d16, d17, d16 @ encoding: [0xb0,0x01,0x51,0xf2]
vbic d16, d17, d16
@ CHECK: vbic q8, q8, q9 @ encoding: [0xf2,0x01,0x50,0xf2]
vbic q8, q8, q9

@ CHECK: vorn d16, d17, d16 @ encoding: [0xb0,0x01,0x71,0xf2]
vorn d16, d17, d16
@ CHECK: vorn q8, q8, q9 @ encoding: [0xf2,0x01,0x70,0xf2]
vorn q8, q8, q9

@ CHECK: vmvn d16, d16 @ encoding: [0xa0,0x05,0xf0,0xf3]
vmvn d16, d16
@ CHECK: vmvn q8, q8 @ encoding: [0xe0,0x05,0xf0,0xf3]
vmvn q8, q8

@ CHECK: vbsl d18, d17, d16 @ encoding: [0xb0,0x21,0x51,0xf3]
vbsl d18, d17, d16
@ CHECK: vbsl q8, q10, q9 @ encoding: [0xf2,0x01,0x54,0xf3]
vbsl q8, q10, q9

@@ -1,104 +1,104 @@
@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
@ XFAIL: *

@ FIXME: We cannot currently test the following instructions, which are
@ currently marked as for-disassembly only in the .td files:
@ - VCEQz
@ - VCGEz, VCLEz
@ - VCGTz, VCLTz

@ CHECK: vceq.i8 d16, d16, d17 @ encoding: [0xb1,0x08,0x40,0xf3]
vceq.i8 d16, d16, d17
@ CHECK: vceq.i16 d16, d16, d17 @ encoding: [0xb1,0x08,0x50,0xf3]
vceq.i16 d16, d16, d17
@ CHECK: vceq.i32 d16, d16, d17 @ encoding: [0xb1,0x08,0x60,0xf3]
vceq.i32 d16, d16, d17
@ CHECK: vceq.f32 d16, d16, d17 @ encoding: [0xa1,0x0e,0x40,0xf2]
vceq.f32 d16, d16, d17
@ CHECK: vceq.i8 q8, q8, q9 @ encoding: [0xf2,0x08,0x40,0xf3]
vceq.i8 q8, q8, q9
@ CHECK: vceq.i16 q8, q8, q9 @ encoding: [0xf2,0x08,0x50,0xf3]
vceq.i16 q8, q8, q9
@ CHECK: vceq.i32 q8, q8, q9 @ encoding: [0xf2,0x08,0x60,0xf3]
vceq.i32 q8, q8, q9
@ CHECK: vceq.f32 q8, q8, q9 @ encoding: [0xe2,0x0e,0x40,0xf2]
vceq.f32 q8, q8, q9

@ CHECK: vcge.s8 d16, d16, d17 @ encoding: [0xb1,0x03,0x40,0xf2]
vcge.s8 d16, d16, d17
@ CHECK: vcge.s16 d16, d16, d17 @ encoding: [0xb1,0x03,0x50,0xf2]
vcge.s16 d16, d16, d17
@ CHECK: vcge.s32 d16, d16, d17 @ encoding: [0xb1,0x03,0x60,0xf2]
vcge.s32 d16, d16, d17
@ CHECK: vcge.u8 d16, d16, d17 @ encoding: [0xb1,0x03,0x40,0xf3]
vcge.u8 d16, d16, d17
@ CHECK: vcge.u16 d16, d16, d17 @ encoding: [0xb1,0x03,0x50,0xf3]
vcge.u16 d16, d16, d17
@ CHECK: vcge.u32 d16, d16, d17 @ encoding: [0xb1,0x03,0x60,0xf3]
vcge.u32 d16, d16, d17
@ CHECK: vcge.f32 d16, d16, d17 @ encoding: [0xa1,0x0e,0x40,0xf3]
vcge.f32 d16, d16, d17
@ CHECK: vcge.s8 q8, q8, q9 @ encoding: [0xf2,0x03,0x40,0xf2]
vcge.s8 q8, q8, q9
@ CHECK: vcge.s16 q8, q8, q9 @ encoding: [0xf2,0x03,0x50,0xf2]
vcge.s16 q8, q8, q9
@ CHECK: vcge.s32 q8, q8, q9 @ encoding: [0xf2,0x03,0x60,0xf2]
vcge.s32 q8, q8, q9
@ CHECK: vcge.u8 q8, q8, q9 @ encoding: [0xf2,0x03,0x40,0xf3]
vcge.u8 q8, q8, q9
@ CHECK: vcge.u16 q8, q8, q9 @ encoding: [0xf2,0x03,0x50,0xf3]
vcge.u16 q8, q8, q9
@ CHECK: vcge.u32 q8, q8, q9 @ encoding: [0xf2,0x03,0x60,0xf3]
vcge.u32 q8, q8, q9
@ CHECK: vcge.f32 q8, q8, q9 @ encoding: [0xe2,0x0e,0x40,0xf3]
vcge.f32 q8, q8, q9
@ CHECK: vacge.f32 d16, d16, d17 @ encoding: [0xb1,0x0e,0x40,0xf3]
vacge.f32 d16, d16, d17
@ CHECK: vacge.f32 q8, q8, q9 @ encoding: [0xf2,0x0e,0x40,0xf3]
vacge.f32 q8, q8, q9

@ CHECK: vcgt.s8 d16, d16, d17 @ encoding: [0xa1,0x03,0x40,0xf2]
vcgt.s8 d16, d16, d17
@ CHECK: vcgt.s16 d16, d16, d17 @ encoding: [0xa1,0x03,0x50,0xf2]
vcgt.s16 d16, d16, d17
@ CHECK: vcgt.s32 d16, d16, d17 @ encoding: [0xa1,0x03,0x60,0xf2]
vcgt.s32 d16, d16, d17
@ CHECK: vcgt.u8 d16, d16, d17 @ encoding: [0xa1,0x03,0x40,0xf3]
vcgt.u8 d16, d16, d17
@ CHECK: vcgt.u16 d16, d16, d17 @ encoding: [0xa1,0x03,0x50,0xf3]
vcgt.u16 d16, d16, d17
@ CHECK: vcgt.u32 d16, d16, d17 @ encoding: [0xa1,0x03,0x60,0xf3]
vcgt.u32 d16, d16, d17
@ CHECK: vcgt.f32 d16, d16, d17 @ encoding: [0xa1,0x0e,0x60,0xf3]
vcgt.f32 d16, d16, d17
@ CHECK: vcgt.s8 q8, q8, q9 @ encoding: [0xe2,0x03,0x40,0xf2]
vcgt.s8 q8, q8, q9
@ CHECK: vcgt.s16 q8, q8, q9 @ encoding: [0xe2,0x03,0x50,0xf2]
vcgt.s16 q8, q8, q9
@ CHECK: vcgt.s32 q8, q8, q9 @ encoding: [0xe2,0x03,0x60,0xf2]
vcgt.s32 q8, q8, q9
@ CHECK: vcgt.u8 q8, q8, q9 @ encoding: [0xe2,0x03,0x40,0xf3]
vcgt.u8 q8, q8, q9
@ CHECK: vcgt.u16 q8, q8, q9 @ encoding: [0xe2,0x03,0x50,0xf3]
vcgt.u16 q8, q8, q9
@ CHECK: vcgt.u32 q8, q8, q9 @ encoding: [0xe2,0x03,0x60,0xf3]
vcgt.u32 q8, q8, q9
@ CHECK: vcgt.f32 q8, q8, q9 @ encoding: [0xe2,0x0e,0x60,0xf3]
vcgt.f32 q8, q8, q9
@ CHECK: vacgt.f32 d16, d16, d17 @ encoding: [0xb1,0x0e,0x60,0xf3]
vacgt.f32 d16, d16, d17
@ CHECK: vacgt.f32 q8, q8, q9 @ encoding: [0xf2,0x0e,0x60,0xf3]
vacgt.f32 q8, q8, q9

@ CHECK: vtst.8 d16, d16, d17 @ encoding: [0xb1,0x08,0x40,0xf2]
vtst.8 d16, d16, d17
@ CHECK: vtst.16 d16, d16, d17 @ encoding: [0xb1,0x08,0x50,0xf2]
vtst.16 d16, d16, d17
@ CHECK: vtst.32 d16, d16, d17 @ encoding: [0xb1,0x08,0x60,0xf2]
vtst.32 d16, d16, d17
@ CHECK: vtst.8 q8, q8, q9 @ encoding: [0xf2,0x08,0x40,0xf2]
vtst.8 q8, q8, q9
@ CHECK: vtst.16 q8, q8, q9 @ encoding: [0xf2,0x08,0x50,0xf2]
vtst.16 q8, q8, q9
@ CHECK: vtst.32 q8, q8, q9 @ encoding: [0xf2,0x08,0x60,0xf2]
vtst.32 q8, q8, q9

@@ -1,34 +1,34 @@
@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s

@ CHECK: vcvt.s32.f32 d16, d16 @ encoding: [0x20,0x07,0xfb,0xf3]
vcvt.s32.f32 d16, d16
@ CHECK: vcvt.u32.f32 d16, d16 @ encoding: [0xa0,0x07,0xfb,0xf3]
vcvt.u32.f32 d16, d16
@ CHECK: vcvt.f32.s32 d16, d16 @ encoding: [0x20,0x06,0xfb,0xf3]
vcvt.f32.s32 d16, d16
@ CHECK: vcvt.f32.u32 d16, d16 @ encoding: [0xa0,0x06,0xfb,0xf3]
vcvt.f32.u32 d16, d16
@ CHECK: vcvt.s32.f32 q8, q8 @ encoding: [0x60,0x07,0xfb,0xf3]
vcvt.s32.f32 q8, q8
@ CHECK: vcvt.u32.f32 q8, q8 @ encoding: [0xe0,0x07,0xfb,0xf3]
vcvt.u32.f32 q8, q8
@ CHECK: vcvt.f32.s32 q8, q8 @ encoding: [0x60,0x06,0xfb,0xf3]
vcvt.f32.s32 q8, q8
@ CHECK: vcvt.f32.u32 q8, q8 @ encoding: [0xe0,0x06,0xfb,0xf3]
vcvt.f32.u32 q8, q8
@ CHECK: vcvt.s32.f32 d16, d16, #1 @ encoding: [0x30,0x0f,0xff,0xf2]
vcvt.s32.f32 d16, d16, #1
@ CHECK: vcvt.u32.f32 d16, d16, #1 @ encoding: [0x30,0x0f,0xff,0xf3]
vcvt.u32.f32 d16, d16, #1
@ CHECK: vcvt.f32.s32 d16, d16, #1 @ encoding: [0x30,0x0e,0xff,0xf2]
vcvt.f32.s32 d16, d16, #1
@ CHECK: vcvt.f32.u32 d16, d16, #1 @ encoding: [0x30,0x0e,0xff,0xf3]
vcvt.f32.u32 d16, d16, #1
@ CHECK: vcvt.s32.f32 q8, q8, #1 @ encoding: [0x70,0x0f,0xff,0xf2]
vcvt.s32.f32 q8, q8, #1
@ CHECK: vcvt.u32.f32 q8, q8, #1 @ encoding: [0x70,0x0f,0xff,0xf3]
vcvt.u32.f32 q8, q8, #1
@ CHECK: vcvt.f32.s32 q8, q8, #1 @ encoding: [0x70,0x0e,0xff,0xf2]
vcvt.f32.s32 q8, q8, #1
@ CHECK: vcvt.f32.u32 q8, q8, #1 @ encoding: [0x70,0x0e,0xff,0xf3]
vcvt.f32.u32 q8, q8, #1

@@ -1,27 +1,27 @@
@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
@ XFAIL: *

@ CHECK: vdup.8 d16, r0 @ encoding: [0x90,0x0b,0xc0,0xee]
vdup.8 d16, r0
@ CHECK: vdup.16 d16, r0 @ encoding: [0xb0,0x0b,0x80,0xee]
vdup.16 d16, r0
@ CHECK: vdup.32 d16, r0 @ encoding: [0x90,0x0b,0x80,0xee]
vdup.32 d16, r0
@ CHECK: vdup.8 q8, r0 @ encoding: [0x90,0x0b,0xe0,0xee]
vdup.8 q8, r0
@ CHECK: vdup.16 q8, r0 @ encoding: [0xb0,0x0b,0xa0,0xee]
vdup.16 q8, r0
@ CHECK: vdup.32 q8, r0 @ encoding: [0x90,0x0b,0xa0,0xee]
vdup.32 q8, r0
@ CHECK: vdup.8 d16, d16[1] @ encoding: [0x20,0x0c,0xf3,0xf3]
vdup.8 d16, d16[1]
@ CHECK: vdup.16 d16, d16[1] @ encoding: [0x20,0x0c,0xf6,0xf3]
vdup.16 d16, d16[1]
@ CHECK: vdup.32 d16, d16[1] @ encoding: [0x20,0x0c,0xfc,0xf3]
vdup.32 d16, d16[1]
@ CHECK: vdup.8 q8, d16[1] @ encoding: [0x60,0x0c,0xf3,0xf3]
vdup.8 q8, d16[1]
@ CHECK: vdup.16 q8, d16[1] @ encoding: [0x60,0x0c,0xf6,0xf3]
vdup.16 q8, d16[1]
@ CHECK: vdup.32 q8, d16[1] @ encoding: [0x60,0x0c,0xfc,0xf3]
vdup.32 q8, d16[1]

@@ -1,58 +1,58 @@
@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s

@ CHECK: vmin.s8 d16, d16, d17 @ encoding: [0xb1,0x06,0x40,0xf2]
vmin.s8 d16, d16, d17
@ CHECK: vmin.s16 d16, d16, d17 @ encoding: [0xb1,0x06,0x50,0xf2]
vmin.s16 d16, d16, d17
@ CHECK: vmin.s32 d16, d16, d17 @ encoding: [0xb1,0x06,0x60,0xf2]
vmin.s32 d16, d16, d17
@ CHECK: vmin.u8 d16, d16, d17 @ encoding: [0xb1,0x06,0x40,0xf3]
vmin.u8 d16, d16, d17
@ CHECK: vmin.u16 d16, d16, d17 @ encoding: [0xb1,0x06,0x50,0xf3]
vmin.u16 d16, d16, d17
@ CHECK: vmin.u32 d16, d16, d17 @ encoding: [0xb1,0x06,0x60,0xf3]
vmin.u32 d16, d16, d17
@ CHECK: vmin.f32 d16, d16, d17 @ encoding: [0xa1,0x0f,0x60,0xf2]
vmin.f32 d16, d16, d17
@ CHECK: vmin.s8 q8, q8, q9 @ encoding: [0xf2,0x06,0x40,0xf2]
vmin.s8 q8, q8, q9
@ CHECK: vmin.s16 q8, q8, q9 @ encoding: [0xf2,0x06,0x50,0xf2]
vmin.s16 q8, q8, q9
@ CHECK: vmin.s32 q8, q8, q9 @ encoding: [0xf2,0x06,0x60,0xf2]
vmin.s32 q8, q8, q9
@ CHECK: vmin.u8 q8, q8, q9 @ encoding: [0xf2,0x06,0x40,0xf3]
vmin.u8 q8, q8, q9
@ CHECK: vmin.u16 q8, q8, q9 @ encoding: [0xf2,0x06,0x50,0xf3]
vmin.u16 q8, q8, q9
@ CHECK: vmin.u32 q8, q8, q9 @ encoding: [0xf2,0x06,0x60,0xf3]
vmin.u32 q8, q8, q9
@ CHECK: vmin.f32 q8, q8, q9 @ encoding: [0xe2,0x0f,0x60,0xf2]
vmin.f32 q8, q8, q9
@ CHECK: vmax.s8 d16, d16, d17 @ encoding: [0xa1,0x06,0x40,0xf2]
vmax.s8 d16, d16, d17
@ CHECK: vmax.s16 d16, d16, d17 @ encoding: [0xa1,0x06,0x50,0xf2]
vmax.s16 d16, d16, d17
@ CHECK: vmax.s32 d16, d16, d17 @ encoding: [0xa1,0x06,0x60,0xf2]
vmax.s32 d16, d16, d17
@ CHECK: vmax.u8 d16, d16, d17 @ encoding: [0xa1,0x06,0x40,0xf3]
vmax.u8 d16, d16, d17
@ CHECK: vmax.u16 d16, d16, d17 @ encoding: [0xa1,0x06,0x50,0xf3]
vmax.u16 d16, d16, d17
@ CHECK: vmax.u32 d16, d16, d17 @ encoding: [0xa1,0x06,0x60,0xf3]
vmax.u32 d16, d16, d17
@ CHECK: vmax.f32 d16, d16, d17 @ encoding: [0xa1,0x0f,0x40,0xf2]
vmax.f32 d16, d16, d17
@ CHECK: vmax.s8 q8, q8, q9 @ encoding: [0xe2,0x06,0x40,0xf2]
vmax.s8 q8, q8, q9
@ CHECK: vmax.s16 q8, q8, q9 @ encoding: [0xe2,0x06,0x50,0xf2]
vmax.s16 q8, q8, q9
@ CHECK: vmax.s32 q8, q8, q9 @ encoding: [0xe2,0x06,0x60,0xf2]
vmax.s32 q8, q8, q9
@ CHECK: vmax.u8 q8, q8, q9 @ encoding: [0xe2,0x06,0x40,0xf3]
vmax.u8 q8, q8, q9
@ CHECK: vmax.u16 q8, q8, q9 @ encoding: [0xe2,0x06,0x50,0xf3]
vmax.u16 q8, q8, q9
@ CHECK: vmax.u32 q8, q8, q9 @ encoding: [0xe2,0x06,0x60,0xf3]
vmax.u32 q8, q8, q9
@ CHECK: vmax.f32 q8, q8, q9 @ encoding: [0xe2,0x0f,0x40,0xf2]
vmax.f32 q8, q8, q9

@@ -1,117 +1,117 @@
@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
@ XFAIL: *

@ CHECK: vmov.i8 d16, #0x8 @ encoding: [0x18,0x0e,0xc0,0xf2]
vmov.i8 d16, #0x8
@ CHECK: vmov.i16 d16, #0x10 @ encoding: [0x10,0x08,0xc1,0xf2]
vmov.i16 d16, #0x10
@ CHECK: vmov.i16 d16, #0x1000 @ encoding: [0x10,0x0a,0xc1,0xf2]
vmov.i16 d16, #0x1000
@ CHECK: vmov.i32 d16, #0x20 @ encoding: [0x10,0x00,0xc2,0xf2]
vmov.i32 d16, #0x20
@ CHECK: vmov.i32 d16, #0x2000 @ encoding: [0x10,0x02,0xc2,0xf2]
vmov.i32 d16, #0x2000
@ CHECK: vmov.i32 d16, #0x200000 @ encoding: [0x10,0x04,0xc2,0xf2]
vmov.i32 d16, #0x200000
@ CHECK: vmov.i32 d16, #0x20000000 @ encoding: [0x10,0x06,0xc2,0xf2]
vmov.i32 d16, #0x20000000
@ CHECK: vmov.i32 d16, #0x20FF @ encoding: [0x10,0x0c,0xc2,0xf2]
vmov.i32 d16, #0x20FF
@ CHECK: vmov.i32 d16, #0x20FFFF @ encoding: [0x10,0x0d,0xc2,0xf2]
vmov.i32 d16, #0x20FFFF
@ CHECK: vmov.i64 d16, #0xFF0000FF0000FFFF @ encoding: [0x33,0x0e,0xc1,0xf3]
vmov.i64 d16, #0xFF0000FF0000FFFF
@ CHECK: vmov.i8 q8, #0x8 @ encoding: [0x58,0x0e,0xc0,0xf2]
vmov.i8 q8, #0x8
@ CHECK: vmov.i16 q8, #0x10 @ encoding: [0x50,0x08,0xc1,0xf2]
vmov.i16 q8, #0x10
@ CHECK: vmov.i16 q8, #0x1000 @ encoding: [0x50,0x0a,0xc1,0xf2]
vmov.i16 q8, #0x1000
@ CHECK: vmov.i32 q8, #0x20 @ encoding: [0x50,0x00,0xc2,0xf2]
vmov.i32 q8, #0x20
@ CHECK: vmov.i32 q8, #0x2000 @ encoding: [0x50,0x02,0xc2,0xf2]
vmov.i32 q8, #0x2000
@ CHECK: vmov.i32 q8, #0x200000 @ encoding: [0x50,0x04,0xc2,0xf2]
vmov.i32 q8, #0x200000
@ CHECK: vmov.i32 q8, #0x20000000 @ encoding: [0x50,0x06,0xc2,0xf2]
vmov.i32 q8, #0x20000000
@ CHECK: vmov.i32 q8, #0x20FF @ encoding: [0x50,0x0c,0xc2,0xf2]
vmov.i32 q8, #0x20FF
@ CHECK: vmov.i32 q8, #0x20FFFF @ encoding: [0x50,0x0d,0xc2,0xf2]
vmov.i32 q8, #0x20FFFF
@ CHECK: vmov.i64 q8, #0xFF0000FF0000FFFF @ encoding: [0x73,0x0e,0xc1,0xf3]
vmov.i64 q8, #0xFF0000FF0000FFFF
@ CHECK: vmvn.i16 d16, #0x10 @ encoding: [0x30,0x08,0xc1,0xf2]
vmvn.i16 d16, #0x10
@ CHECK: vmvn.i16 d16, #0x1000 @ encoding: [0x30,0x0a,0xc1,0xf2]
vmvn.i16 d16, #0x1000
@ CHECK: vmvn.i32 d16, #0x20 @ encoding: [0x30,0x00,0xc2,0xf2]
vmvn.i32 d16, #0x20
@ CHECK: vmvn.i32 d16, #0x2000 @ encoding: [0x30,0x02,0xc2,0xf2]
vmvn.i32 d16, #0x2000
@ CHECK: vmvn.i32 d16, #0x200000 @ encoding: [0x30,0x04,0xc2,0xf2]
vmvn.i32 d16, #0x200000
@ CHECK: vmvn.i32 d16, #0x20000000 @ encoding: [0x30,0x06,0xc2,0xf2]
vmvn.i32 d16, #0x20000000
@ CHECK: vmvn.i32 d16, #0x20FF @ encoding: [0x30,0x0c,0xc2,0xf2]
vmvn.i32 d16, #0x20FF
@ CHECK: vmvn.i32 d16, #0x20FFFF @ encoding: [0x30,0x0d,0xc2,0xf2]
vmvn.i32 d16, #0x20FFFF
@ CHECK: vmovl.s8 q8, d16 @ encoding: [0x30,0x0a,0xc8,0xf2]
vmovl.s8 q8, d16
@ CHECK: vmovl.s16 q8, d16 @ encoding: [0x30,0x0a,0xd0,0xf2]
vmovl.s16 q8, d16
@ CHECK: vmovl.s32 q8, d16 @ encoding: [0x30,0x0a,0xe0,0xf2]
vmovl.s32 q8, d16
@ CHECK: vmovl.u8 q8, d16 @ encoding: [0x30,0x0a,0xc8,0xf3]
vmovl.u8 q8, d16
@ CHECK: vmovl.u16 q8, d16 @ encoding: [0x30,0x0a,0xd0,0xf3]
vmovl.u16 q8, d16
@ CHECK: vmovl.u32 q8, d16 @ encoding: [0x30,0x0a,0xe0,0xf3]
vmovl.u32 q8, d16
@ CHECK: vmovn.i16 d16, q8 @ encoding: [0x20,0x02,0xf2,0xf3]
vmovn.i16 d16, q8
@ CHECK: vmovn.i32 d16, q8 @ encoding: [0x20,0x02,0xf6,0xf3]
vmovn.i32 d16, q8
@ CHECK: vmovn.i64 d16, q8 @ encoding: [0x20,0x02,0xfa,0xf3]
vmovn.i64 d16, q8
@ CHECK: vqmovn.s16 d16, q8 @ encoding: [0xa0,0x02,0xf2,0xf3]
vqmovn.s16 d16, q8
@ CHECK: vqmovn.s32 d16, q8 @ encoding: [0xa0,0x02,0xf6,0xf3]
vqmovn.s32 d16, q8
@ CHECK: vqmovn.s64 d16, q8 @ encoding: [0xa0,0x02,0xfa,0xf3]
vqmovn.s64 d16, q8
@ CHECK: vqmovn.u16 d16, q8 @ encoding: [0xe0,0x02,0xf2,0xf3]
vqmovn.u16 d16, q8
@ CHECK: vqmovn.u32 d16, q8 @ encoding: [0xe0,0x02,0xf6,0xf3]
vqmovn.u32 d16, q8
@ CHECK: vqmovn.u64 d16, q8 @ encoding: [0xe0,0x02,0xfa,0xf3]
vqmovn.u64 d16, q8
@ CHECK: vqmovun.s16 d16, q8 @ encoding: [0x60,0x02,0xf2,0xf3]
vqmovun.s16 d16, q8
@ CHECK: vqmovun.s32 d16, q8 @ encoding: [0x60,0x02,0xf6,0xf3]
vqmovun.s32 d16, q8
@ CHECK: vqmovun.s64 d16, q8 @ encoding: [0x60,0x02,0xfa,0xf3]
vqmovun.s64 d16, q8
@ CHECK: vmov.s8 r0, d16[1] @ encoding: [0xb0,0x0b,0x50,0xee]
vmov.s8 r0, d16[1]
@ CHECK: vmov.s16 r0, d16[1] @ encoding: [0xf0,0x0b,0x10,0xee]
vmov.s16 r0, d16[1]
@ CHECK: vmov.u8 r0, d16[1] @ encoding: [0xb0,0x0b,0xd0,0xee]
vmov.u8 r0, d16[1]
@ CHECK: vmov.u16 r0, d16[1] @ encoding: [0xf0,0x0b,0x90,0xee]
vmov.u16 r0, d16[1]
@ CHECK: vmov.32 r0, d16[1] @ encoding: [0x90,0x0b,0x30,0xee]
vmov.32 r0, d16[1]
@ CHECK: vmov.8 d16[1], r1 @ encoding: [0xb0,0x1b,0x40,0xee]
vmov.8 d16[1], r1
@ CHECK: vmov.16 d16[1], r1 @ encoding: [0xf0,0x1b,0x00,0xee]
vmov.16 d16[1], r1
@ CHECK: vmov.32 d16[1], r1 @ encoding: [0x90,0x1b,0x20,0xee]
vmov.32 d16[1], r1
@ CHECK: vmov.8 d18[1], r1 @ encoding: [0xb0,0x1b,0x42,0xee]
vmov.8 d18[1], r1
@ CHECK: vmov.16 d18[1], r1 @ encoding: [0xf0,0x1b,0x02,0xee]
vmov.16 d18[1], r1
@ CHECK: vmov.32 d18[1], r1 @ encoding: [0x90,0x1b,0x22,0xee]
vmov.32 d18[1], r1

@@ -1,67 +1,67 @@
@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
@ XFAIL: *

@ CHECK: vmla.i8 d16, d18, d17 @ encoding: [0xa1,0x09,0x42,0xf2]
vmla.i8 d16, d18, d17
@ CHECK: vmla.i16 d16, d18, d17 @ encoding: [0xa1,0x09,0x52,0xf2]
vmla.i16 d16, d18, d17
@ CHECK: vmla.i16 d16, d18, d17 @ encoding: [0xa1,0x09,0x52,0xf2]
vmla.i32 d16, d18, d17
@ CHECK: vmla.f32 d16, d18, d17 @ encoding: [0xb1,0x0d,0x42,0xf2]
vmla.f32 d16, d18, d17
@ CHECK: vmla.i8 q9, q8, q10 @ encoding: [0xe4,0x29,0x40,0xf2]
vmla.i8 q9, q8, q10
@ CHECK: vmla.i16 q9, q8, q10 @ encoding: [0xe4,0x29,0x50,0xf2]
vmla.i16 q9, q8, q10
@ CHECK: vmla.i32 q9, q8, q10 @ encoding: [0xe4,0x29,0x60,0xf2]
vmla.i32 q9, q8, q10
@ CHECK: vmla.f32 q9, q8, q10 @ encoding: [0xf4,0x2d,0x40,0xf2]
vmla.f32 q9, q8, q10
@ CHECK: vmlal.s8 q8, d19, d18 @ encoding: [0xa2,0x08,0xc3,0xf2]
vmlal.s8 q8, d19, d18
@ CHECK: vmlal.s16 q8, d19, d18 @ encoding: [0xa2,0x08,0xd3,0xf2]
vmlal.s16 q8, d19, d18
@ CHECK: vmlal.s32 q8, d19, d18 @ encoding: [0xa2,0x08,0xe3,0xf2]
vmlal.s32 q8, d19, d18
@ CHECK: vmlal.u8 q8, d19, d18 @ encoding: [0xa2,0x08,0xc3,0xf3]
vmlal.u8 q8, d19, d18
@ CHECK: vmlal.u16 q8, d19, d18 @ encoding: [0xa2,0x08,0xd3,0xf3]
vmlal.u16 q8, d19, d18
@ CHECK: vmlal.u32 q8, d19, d18 @ encoding: [0xa2,0x08,0xe3,0xf3]
vmlal.u32 q8, d19, d18
@ CHECK: vqdmlal.s16 q8, d19, d18 @ encoding: [0xa2,0x09,0xd3,0xf2]
vqdmlal.s16 q8, d19, d18
@ CHECK: vqdmlal.s32 q8, d19, d18 @ encoding: [0xa2,0x09,0xe3,0xf2]
vqdmlal.s32 q8, d19, d18
@ CHECK: vmls.i8 d16, d18, d17 @ encoding: [0xa1,0x09,0x42,0xf3]
vmls.i8 d16, d18, d17
@ CHECK: vmls.i16 d16, d18, d17 @ encoding: [0xa1,0x09,0x52,0xf3]
vmls.i16 d16, d18, d17
@ CHECK: vmls.i32 d16, d18, d17 @ encoding: [0xa1,0x09,0x62,0xf3]
vmls.i32 d16, d18, d17
@ CHECK: vmls.f32 d16, d18, d17 @ encoding: [0xb1,0x0d,0x62,0xf2]
vmls.f32 d16, d18, d17
@ CHECK: vmls.i8 q9, q8, q10 @ encoding: [0xe4,0x29,0x40,0xf3]
vmls.i8 q9, q8, q10
@ CHECK: vmls.i16 q9, q8, q10 @ encoding: [0xe4,0x29,0x50,0xf3]
vmls.i16 q9, q8, q10
@ CHECK: vmls.i32 q9, q8, q10 @ encoding: [0xe4,0x29,0x60,0xf3]
vmls.i32 q9, q8, q10
@ CHECK: vmls.f32 q9, q8, q10 @ encoding: [0xf4,0x2d,0x60,0xf2]
vmls.f32 q9, q8, q10
@ CHECK: vmlsl.s8 q8, d19, d18 @ encoding: [0xa2,0x0a,0xc3,0xf2]
vmlsl.s8 q8, d19, d18
@ CHECK: vmlsl.s16 q8, d19, d18 @ encoding: [0xa2,0x0a,0xd3,0xf2]
vmlsl.s16 q8, d19, d18
@ CHECK: vmlsl.s32 q8, d19, d18 @ encoding: [0xa2,0x0a,0xe3,0xf2]
vmlsl.s32 q8, d19, d18
@ CHECK: vmlsl.u8 q8, d19, d18 @ encoding: [0xa2,0x0a,0xc3,0xf3]
vmlsl.u8 q8, d19, d18
@ CHECK: vmlsl.u16 q8, d19, d18 @ encoding: [0xa2,0x0a,0xd3,0xf3]
vmlsl.u16 q8, d19, d18
@ CHECK: vmlsl.u32 q8, d19, d18 @ encoding: [0xa2,0x0a,0xe3,0xf3]
|
||||
vmlsl.u32 q8, d19, d18
|
||||
// CHECK: vqdmlsl.s16 q8, d19, d18 @ encoding: [0xa2,0x0b,0xd3,0xf2]
|
||||
@ CHECK: vqdmlsl.s16 q8, d19, d18 @ encoding: [0xa2,0x0b,0xd3,0xf2]
|
||||
vqdmlsl.s16 q8, d19, d18
|
||||
// CHECK: vqdmlsl.s32 q8, d19, d18 @ encoding: [0xa2,0x0b,0xe3,0xf2]
|
||||
@ CHECK: vqdmlsl.s32 q8, d19, d18 @ encoding: [0xa2,0x0b,0xe3,0xf2]
|
||||
vqdmlsl.s32 q8, d19, d18
@ -1,56 +1,56 @@
// RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
// CHECK: vmul.i8 d16, d16, d17 @ encoding: [0xb1,0x09,0x40,0xf2]
@ CHECK: vmul.i8 d16, d16, d17 @ encoding: [0xb1,0x09,0x40,0xf2]
	vmul.i8 d16, d16, d17
// CHECK: vmul.i16 d16, d16, d17 @ encoding: [0xb1,0x09,0x50,0xf2]
@ CHECK: vmul.i16 d16, d16, d17 @ encoding: [0xb1,0x09,0x50,0xf2]
	vmul.i16 d16, d16, d17
// CHECK: vmul.i32 d16, d16, d17 @ encoding: [0xb1,0x09,0x60,0xf2]
@ CHECK: vmul.i32 d16, d16, d17 @ encoding: [0xb1,0x09,0x60,0xf2]
	vmul.i32 d16, d16, d17
// CHECK: vmul.f32 d16, d16, d17 @ encoding: [0xb1,0x0d,0x40,0xf3]
@ CHECK: vmul.f32 d16, d16, d17 @ encoding: [0xb1,0x0d,0x40,0xf3]
	vmul.f32 d16, d16, d17
// CHECK: vmul.i8 q8, q8, q9 @ encoding: [0xf2,0x09,0x40,0xf2]
@ CHECK: vmul.i8 q8, q8, q9 @ encoding: [0xf2,0x09,0x40,0xf2]
	vmul.i8 q8, q8, q9
// CHECK: vmul.i16 q8, q8, q9 @ encoding: [0xf2,0x09,0x50,0xf2]
@ CHECK: vmul.i16 q8, q8, q9 @ encoding: [0xf2,0x09,0x50,0xf2]
	vmul.i16 q8, q8, q9
// CHECK: vmul.i32 q8, q8, q9 @ encoding: [0xf2,0x09,0x60,0xf2]
@ CHECK: vmul.i32 q8, q8, q9 @ encoding: [0xf2,0x09,0x60,0xf2]
	vmul.i32 q8, q8, q9
// CHECK: vmul.f32 q8, q8, q9 @ encoding: [0xf2,0x0d,0x40,0xf3]
@ CHECK: vmul.f32 q8, q8, q9 @ encoding: [0xf2,0x0d,0x40,0xf3]
	vmul.f32 q8, q8, q9
// CHECK: vmul.p8 d16, d16, d17 @ encoding: [0xb1,0x09,0x40,0xf3]
@ CHECK: vmul.p8 d16, d16, d17 @ encoding: [0xb1,0x09,0x40,0xf3]
	vmul.p8 d16, d16, d17
// CHECK: vmul.p8 q8, q8, q9 @ encoding: [0xf2,0x09,0x40,0xf3]
@ CHECK: vmul.p8 q8, q8, q9 @ encoding: [0xf2,0x09,0x40,0xf3]
	vmul.p8 q8, q8, q9
// CHECK: vqdmulh.s16 d16, d16, d17 @ encoding: [0xa1,0x0b,0x50,0xf2]
@ CHECK: vqdmulh.s16 d16, d16, d17 @ encoding: [0xa1,0x0b,0x50,0xf2]
	vqdmulh.s16 d16, d16, d17
// CHECK: vqdmulh.s32 d16, d16, d17 @ encoding: [0xa1,0x0b,0x60,0xf2]
@ CHECK: vqdmulh.s32 d16, d16, d17 @ encoding: [0xa1,0x0b,0x60,0xf2]
	vqdmulh.s32 d16, d16, d17
// CHECK: vqdmulh.s16 q8, q8, q9 @ encoding: [0xe2,0x0b,0x50,0xf2]
@ CHECK: vqdmulh.s16 q8, q8, q9 @ encoding: [0xe2,0x0b,0x50,0xf2]
	vqdmulh.s16 q8, q8, q9
// CHECK: vqdmulh.s32 q8, q8, q9 @ encoding: [0xe2,0x0b,0x60,0xf2]
@ CHECK: vqdmulh.s32 q8, q8, q9 @ encoding: [0xe2,0x0b,0x60,0xf2]
	vqdmulh.s32 q8, q8, q9
// CHECK: vqrdmulh.s16 d16, d16, d17 @ encoding: [0xa1,0x0b,0x50,0xf3]
@ CHECK: vqrdmulh.s16 d16, d16, d17 @ encoding: [0xa1,0x0b,0x50,0xf3]
	vqrdmulh.s16 d16, d16, d17
// CHECK: vqrdmulh.s32 d16, d16, d17 @ encoding: [0xa1,0x0b,0x60,0xf3]
@ CHECK: vqrdmulh.s32 d16, d16, d17 @ encoding: [0xa1,0x0b,0x60,0xf3]
	vqrdmulh.s32 d16, d16, d17
// CHECK: vqrdmulh.s16 q8, q8, q9 @ encoding: [0xe2,0x0b,0x50,0xf3]
@ CHECK: vqrdmulh.s16 q8, q8, q9 @ encoding: [0xe2,0x0b,0x50,0xf3]
	vqrdmulh.s16 q8, q8, q9
// CHECK: vqrdmulh.s32 q8, q8, q9 @ encoding: [0xe2,0x0b,0x60,0xf3]
@ CHECK: vqrdmulh.s32 q8, q8, q9 @ encoding: [0xe2,0x0b,0x60,0xf3]
	vqrdmulh.s32 q8, q8, q9
// CHECK: vmull.s8 q8, d16, d17 @ encoding: [0xa1,0x0c,0xc0,0xf2]
@ CHECK: vmull.s8 q8, d16, d17 @ encoding: [0xa1,0x0c,0xc0,0xf2]
	vmull.s8 q8, d16, d17
// CHECK: vmull.s16 q8, d16, d17 @ encoding: [0xa1,0x0c,0xd0,0xf2]
@ CHECK: vmull.s16 q8, d16, d17 @ encoding: [0xa1,0x0c,0xd0,0xf2]
	vmull.s16 q8, d16, d17
// CHECK: vmull.s32 q8, d16, d17 @ encoding: [0xa1,0x0c,0xe0,0xf2]
@ CHECK: vmull.s32 q8, d16, d17 @ encoding: [0xa1,0x0c,0xe0,0xf2]
	vmull.s32 q8, d16, d17
// CHECK: vmull.u8 q8, d16, d17 @ encoding: [0xa1,0x0c,0xc0,0xf3]
@ CHECK: vmull.u8 q8, d16, d17 @ encoding: [0xa1,0x0c,0xc0,0xf3]
	vmull.u8 q8, d16, d17
// CHECK: vmull.u16 q8, d16, d17 @ encoding: [0xa1,0x0c,0xd0,0xf3]
@ CHECK: vmull.u16 q8, d16, d17 @ encoding: [0xa1,0x0c,0xd0,0xf3]
	vmull.u16 q8, d16, d17
// CHECK: vmull.u32 q8, d16, d17 @ encoding: [0xa1,0x0c,0xe0,0xf3]
@ CHECK: vmull.u32 q8, d16, d17 @ encoding: [0xa1,0x0c,0xe0,0xf3]
	vmull.u32 q8, d16, d17
// CHECK: vmull.p8 q8, d16, d17 @ encoding: [0xa1,0x0e,0xc0,0xf2]
@ CHECK: vmull.p8 q8, d16, d17 @ encoding: [0xa1,0x0e,0xc0,0xf2]
	vmull.p8 q8, d16, d17
// CHECK: vqdmull.s16 q8, d16, d17 @ encoding: [0xa1,0x0d,0xd0,0xf2]
@ CHECK: vqdmull.s16 q8, d16, d17 @ encoding: [0xa1,0x0d,0xd0,0xf2]
	vqdmull.s16 q8, d16, d17
// CHECK: vqdmull.s32 q8, d16, d17 @ encoding: [0xa1,0x0d,0xe0,0xf2]
@ CHECK: vqdmull.s32 q8, d16, d17 @ encoding: [0xa1,0x0d,0xe0,0xf2]
	vqdmull.s32 q8, d16, d17
@ -1,30 +1,30 @@
// RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
// CHECK: vneg.s8 d16, d16 @ encoding: [0xa0,0x03,0xf1,0xf3]
@ CHECK: vneg.s8 d16, d16 @ encoding: [0xa0,0x03,0xf1,0xf3]
	vneg.s8 d16, d16
// CHECK: vneg.s16 d16, d16 @ encoding: [0xa0,0x03,0xf5,0xf3]
@ CHECK: vneg.s16 d16, d16 @ encoding: [0xa0,0x03,0xf5,0xf3]
	vneg.s16 d16, d16
// CHECK: vneg.s32 d16, d16 @ encoding: [0xa0,0x03,0xf9,0xf3]
@ CHECK: vneg.s32 d16, d16 @ encoding: [0xa0,0x03,0xf9,0xf3]
	vneg.s32 d16, d16
// CHECK: vneg.f32 d16, d16 @ encoding: [0xa0,0x07,0xf9,0xf3]
@ CHECK: vneg.f32 d16, d16 @ encoding: [0xa0,0x07,0xf9,0xf3]
	vneg.f32 d16, d16
// CHECK: vneg.s8 q8, q8 @ encoding: [0xe0,0x03,0xf1,0xf3]
@ CHECK: vneg.s8 q8, q8 @ encoding: [0xe0,0x03,0xf1,0xf3]
	vneg.s8 q8, q8
// CHECK: vneg.s16 q8, q8 @ encoding: [0xe0,0x03,0xf5,0xf3]
@ CHECK: vneg.s16 q8, q8 @ encoding: [0xe0,0x03,0xf5,0xf3]
	vneg.s16 q8, q8
// CHECK: vneg.s32 q8, q8 @ encoding: [0xe0,0x03,0xf9,0xf3]
@ CHECK: vneg.s32 q8, q8 @ encoding: [0xe0,0x03,0xf9,0xf3]
	vneg.s32 q8, q8
// CHECK: vneg.f32 q8, q8 @ encoding: [0xe0,0x07,0xf9,0xf3]
@ CHECK: vneg.f32 q8, q8 @ encoding: [0xe0,0x07,0xf9,0xf3]
	vneg.f32 q8, q8
// CHECK: vqneg.s8 d16, d16 @ encoding: [0xa0,0x07,0xf0,0xf3]
@ CHECK: vqneg.s8 d16, d16 @ encoding: [0xa0,0x07,0xf0,0xf3]
	vqneg.s8 d16, d16
// CHECK: vqneg.s16 d16, d16 @ encoding: [0xa0,0x07,0xf4,0xf3]
@ CHECK: vqneg.s16 d16, d16 @ encoding: [0xa0,0x07,0xf4,0xf3]
	vqneg.s16 d16, d16
// CHECK: vqneg.s32 d16, d16 @ encoding: [0xa0,0x07,0xf8,0xf3]
@ CHECK: vqneg.s32 d16, d16 @ encoding: [0xa0,0x07,0xf8,0xf3]
	vqneg.s32 d16, d16
// CHECK: vqneg.s8 q8, q8 @ encoding: [0xe0,0x07,0xf0,0xf3]
@ CHECK: vqneg.s8 q8, q8 @ encoding: [0xe0,0x07,0xf0,0xf3]
	vqneg.s8 q8, q8
// CHECK: vqneg.s16 q8, q8 @ encoding: [0xe0,0x07,0xf4,0xf3]
@ CHECK: vqneg.s16 q8, q8 @ encoding: [0xe0,0x07,0xf4,0xf3]
	vqneg.s16 q8, q8
// CHECK: vqneg.s32 q8, q8 @ encoding: [0xe0,0x07,0xf8,0xf3]
@ CHECK: vqneg.s32 q8, q8 @ encoding: [0xe0,0x07,0xf8,0xf3]
	vqneg.s32 q8, q8
@ -1,87 +1,87 @@
// RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
// XFAIL: *
@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
@ XFAIL: *
// CHECK: vpadd.i8 d16, d17, d16 @ encoding: [0xb0,0x0b,0x41,0xf2]
|
||||
@ CHECK: vpadd.i8 d16, d17, d16 @ encoding: [0xb0,0x0b,0x41,0xf2]
|
||||
vpadd.i8 d16, d17, d16
|
||||
// CHECK: vpadd.i16 d16, d17, d16 @ encoding: [0xb0,0x0b,0x51,0xf2]
|
||||
@ CHECK: vpadd.i16 d16, d17, d16 @ encoding: [0xb0,0x0b,0x51,0xf2]
|
||||
vpadd.i16 d16, d17, d16
|
||||
// CHECK: vpadd.i32 d16, d17, d16 @ encoding: [0xb0,0x0b,0x61,0xf2]
|
||||
@ CHECK: vpadd.i32 d16, d17, d16 @ encoding: [0xb0,0x0b,0x61,0xf2]
|
||||
vpadd.i32 d16, d17, d16
|
||||
// CHECK: vpadd.f32 d16, d16, d17 @ encoding: [0xa1,0x0d,0x40,0xf3]
|
||||
@ CHECK: vpadd.f32 d16, d16, d17 @ encoding: [0xa1,0x0d,0x40,0xf3]
|
||||
vpadd.f32 d16, d16, d17
|
||||
// CHECK: vpaddl.s8 d16, d16 @ encoding: [0x20,0x02,0xf0,0xf3]
|
||||
@ CHECK: vpaddl.s8 d16, d16 @ encoding: [0x20,0x02,0xf0,0xf3]
|
||||
vpaddl.s8 d16, d16
|
||||
// CHECK: vpaddl.s16 d16, d16 @ encoding: [0x20,0x02,0xf4,0xf3]
|
||||
@ CHECK: vpaddl.s16 d16, d16 @ encoding: [0x20,0x02,0xf4,0xf3]
|
||||
vpaddl.s16 d16, d16
|
||||
// CHECK: vpaddl.s32 d16, d16 @ encoding: [0x20,0x02,0xf8,0xf3]
|
||||
@ CHECK: vpaddl.s32 d16, d16 @ encoding: [0x20,0x02,0xf8,0xf3]
|
||||
vpaddl.s32 d16, d16
|
||||
// CHECK: vpaddl.u8 d16, d16 @ encoding: [0xa0,0x02,0xf0,0xf3]
|
||||
@ CHECK: vpaddl.u8 d16, d16 @ encoding: [0xa0,0x02,0xf0,0xf3]
|
||||
vpaddl.u8 d16, d16
|
||||
// CHECK: vpaddl.u16 d16, d16 @ encoding: [0xa0,0x02,0xf4,0xf3]
|
||||
@ CHECK: vpaddl.u16 d16, d16 @ encoding: [0xa0,0x02,0xf4,0xf3]
|
||||
vpaddl.u16 d16, d16
|
||||
// CHECK: vpaddl.u32 d16, d16 @ encoding: [0xa0,0x02,0xf8,0xf3]
|
||||
@ CHECK: vpaddl.u32 d16, d16 @ encoding: [0xa0,0x02,0xf8,0xf3]
|
||||
vpaddl.u32 d16, d16
|
||||
// CHECK: vpaddl.s8 q8, q8 @ encoding: [0x60,0x02,0xf0,0xf3]
|
||||
@ CHECK: vpaddl.s8 q8, q8 @ encoding: [0x60,0x02,0xf0,0xf3]
|
||||
vpaddl.s8 q8, q8
|
||||
// CHECK: vpaddl.s16 q8, q8 @ encoding: [0x60,0x02,0xf4,0xf3]
|
||||
@ CHECK: vpaddl.s16 q8, q8 @ encoding: [0x60,0x02,0xf4,0xf3]
|
||||
vpaddl.s16 q8, q8
|
||||
// CHECK: vpaddl.s32 q8, q8 @ encoding: [0x60,0x02,0xf8,0xf3]
|
||||
@ CHECK: vpaddl.s32 q8, q8 @ encoding: [0x60,0x02,0xf8,0xf3]
|
||||
vpaddl.s32 q8, q8
|
||||
// CHECK: vpaddl.u8 q8, q8 @ encoding: [0xe0,0x02,0xf0,0xf3]
|
||||
@ CHECK: vpaddl.u8 q8, q8 @ encoding: [0xe0,0x02,0xf0,0xf3]
|
||||
vpaddl.u8 q8, q8
|
||||
// CHECK: vpaddl.u16 q8, q8 @ encoding: [0xe0,0x02,0xf4,0xf3]
|
||||
@ CHECK: vpaddl.u16 q8, q8 @ encoding: [0xe0,0x02,0xf4,0xf3]
|
||||
vpaddl.u16 q8, q8
|
||||
// CHECK: vpaddl.u32 q8, q8 @ encoding: [0xe0,0x02,0xf8,0xf3]
|
||||
@ CHECK: vpaddl.u32 q8, q8 @ encoding: [0xe0,0x02,0xf8,0xf3]
|
||||
vpaddl.u32 q8, q8
|
||||
// CHECK: vpadal.s8 d16, d17 @ encoding: [0x21,0x06,0xf0,0xf3]
|
||||
@ CHECK: vpadal.s8 d16, d17 @ encoding: [0x21,0x06,0xf0,0xf3]
|
||||
vpadal.s8 d16, d17
|
||||
// CHECK: vpadal.s16 d16, d17 @ encoding: [0x21,0x06,0xf4,0xf3]
|
||||
@ CHECK: vpadal.s16 d16, d17 @ encoding: [0x21,0x06,0xf4,0xf3]
|
||||
vpadal.s16 d16, d17
|
||||
// CHECK: vpadal.s32 d16, d17 @ encoding: [0x21,0x06,0xf8,0xf3]
|
||||
@ CHECK: vpadal.s32 d16, d17 @ encoding: [0x21,0x06,0xf8,0xf3]
|
||||
vpadal.s32 d16, d17
|
||||
// CHECK: vpadal.u8 d16, d17 @ encoding: [0xa1,0x06,0xf0,0xf3]
|
||||
@ CHECK: vpadal.u8 d16, d17 @ encoding: [0xa1,0x06,0xf0,0xf3]
|
||||
vpadal.u8 d16, d17
|
||||
// CHECK: vpadal.u16 d16, d17 @ encoding: [0xa1,0x06,0xf4,0xf3]
|
||||
@ CHECK: vpadal.u16 d16, d17 @ encoding: [0xa1,0x06,0xf4,0xf3]
|
||||
vpadal.u16 d16, d17
|
||||
// CHECK: vpadal.u32 d16, d17 @ encoding: [0xa1,0x06,0xf8,0xf3]
|
||||
@ CHECK: vpadal.u32 d16, d17 @ encoding: [0xa1,0x06,0xf8,0xf3]
|
||||
vpadal.u32 d16, d17
|
||||
// CHECK: vpadal.s8 q9, q8 @ encoding: [0x60,0x26,0xf0,0xf3]
|
||||
@ CHECK: vpadal.s8 q9, q8 @ encoding: [0x60,0x26,0xf0,0xf3]
|
||||
vpadal.s8 q9, q8
|
||||
// CHECK: vpadal.s16 q9, q8 @ encoding: [0x60,0x26,0xf4,0xf3]
|
||||
@ CHECK: vpadal.s16 q9, q8 @ encoding: [0x60,0x26,0xf4,0xf3]
|
||||
vpadal.s16 q9, q8
|
||||
// CHECK: vpadal.s32 q9, q8 @ encoding: [0x60,0x26,0xf8,0xf3]
|
||||
@ CHECK: vpadal.s32 q9, q8 @ encoding: [0x60,0x26,0xf8,0xf3]
|
||||
vpadal.s32 q9, q8
|
||||
// CHECK: vpadal.u8 q9, q8 @ encoding: [0xe0,0x26,0xf0,0xf3]
|
||||
@ CHECK: vpadal.u8 q9, q8 @ encoding: [0xe0,0x26,0xf0,0xf3]
|
||||
vpadal.u8 q9, q8
|
||||
// CHECK: vpadal.u16 q9, q8 @ encoding: [0xe0,0x26,0xf4,0xf3]
|
||||
@ CHECK: vpadal.u16 q9, q8 @ encoding: [0xe0,0x26,0xf4,0xf3]
|
||||
vpadal.u16 q9, q8
|
||||
// CHECK: vpadal.u32 q9, q8 @ encoding: [0xe0,0x26,0xf8,0xf3]
|
||||
@ CHECK: vpadal.u32 q9, q8 @ encoding: [0xe0,0x26,0xf8,0xf3]
|
||||
vpadal.u32 q9, q8
|
||||
// CHECK: vpmin.s8 d16, d16, d17 @ encoding: [0xb1,0x0a,0x40,0xf2]
|
||||
@ CHECK: vpmin.s8 d16, d16, d17 @ encoding: [0xb1,0x0a,0x40,0xf2]
|
||||
vpmin.s8 d16, d16, d17
|
||||
// CHECK: vpmin.s16 d16, d16, d17 @ encoding: [0xb1,0x0a,0x50,0xf2]
|
||||
@ CHECK: vpmin.s16 d16, d16, d17 @ encoding: [0xb1,0x0a,0x50,0xf2]
|
||||
vpmin.s16 d16, d16, d17
|
||||
// CHECK: vpmin.s32 d16, d16, d17 @ encoding: [0xb1,0x0a,0x60,0xf2]
|
||||
@ CHECK: vpmin.s32 d16, d16, d17 @ encoding: [0xb1,0x0a,0x60,0xf2]
|
||||
vpmin.s32 d16, d16, d17
|
||||
// CHECK: vpmin.u8 d16, d16, d17 @ encoding: [0xb1,0x0a,0x40,0xf3]
|
||||
@ CHECK: vpmin.u8 d16, d16, d17 @ encoding: [0xb1,0x0a,0x40,0xf3]
|
||||
vpmin.u8 d16, d16, d17
|
||||
// CHECK: vpmin.u16 d16, d16, d17 @ encoding: [0xb1,0x0a,0x50,0xf3]
|
||||
@ CHECK: vpmin.u16 d16, d16, d17 @ encoding: [0xb1,0x0a,0x50,0xf3]
|
||||
vpmin.u16 d16, d16, d17
|
||||
// CHECK: vpmin.u32 d16, d16, d17 @ encoding: [0xb1,0x0a,0x60,0xf3]
|
||||
@ CHECK: vpmin.u32 d16, d16, d17 @ encoding: [0xb1,0x0a,0x60,0xf3]
|
||||
vpmin.u32 d16, d16, d17
|
||||
// CHECK: vpmin.f32 d16, d16, d17 @ encoding: [0xa1,0x0f,0x60,0xf3]
|
||||
@ CHECK: vpmin.f32 d16, d16, d17 @ encoding: [0xa1,0x0f,0x60,0xf3]
|
||||
vpmin.f32 d16, d16, d17
|
||||
// CHECK: vpmax.s8 d16, d16, d17 @ encoding: [0xa1,0x0a,0x40,0xf2]
|
||||
@ CHECK: vpmax.s8 d16, d16, d17 @ encoding: [0xa1,0x0a,0x40,0xf2]
|
||||
vpmax.s8 d16, d16, d17
|
||||
// CHECK: vpmax.s16 d16, d16, d17 @ encoding: [0xa1,0x0a,0x50,0xf2]
|
||||
@ CHECK: vpmax.s16 d16, d16, d17 @ encoding: [0xa1,0x0a,0x50,0xf2]
|
||||
vpmax.s16 d16, d16, d17
|
||||
// CHECK: vpmax.s32 d16, d16, d17 @ encoding: [0xa1,0x0a,0x60,0xf2]
|
||||
@ CHECK: vpmax.s32 d16, d16, d17 @ encoding: [0xa1,0x0a,0x60,0xf2]
|
||||
vpmax.s32 d16, d16, d17
|
||||
// CHECK: vpmax.u8 d16, d16, d17 @ encoding: [0xa1,0x0a,0x40,0xf3]
|
||||
@ CHECK: vpmax.u8 d16, d16, d17 @ encoding: [0xa1,0x0a,0x40,0xf3]
|
||||
vpmax.u8 d16, d16, d17
|
||||
// CHECK: vpmax.u16 d16, d16, d17 @ encoding: [0xa1,0x0a,0x50,0xf3]
|
||||
@ CHECK: vpmax.u16 d16, d16, d17 @ encoding: [0xa1,0x0a,0x50,0xf3]
|
||||
vpmax.u16 d16, d16, d17
|
||||
// CHECK: vpmax.u32 d16, d16, d17 @ encoding: [0xa1,0x0a,0x60,0xf3]
|
||||
@ CHECK: vpmax.u32 d16, d16, d17 @ encoding: [0xa1,0x0a,0x60,0xf3]
|
||||
vpmax.u32 d16, d16, d17
|
||||
// CHECK: vpmax.f32 d16, d16, d17 @ encoding: [0xa1,0x0f,0x40,0xf3]
|
||||
@ CHECK: vpmax.f32 d16, d16, d17 @ encoding: [0xa1,0x0f,0x40,0xf3]
|
||||
vpmax.f32 d16, d16, d17
@ -1,26 +1,26 @@
// RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
// CHECK: vrecpe.u32 d16, d16 @ encoding: [0x20,0x04,0xfb,0xf3]
@ CHECK: vrecpe.u32 d16, d16 @ encoding: [0x20,0x04,0xfb,0xf3]
	vrecpe.u32 d16, d16
// CHECK: vrecpe.u32 q8, q8 @ encoding: [0x60,0x04,0xfb,0xf3]
@ CHECK: vrecpe.u32 q8, q8 @ encoding: [0x60,0x04,0xfb,0xf3]
	vrecpe.u32 q8, q8
// CHECK: vrecpe.f32 d16, d16 @ encoding: [0x20,0x05,0xfb,0xf3]
@ CHECK: vrecpe.f32 d16, d16 @ encoding: [0x20,0x05,0xfb,0xf3]
	vrecpe.f32 d16, d16
// CHECK: vrecpe.f32 q8, q8 @ encoding: [0x60,0x05,0xfb,0xf3]
@ CHECK: vrecpe.f32 q8, q8 @ encoding: [0x60,0x05,0xfb,0xf3]
	vrecpe.f32 q8, q8
// CHECK: vrecps.f32 d16, d16, d17 @ encoding: [0xb1,0x0f,0x40,0xf2]
@ CHECK: vrecps.f32 d16, d16, d17 @ encoding: [0xb1,0x0f,0x40,0xf2]
	vrecps.f32 d16, d16, d17
// CHECK: vrecps.f32 q8, q8, q9 @ encoding: [0xf2,0x0f,0x40,0xf2]
@ CHECK: vrecps.f32 q8, q8, q9 @ encoding: [0xf2,0x0f,0x40,0xf2]
	vrecps.f32 q8, q8, q9
// CHECK: vrsqrte.u32 d16, d16 @ encoding: [0xa0,0x04,0xfb,0xf3]
@ CHECK: vrsqrte.u32 d16, d16 @ encoding: [0xa0,0x04,0xfb,0xf3]
	vrsqrte.u32 d16, d16
// CHECK: vrsqrte.u32 q8, q8 @ encoding: [0xe0,0x04,0xfb,0xf3]
@ CHECK: vrsqrte.u32 q8, q8 @ encoding: [0xe0,0x04,0xfb,0xf3]
	vrsqrte.u32 q8, q8
// CHECK: vrsqrte.f32 d16, d16 @ encoding: [0xa0,0x05,0xfb,0xf3]
@ CHECK: vrsqrte.f32 d16, d16 @ encoding: [0xa0,0x05,0xfb,0xf3]
	vrsqrte.f32 d16, d16
// CHECK: vrsqrte.f32 q8, q8 @ encoding: [0xe0,0x05,0xfb,0xf3]
@ CHECK: vrsqrte.f32 q8, q8 @ encoding: [0xe0,0x05,0xfb,0xf3]
	vrsqrte.f32 q8, q8
// CHECK: vrsqrts.f32 d16, d16, d17 @ encoding: [0xb1,0x0f,0x60,0xf2]
@ CHECK: vrsqrts.f32 d16, d16, d17 @ encoding: [0xb1,0x0f,0x60,0xf2]
	vrsqrts.f32 d16, d16, d17
// CHECK: vrsqrts.f32 q8, q8, q9 @ encoding: [0xf2,0x0f,0x60,0xf2]
@ CHECK: vrsqrts.f32 q8, q8, q9 @ encoding: [0xf2,0x0f,0x60,0xf2]
	vrsqrts.f32 q8, q8, q9
@ -1,26 +1,26 @@
// RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
// CHECK: vrev64.8 d16, d16 @ encoding: [0x20,0x00,0xf0,0xf3]
@ CHECK: vrev64.8 d16, d16 @ encoding: [0x20,0x00,0xf0,0xf3]
	vrev64.8 d16, d16
// CHECK: vrev64.16 d16, d16 @ encoding: [0x20,0x00,0xf4,0xf3]
@ CHECK: vrev64.16 d16, d16 @ encoding: [0x20,0x00,0xf4,0xf3]
	vrev64.16 d16, d16
// CHECK: vrev64.32 d16, d16 @ encoding: [0x20,0x00,0xf8,0xf3]
@ CHECK: vrev64.32 d16, d16 @ encoding: [0x20,0x00,0xf8,0xf3]
	vrev64.32 d16, d16
// CHECK: vrev64.8 q8, q8 @ encoding: [0x60,0x00,0xf0,0xf3]
@ CHECK: vrev64.8 q8, q8 @ encoding: [0x60,0x00,0xf0,0xf3]
	vrev64.8 q8, q8
// CHECK: vrev64.16 q8, q8 @ encoding: [0x60,0x00,0xf4,0xf3]
@ CHECK: vrev64.16 q8, q8 @ encoding: [0x60,0x00,0xf4,0xf3]
	vrev64.16 q8, q8
// CHECK: vrev64.32 q8, q8 @ encoding: [0x60,0x00,0xf8,0xf3]
@ CHECK: vrev64.32 q8, q8 @ encoding: [0x60,0x00,0xf8,0xf3]
	vrev64.32 q8, q8
// CHECK: vrev32.8 d16, d16 @ encoding: [0xa0,0x00,0xf0,0xf3]
@ CHECK: vrev32.8 d16, d16 @ encoding: [0xa0,0x00,0xf0,0xf3]
	vrev32.8 d16, d16
// CHECK: vrev32.16 d16, d16 @ encoding: [0xa0,0x00,0xf4,0xf3]
@ CHECK: vrev32.16 d16, d16 @ encoding: [0xa0,0x00,0xf4,0xf3]
	vrev32.16 d16, d16
// CHECK: vrev32.8 q8, q8 @ encoding: [0xe0,0x00,0xf0,0xf3]
@ CHECK: vrev32.8 q8, q8 @ encoding: [0xe0,0x00,0xf0,0xf3]
	vrev32.8 q8, q8
// CHECK: vrev32.16 q8, q8 @ encoding: [0xe0,0x00,0xf4,0xf3]
@ CHECK: vrev32.16 q8, q8 @ encoding: [0xe0,0x00,0xf4,0xf3]
	vrev32.16 q8, q8
// CHECK: vrev16.8 d16, d16 @ encoding: [0x20,0x01,0xf0,0xf3]
@ CHECK: vrev16.8 d16, d16 @ encoding: [0x20,0x01,0xf0,0xf3]
	vrev16.8 d16, d16
// CHECK: vrev16.8 q8, q8 @ encoding: [0x60,0x01,0xf0,0xf3]
@ CHECK: vrev16.8 q8, q8 @ encoding: [0x60,0x01,0xf0,0xf3]
	vrev16.8 q8, q8
@ -1,150 +1,150 @@
// RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
// CHECK: vqshl.s8 d16, d16, d17 @ encoding: [0xb0,0x04,0x41,0xf2]
|
||||
@ CHECK: vqshl.s8 d16, d16, d17 @ encoding: [0xb0,0x04,0x41,0xf2]
|
||||
vqshl.s8 d16, d16, d17
|
||||
// CHECK: vqshl.s16 d16, d16, d17 @ encoding: [0xb0,0x04,0x51,0xf2]
|
||||
@ CHECK: vqshl.s16 d16, d16, d17 @ encoding: [0xb0,0x04,0x51,0xf2]
|
||||
vqshl.s16 d16, d16, d17
|
||||
// CHECK: vqshl.s32 d16, d16, d17 @ encoding: [0xb0,0x04,0x61,0xf2]
|
||||
@ CHECK: vqshl.s32 d16, d16, d17 @ encoding: [0xb0,0x04,0x61,0xf2]
|
||||
vqshl.s32 d16, d16, d17
|
||||
// CHECK: vqshl.s64 d16, d16, d17 @ encoding: [0xb0,0x04,0x71,0xf2]
|
||||
@ CHECK: vqshl.s64 d16, d16, d17 @ encoding: [0xb0,0x04,0x71,0xf2]
|
||||
vqshl.s64 d16, d16, d17
|
||||
// CHECK: vqshl.u8 d16, d16, d17 @ encoding: [0xb0,0x04,0x41,0xf3]
|
||||
@ CHECK: vqshl.u8 d16, d16, d17 @ encoding: [0xb0,0x04,0x41,0xf3]
|
||||
vqshl.u8 d16, d16, d17
|
||||
// CHECK: vqshl.u16 d16, d16, d17 @ encoding: [0xb0,0x04,0x51,0xf3]
|
||||
@ CHECK: vqshl.u16 d16, d16, d17 @ encoding: [0xb0,0x04,0x51,0xf3]
|
||||
vqshl.u16 d16, d16, d17
|
||||
// CHECK: vqshl.u32 d16, d16, d17 @ encoding: [0xb0,0x04,0x61,0xf3]
|
||||
@ CHECK: vqshl.u32 d16, d16, d17 @ encoding: [0xb0,0x04,0x61,0xf3]
|
||||
vqshl.u32 d16, d16, d17
|
||||
// CHECK: vqshl.u64 d16, d16, d17 @ encoding: [0xb0,0x04,0x71,0xf3]
|
||||
@ CHECK: vqshl.u64 d16, d16, d17 @ encoding: [0xb0,0x04,0x71,0xf3]
|
||||
vqshl.u64 d16, d16, d17
|
||||
// CHECK: vqshl.s8 q8, q8, q9 @ encoding: [0xf0,0x04,0x42,0xf2]
|
||||
@ CHECK: vqshl.s8 q8, q8, q9 @ encoding: [0xf0,0x04,0x42,0xf2]
|
||||
vqshl.s8 q8, q8, q9
|
||||
// CHECK: vqshl.s16 q8, q8, q9 @ encoding: [0xf0,0x04,0x52,0xf2]
|
||||
@ CHECK: vqshl.s16 q8, q8, q9 @ encoding: [0xf0,0x04,0x52,0xf2]
|
||||
vqshl.s16 q8, q8, q9
|
||||
// CHECK: vqshl.s32 q8, q8, q9 @ encoding: [0xf0,0x04,0x62,0xf2]
|
||||
@ CHECK: vqshl.s32 q8, q8, q9 @ encoding: [0xf0,0x04,0x62,0xf2]
|
||||
vqshl.s32 q8, q8, q9
|
||||
// CHECK: vqshl.s64 q8, q8, q9 @ encoding: [0xf0,0x04,0x72,0xf2]
|
||||
@ CHECK: vqshl.s64 q8, q8, q9 @ encoding: [0xf0,0x04,0x72,0xf2]
|
||||
vqshl.s64 q8, q8, q9
|
||||
// CHECK: vqshl.u8 q8, q8, q9 @ encoding: [0xf0,0x04,0x42,0xf3]
|
||||
@ CHECK: vqshl.u8 q8, q8, q9 @ encoding: [0xf0,0x04,0x42,0xf3]
|
||||
vqshl.u8 q8, q8, q9
|
||||
// CHECK: vqshl.u16 q8, q8, q9 @ encoding: [0xf0,0x04,0x52,0xf3]
|
||||
@ CHECK: vqshl.u16 q8, q8, q9 @ encoding: [0xf0,0x04,0x52,0xf3]
|
||||
vqshl.u16 q8, q8, q9
|
||||
// CHECK: vqshl.u32 q8, q8, q9 @ encoding: [0xf0,0x04,0x62,0xf3]
|
||||
@ CHECK: vqshl.u32 q8, q8, q9 @ encoding: [0xf0,0x04,0x62,0xf3]
|
||||
vqshl.u32 q8, q8, q9
|
||||
// CHECK: vqshl.u64 q8, q8, q9 @ encoding: [0xf0,0x04,0x72,0xf3]
|
||||
@ CHECK: vqshl.u64 q8, q8, q9 @ encoding: [0xf0,0x04,0x72,0xf3]
|
||||
vqshl.u64 q8, q8, q9
|
||||
// CHECK: vqshl.s8 d16, d16, #7 @ encoding: [0x30,0x07,0xcf,0xf2]
|
||||
@ CHECK: vqshl.s8 d16, d16, #7 @ encoding: [0x30,0x07,0xcf,0xf2]
|
||||
vqshl.s8 d16, d16, #7
|
||||
// CHECK: vqshl.s16 d16, d16, #15 @ encoding: [0x30,0x07,0xdf,0xf2]
|
||||
@ CHECK: vqshl.s16 d16, d16, #15 @ encoding: [0x30,0x07,0xdf,0xf2]
|
||||
vqshl.s16 d16, d16, #15
|
||||
// CHECK: vqshl.s32 d16, d16, #31 @ encoding: [0x30,0x07,0xff,0xf2]
|
||||
@ CHECK: vqshl.s32 d16, d16, #31 @ encoding: [0x30,0x07,0xff,0xf2]
|
||||
vqshl.s32 d16, d16, #31
|
||||
// CHECK: vqshl.s64 d16, d16, #63 @ encoding: [0xb0,0x07,0xff,0xf2]
|
||||
@ CHECK: vqshl.s64 d16, d16, #63 @ encoding: [0xb0,0x07,0xff,0xf2]
|
||||
vqshl.s64 d16, d16, #63
|
||||
// CHECK: vqshl.u8 d16, d16, #7 @ encoding: [0x30,0x07,0xcf,0xf3]
|
||||
@ CHECK: vqshl.u8 d16, d16, #7 @ encoding: [0x30,0x07,0xcf,0xf3]
|
||||
vqshl.u8 d16, d16, #7
|
||||
// CHECK: vqshl.u16 d16, d16, #15 @ encoding: [0x30,0x07,0xdf,0xf3]
|
||||
@ CHECK: vqshl.u16 d16, d16, #15 @ encoding: [0x30,0x07,0xdf,0xf3]
|
||||
vqshl.u16 d16, d16, #15
|
||||
// CHECK: vqshl.u32 d16, d16, #31 @ encoding: [0x30,0x07,0xff,0xf3]
|
||||
@ CHECK: vqshl.u32 d16, d16, #31 @ encoding: [0x30,0x07,0xff,0xf3]
|
||||
vqshl.u32 d16, d16, #31
|
||||
// CHECK: vqshl.u64 d16, d16, #63 @ encoding: [0xb0,0x07,0xff,0xf3]
|
||||
@ CHECK: vqshl.u64 d16, d16, #63 @ encoding: [0xb0,0x07,0xff,0xf3]
|
||||
vqshl.u64 d16, d16, #63
|
||||
// CHECK: vqshlu.s8 d16, d16, #7 @ encoding: [0x30,0x06,0xcf,0xf3]
|
||||
@ CHECK: vqshlu.s8 d16, d16, #7 @ encoding: [0x30,0x06,0xcf,0xf3]
|
||||
vqshlu.s8 d16, d16, #7
|
||||
// CHECK: vqshlu.s16 d16, d16, #15 @ encoding: [0x30,0x06,0xdf,0xf3]
|
||||
@ CHECK: vqshlu.s16 d16, d16, #15 @ encoding: [0x30,0x06,0xdf,0xf3]
|
||||
vqshlu.s16 d16, d16, #15
|
||||
// CHECK: vqshlu.s32 d16, d16, #31 @ encoding: [0x30,0x06,0xff,0xf3]
|
||||
@ CHECK: vqshlu.s32 d16, d16, #31 @ encoding: [0x30,0x06,0xff,0xf3]
|
||||
vqshlu.s32 d16, d16, #31
|
||||
// CHECK: vqshlu.s64 d16, d16, #63 @ encoding: [0xb0,0x06,0xff,0xf3]
|
||||
@ CHECK: vqshlu.s64 d16, d16, #63 @ encoding: [0xb0,0x06,0xff,0xf3]
|
||||
vqshlu.s64 d16, d16, #63
|
||||
// CHECK: vqshl.s8 q8, q8, #7 @ encoding: [0x70,0x07,0xcf,0xf2]
|
||||
@ CHECK: vqshl.s8 q8, q8, #7 @ encoding: [0x70,0x07,0xcf,0xf2]
|
||||
vqshl.s8 q8, q8, #7
|
||||
// CHECK: vqshl.s16 q8, q8, #15 @ encoding: [0x70,0x07,0xdf,0xf2]
|
||||
@ CHECK: vqshl.s16 q8, q8, #15 @ encoding: [0x70,0x07,0xdf,0xf2]
|
||||
vqshl.s16 q8, q8, #15
|
||||
// CHECK: vqshl.s32 q8, q8, #31 @ encoding: [0x70,0x07,0xff,0xf2]
|
||||
@ CHECK: vqshl.s32 q8, q8, #31 @ encoding: [0x70,0x07,0xff,0xf2]
|
||||
vqshl.s32 q8, q8, #31
|
||||
// CHECK: vqshl.s64 q8, q8, #63 @ encoding: [0xf0,0x07,0xff,0xf2]
|
||||
@ CHECK: vqshl.s64 q8, q8, #63 @ encoding: [0xf0,0x07,0xff,0xf2]
|
||||
vqshl.s64 q8, q8, #63
|
||||
// CHECK: vqshl.u8 q8, q8, #7 @ encoding: [0x70,0x07,0xcf,0xf3]
|
||||
@ CHECK: vqshl.u8 q8, q8, #7 @ encoding: [0x70,0x07,0xcf,0xf3]
|
||||
vqshl.u8 q8, q8, #7
|
||||
// CHECK: vqshl.u16 q8, q8, #15 @ encoding: [0x70,0x07,0xdf,0xf3]
|
||||
@ CHECK: vqshl.u16 q8, q8, #15 @ encoding: [0x70,0x07,0xdf,0xf3]
|
||||
vqshl.u16 q8, q8, #15
|
||||
// CHECK: vqshl.u32 q8, q8, #31 @ encoding: [0x70,0x07,0xff,0xf3]
|
||||
@ CHECK: vqshl.u32 q8, q8, #31 @ encoding: [0x70,0x07,0xff,0xf3]
|
||||
vqshl.u32 q8, q8, #31
|
||||
// CHECK: vqshl.u64 q8, q8, #63 @ encoding: [0xf0,0x07,0xff,0xf3]
|
||||
@ CHECK: vqshl.u64 q8, q8, #63 @ encoding: [0xf0,0x07,0xff,0xf3]
|
||||
vqshl.u64 q8, q8, #63
|
||||
// CHECK: vqshlu.s8 q8, q8, #7 @ encoding: [0x70,0x06,0xcf,0xf3]
|
||||
@ CHECK: vqshlu.s8 q8, q8, #7 @ encoding: [0x70,0x06,0xcf,0xf3]
|
||||
vqshlu.s8 q8, q8, #7
|
||||
// CHECK: vqshlu.s16 q8, q8, #15 @ encoding: [0x70,0x06,0xdf,0xf3]
|
||||
@ CHECK: vqshlu.s16 q8, q8, #15 @ encoding: [0x70,0x06,0xdf,0xf3]
|
||||
vqshlu.s16 q8, q8, #15
|
||||
// CHECK: vqshlu.s32 q8, q8, #31 @ encoding: [0x70,0x06,0xff,0xf3]
|
||||
@ CHECK: vqshlu.s32 q8, q8, #31 @ encoding: [0x70,0x06,0xff,0xf3]
|
||||
vqshlu.s32 q8, q8, #31
|
||||
// CHECK: vqshlu.s64 q8, q8, #63 @ encoding: [0xf0,0x06,0xff,0xf3]
|
||||
@ CHECK: vqshlu.s64 q8, q8, #63 @ encoding: [0xf0,0x06,0xff,0xf3]
|
||||
vqshlu.s64 q8, q8, #63
|
||||
// CHECK: vqrshl.s8 d16, d16, d17 @ encoding: [0xb0,0x05,0x41,0xf2]
|
||||
@ CHECK: vqrshl.s8 d16, d16, d17 @ encoding: [0xb0,0x05,0x41,0xf2]
|
||||
vqrshl.s8 d16, d16, d17
|
||||
// CHECK: vqrshl.s16 d16, d16, d17 @ encoding: [0xb0,0x05,0x51,0xf2]
|
||||
@ CHECK: vqrshl.s16 d16, d16, d17 @ encoding: [0xb0,0x05,0x51,0xf2]
|
||||
vqrshl.s16 d16, d16, d17
|
||||
// CHECK: vqrshl.s32 d16, d16, d17 @ encoding: [0xb0,0x05,0x61,0xf2]
|
||||
@ CHECK: vqrshl.s32 d16, d16, d17 @ encoding: [0xb0,0x05,0x61,0xf2]
|
||||
vqrshl.s32 d16, d16, d17
|
||||
// CHECK: vqrshl.s64 d16, d16, d17 @ encoding: [0xb0,0x05,0x71,0xf2]
|
||||
@ CHECK: vqrshl.s64 d16, d16, d17 @ encoding: [0xb0,0x05,0x71,0xf2]
|
||||
vqrshl.s64 d16, d16, d17
|
||||
// CHECK: vqrshl.u8 d16, d16, d17 @ encoding: [0xb0,0x05,0x41,0xf3]
|
||||
@ CHECK: vqrshl.u8 d16, d16, d17 @ encoding: [0xb0,0x05,0x41,0xf3]
|
||||
vqrshl.u8 d16, d16, d17
|
||||
// CHECK: vqrshl.u16 d16, d16, d17 @ encoding: [0xb0,0x05,0x51,0xf3]
|
||||
@ CHECK: vqrshl.u16 d16, d16, d17 @ encoding: [0xb0,0x05,0x51,0xf3]
|
||||
vqrshl.u16 d16, d16, d17
|
||||
// CHECK: vqrshl.u32 d16, d16, d17 @ encoding: [0xb0,0x05,0x61,0xf3]
|
||||
@ CHECK: vqrshl.u32 d16, d16, d17 @ encoding: [0xb0,0x05,0x61,0xf3]
|
||||
vqrshl.u32 d16, d16, d17
|
||||
// CHECK: vqrshl.u64 d16, d16, d17 @ encoding: [0xb0,0x05,0x71,0xf3]
|
||||
@ CHECK: vqrshl.u64 d16, d16, d17 @ encoding: [0xb0,0x05,0x71,0xf3]
|
||||
vqrshl.u64 d16, d16, d17
|
||||
// CHECK: vqrshl.s8 q8, q8, q9 @ encoding: [0xf0,0x05,0x42,0xf2]
|
||||
@ CHECK: vqrshl.s8 q8, q8, q9 @ encoding: [0xf0,0x05,0x42,0xf2]
|
||||
vqrshl.s8 q8, q8, q9
|
||||
// CHECK: vqrshl.s16 q8, q8, q9 @ encoding: [0xf0,0x05,0x52,0xf2]
|
||||
@ CHECK: vqrshl.s16 q8, q8, q9 @ encoding: [0xf0,0x05,0x52,0xf2]
|
||||
vqrshl.s16 q8, q8, q9
|
||||
// CHECK: vqrshl.s32 q8, q8, q9 @ encoding: [0xf0,0x05,0x62,0xf2]
|
||||
@ CHECK: vqrshl.s32 q8, q8, q9 @ encoding: [0xf0,0x05,0x62,0xf2]
|
||||
vqrshl.s32 q8, q8, q9
|
||||
// CHECK: vqrshl.s64 q8, q8, q9 @ encoding: [0xf0,0x05,0x72,0xf2]
|
||||
@ CHECK: vqrshl.s64 q8, q8, q9 @ encoding: [0xf0,0x05,0x72,0xf2]
|
||||
vqrshl.s64 q8, q8, q9
|
||||
// CHECK: vqrshl.u8 q8, q8, q9 @ encoding: [0xf0,0x05,0x42,0xf3]
|
||||
@ CHECK: vqrshl.u8 q8, q8, q9 @ encoding: [0xf0,0x05,0x42,0xf3]
|
||||
vqrshl.u8 q8, q8, q9
|
||||
// CHECK: vqrshl.u16 q8, q8, q9 @ encoding: [0xf0,0x05,0x52,0xf3]
|
||||
@ CHECK: vqrshl.u16 q8, q8, q9 @ encoding: [0xf0,0x05,0x52,0xf3]
|
||||
vqrshl.u16 q8, q8, q9
|
||||
// CHECK: vqrshl.u32 q8, q8, q9 @ encoding: [0xf0,0x05,0x62,0xf3]
|
||||
@ CHECK: vqrshl.u32 q8, q8, q9 @ encoding: [0xf0,0x05,0x62,0xf3]
|
||||
vqrshl.u32 q8, q8, q9
|
||||
// CHECK: vqrshl.u64 q8, q8, q9 @ encoding: [0xf0,0x05,0x72,0xf3]
|
||||
@ CHECK: vqrshl.u64 q8, q8, q9 @ encoding: [0xf0,0x05,0x72,0xf3]
|
||||
vqrshl.u64 q8, q8, q9
|
||||
// CHECK: vqshrn.s16 d16, q8, #8 @ encoding: [0x30,0x09,0xc8,0xf2]
|
||||
@ CHECK: vqshrn.s16 d16, q8, #8 @ encoding: [0x30,0x09,0xc8,0xf2]
|
||||
vqshrn.s16 d16, q8, #8
|
||||
// CHECK: vqshrn.s32 d16, q8, #16 @ encoding: [0x30,0x09,0xd0,0xf2]
|
||||
@ CHECK: vqshrn.s32 d16, q8, #16 @ encoding: [0x30,0x09,0xd0,0xf2]
|
||||
vqshrn.s32 d16, q8, #16
|
||||
// CHECK: vqshrn.s64 d16, q8, #32 @ encoding: [0x30,0x09,0xe0,0xf2]
|
||||
@ CHECK: vqshrn.s64 d16, q8, #32 @ encoding: [0x30,0x09,0xe0,0xf2]
|
||||
vqshrn.s64 d16, q8, #32
|
||||
// CHECK: vqshrn.u16 d16, q8, #8 @ encoding: [0x30,0x09,0xc8,0xf3]
|
||||
@ CHECK: vqshrn.u16 d16, q8, #8 @ encoding: [0x30,0x09,0xc8,0xf3]
|
||||
vqshrn.u16 d16, q8, #8
|
||||
// CHECK: vqshrn.u32 d16, q8, #16 @ encoding: [0x30,0x09,0xd0,0xf3]
|
||||
@ CHECK: vqshrn.u32 d16, q8, #16 @ encoding: [0x30,0x09,0xd0,0xf3]
|
||||
vqshrn.u32 d16, q8, #16
|
||||
// CHECK: vqshrn.u64 d16, q8, #32 @ encoding: [0x30,0x09,0xe0,0xf3]
|
||||
@ CHECK: vqshrn.u64 d16, q8, #32 @ encoding: [0x30,0x09,0xe0,0xf3]
|
||||
vqshrn.u64 d16, q8, #32
|
||||
// CHECK: vqshrun.s16 d16, q8, #8 @ encoding: [0x30,0x08,0xc8,0xf3]
|
||||
@ CHECK: vqshrun.s16 d16, q8, #8 @ encoding: [0x30,0x08,0xc8,0xf3]
|
||||
vqshrun.s16 d16, q8, #8
|
||||
// CHECK: vqshrun.s32 d16, q8, #16 @ encoding: [0x30,0x08,0xd0,0xf3]
|
||||
@ CHECK: vqshrun.s32 d16, q8, #16 @ encoding: [0x30,0x08,0xd0,0xf3]
|
||||
vqshrun.s32 d16, q8, #16
|
||||
// CHECK: vqshrun.s64 d16, q8, #32 @ encoding: [0x30,0x08,0xe0,0xf3]
|
||||
@ CHECK: vqshrun.s64 d16, q8, #32 @ encoding: [0x30,0x08,0xe0,0xf3]
|
||||
vqshrun.s64 d16, q8, #32
|
||||
// CHECK: vqrshrn.s16 d16, q8, #8 @ encoding: [0x70,0x09,0xc8,0xf2]
|
||||
@ CHECK: vqrshrn.s16 d16, q8, #8 @ encoding: [0x70,0x09,0xc8,0xf2]
|
||||
vqrshrn.s16 d16, q8, #8
|
||||
// CHECK: vqrshrn.s32 d16, q8, #16 @ encoding: [0x70,0x09,0xd0,0xf2]
|
||||
@ CHECK: vqrshrn.s32 d16, q8, #16 @ encoding: [0x70,0x09,0xd0,0xf2]
|
||||
vqrshrn.s32 d16, q8, #16
|
||||
// CHECK: vqrshrn.s64 d16, q8, #32 @ encoding: [0x70,0x09,0xe0,0xf2]
|
||||
@ CHECK: vqrshrn.s64 d16, q8, #32 @ encoding: [0x70,0x09,0xe0,0xf2]
|
||||
vqrshrn.s64 d16, q8, #32
|
||||
// CHECK: vqrshrn.u16 d16, q8, #8 @ encoding: [0x70,0x09,0xc8,0xf3]
|
||||
@ CHECK: vqrshrn.u16 d16, q8, #8 @ encoding: [0x70,0x09,0xc8,0xf3]
|
||||
vqrshrn.u16 d16, q8, #8
|
||||
// CHECK: vqrshrn.u32 d16, q8, #16 @ encoding: [0x70,0x09,0xd0,0xf3]
|
||||
@ CHECK: vqrshrn.u32 d16, q8, #16 @ encoding: [0x70,0x09,0xd0,0xf3]
|
||||
vqrshrn.u32 d16, q8, #16
|
||||
// CHECK: vqrshrn.u64 d16, q8, #32 @ encoding: [0x70,0x09,0xe0,0xf3]
|
||||
@ CHECK: vqrshrn.u64 d16, q8, #32 @ encoding: [0x70,0x09,0xe0,0xf3]
|
||||
vqrshrn.u64 d16, q8, #32
|
||||
// CHECK: vqrshrun.s16 d16, q8, #8 @ encoding: [0x70,0x08,0xc8,0xf3]
|
||||
@ CHECK: vqrshrun.s16 d16, q8, #8 @ encoding: [0x70,0x08,0xc8,0xf3]
|
||||
vqrshrun.s16 d16, q8, #8
|
||||
// CHECK: vqrshrun.s32 d16, q8, #16 @ encoding: [0x70,0x08,0xd0,0xf3]
|
||||
@ CHECK: vqrshrun.s32 d16, q8, #16 @ encoding: [0x70,0x08,0xd0,0xf3]
|
||||
vqrshrun.s32 d16, q8, #16
|
||||
// CHECK: vqrshrun.s64 d16, q8, #32 @ encoding: [0x70,0x08,0xe0,0xf3]
|
||||
@ CHECK: vqrshrun.s64 d16, q8, #32 @ encoding: [0x70,0x08,0xe0,0xf3]
|
||||
vqrshrun.s64 d16, q8, #32
@ -1,160 +1,160 @@
// RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unkown -show-encoding < %s | FileCheck %s
// CHECK: vshl.u8 d16, d17, d16 @ encoding: [0xa1,0x04,0x40,0xf3]
|
||||
@ CHECK: vshl.u8 d16, d17, d16 @ encoding: [0xa1,0x04,0x40,0xf3]
|
||||
vshl.u8 d16, d17, d16
|
||||
// CHECK: vshl.u16 d16, d17, d16 @ encoding: [0xa1,0x04,0x50,0xf3]
|
||||
@ CHECK: vshl.u16 d16, d17, d16 @ encoding: [0xa1,0x04,0x50,0xf3]
|
||||
vshl.u16 d16, d17, d16
|
||||
// CHECK: vshl.u32 d16, d17, d16 @ encoding: [0xa1,0x04,0x60,0xf3]
|
||||
@ CHECK: vshl.u32 d16, d17, d16 @ encoding: [0xa1,0x04,0x60,0xf3]
|
||||
vshl.u32 d16, d17, d16
|
||||
// CHECK: vshl.u64 d16, d17, d16 @ encoding: [0xa1,0x04,0x70,0xf3]
|
||||
@ CHECK: vshl.u64 d16, d17, d16 @ encoding: [0xa1,0x04,0x70,0xf3]
|
||||
vshl.u64 d16, d17, d16
|
||||
// CHECK: vshl.i8 d16, d16, #7 @ encoding: [0x30,0x05,0xcf,0xf2]
|
||||
@ CHECK: vshl.i8 d16, d16, #7 @ encoding: [0x30,0x05,0xcf,0xf2]
|
||||
vshl.i8 d16, d16, #7
|
||||
// CHECK: vshl.i16 d16, d16, #15 @ encoding: [0x30,0x05,0xdf,0xf2]
|
||||
@ CHECK: vshl.i16 d16, d16, #15 @ encoding: [0x30,0x05,0xdf,0xf2]
|
||||
vshl.i16 d16, d16, #15
|
||||
// CHECK: vshl.i32 d16, d16, #31 @ encoding: [0x30,0x05,0xff,0xf2]
|
||||
@ CHECK: vshl.i32 d16, d16, #31 @ encoding: [0x30,0x05,0xff,0xf2]
|
||||
vshl.i32 d16, d16, #31
|
||||
// CHECK: vshl.i64 d16, d16, #63 @ encoding: [0xb0,0x05,0xff,0xf2]
|
||||
@ CHECK: vshl.i64 d16, d16, #63 @ encoding: [0xb0,0x05,0xff,0xf2]
|
||||
vshl.i64 d16, d16, #63
|
||||
// CHECK: vshl.u8 q8, q9, q8 @ encoding: [0xe2,0x04,0x40,0xf3]
|
||||
@ CHECK: vshl.u8 q8, q9, q8 @ encoding: [0xe2,0x04,0x40,0xf3]
|
||||
vshl.u8 q8, q9, q8
|
||||
// CHECK: vshl.u16 q8, q9, q8 @ encoding: [0xe2,0x04,0x50,0xf3]
|
||||
@ CHECK: vshl.u16 q8, q9, q8 @ encoding: [0xe2,0x04,0x50,0xf3]
|
||||
vshl.u16 q8, q9, q8
|
||||
// CHECK: vshl.u32 q8, q9, q8 @ encoding: [0xe2,0x04,0x60,0xf3]
|
||||
@ CHECK: vshl.u32 q8, q9, q8 @ encoding: [0xe2,0x04,0x60,0xf3]
|
||||
vshl.u32 q8, q9, q8
|
||||
// CHECK: vshl.u64 q8, q9, q8 @ encoding: [0xe2,0x04,0x70,0xf3]
|
||||
@ CHECK: vshl.u64 q8, q9, q8 @ encoding: [0xe2,0x04,0x70,0xf3]
|
||||
vshl.u64 q8, q9, q8
|
||||
// CHECK: vshl.i8 q8, q8, #7 @ encoding: [0x70,0x05,0xcf,0xf2]
|
||||
@ CHECK: vshl.i8 q8, q8, #7 @ encoding: [0x70,0x05,0xcf,0xf2]
|
||||
vshl.i8 q8, q8, #7
|
||||
// CHECK: vshl.i16 q8, q8, #15 @ encoding: [0x70,0x05,0xdf,0xf2]
|
||||
@ CHECK: vshl.i16 q8, q8, #15 @ encoding: [0x70,0x05,0xdf,0xf2]
|
||||
vshl.i16 q8, q8, #15
|
||||
// CHECK: vshl.i32 q8, q8, #31 @ encoding: [0x70,0x05,0xff,0xf2]
|
||||
@ CHECK: vshl.i32 q8, q8, #31 @ encoding: [0x70,0x05,0xff,0xf2]
|
||||
vshl.i32 q8, q8, #31
|
||||
// CHECK: vshl.i64 q8, q8, #63 @ encoding: [0xf0,0x05,0xff,0xf2]
|
||||
@ CHECK: vshl.i64 q8, q8, #63 @ encoding: [0xf0,0x05,0xff,0xf2]
|
||||
vshl.i64 q8, q8, #63
|
||||
// CHECK: vshr.u8 d16, d16, #8 @ encoding: [0x30,0x00,0xc8,0xf3]
|
||||
@ CHECK: vshr.u8 d16, d16, #8 @ encoding: [0x30,0x00,0xc8,0xf3]
|
||||
vshr.u8 d16, d16, #8
|
||||
// CHECK: vshr.u16 d16, d16, #16 @ encoding: [0x30,0x00,0xd0,0xf3]
|
||||
@ CHECK: vshr.u16 d16, d16, #16 @ encoding: [0x30,0x00,0xd0,0xf3]
|
||||
vshr.u16 d16, d16, #16
|
||||
// CHECK: vshr.u32 d16, d16, #32 @ encoding: [0x30,0x00,0xe0,0xf3]
|
||||
@ CHECK: vshr.u32 d16, d16, #32 @ encoding: [0x30,0x00,0xe0,0xf3]
|
||||
vshr.u32 d16, d16, #32
|
||||
// CHECK: vshr.u64 d16, d16, #64 @ encoding: [0xb0,0x00,0xc0,0xf3]
|
||||
@ CHECK: vshr.u64 d16, d16, #64 @ encoding: [0xb0,0x00,0xc0,0xf3]
|
||||
vshr.u64 d16, d16, #64
|
||||
// CHECK: vshr.u8 q8, q8, #8 @ encoding: [0x70,0x00,0xc8,0xf3]
|
||||
@ CHECK: vshr.u8 q8, q8, #8 @ encoding: [0x70,0x00,0xc8,0xf3]
|
||||
vshr.u8 q8, q8, #8
|
||||
// CHECK: vshr.u16 q8, q8, #16 @ encoding: [0x70,0x00,0xd0,0xf3]
|
||||
@ CHECK: vshr.u16 q8, q8, #16 @ encoding: [0x70,0x00,0xd0,0xf3]
|
||||
vshr.u16 q8, q8, #16
|
||||
// CHECK: vshr.u32 q8, q8, #32 @ encoding: [0x70,0x00,0xe0,0xf3]
|
||||
@ CHECK: vshr.u32 q8, q8, #32 @ encoding: [0x70,0x00,0xe0,0xf3]
|
||||
vshr.u32 q8, q8, #32
|
||||
// CHECK: vshr.u64 q8, q8, #64 @ encoding: [0xf0,0x00,0xc0,0xf3]
|
||||
@ CHECK: vshr.u64 q8, q8, #64 @ encoding: [0xf0,0x00,0xc0,0xf3]
|
||||
vshr.u64 q8, q8, #64
|
||||
// CHECK: vshr.s8 d16, d16, #8 @ encoding: [0x30,0x00,0xc8,0xf2]
|
||||
@ CHECK: vshr.s8 d16, d16, #8 @ encoding: [0x30,0x00,0xc8,0xf2]
|
||||
vshr.s8 d16, d16, #8
|
||||
// CHECK: vshr.s16 d16, d16, #16 @ encoding: [0x30,0x00,0xd0,0xf2]
|
||||
@ CHECK: vshr.s16 d16, d16, #16 @ encoding: [0x30,0x00,0xd0,0xf2]
|
||||
vshr.s16 d16, d16, #16
|
||||
// CHECK: vshr.s32 d16, d16, #32 @ encoding: [0x30,0x00,0xe0,0xf2]
|
||||
@ CHECK: vshr.s32 d16, d16, #32 @ encoding: [0x30,0x00,0xe0,0xf2]
|
||||
vshr.s32 d16, d16, #32
|
||||
// CHECK: vshr.s64 d16, d16, #64 @ encoding: [0xb0,0x00,0xc0,0xf2]
|
||||
@ CHECK: vshr.s64 d16, d16, #64 @ encoding: [0xb0,0x00,0xc0,0xf2]
|
||||
vshr.s64 d16, d16, #64
|
||||
// CHECK: vshr.s8 q8, q8, #8 @ encoding: [0x70,0x00,0xc8,0xf2]
|
||||
@ CHECK: vshr.s8 q8, q8, #8 @ encoding: [0x70,0x00,0xc8,0xf2]
|
||||
vshr.s8 q8, q8, #8
|
||||
// CHECK: vshr.s16 q8, q8, #16 @ encoding: [0x70,0x00,0xd0,0xf2]
|
||||
@ CHECK: vshr.s16 q8, q8, #16 @ encoding: [0x70,0x00,0xd0,0xf2]
|
||||
vshr.s16 q8, q8, #16
|
||||
// CHECK: vshr.s32 q8, q8, #32 @ encoding: [0x70,0x00,0xe0,0xf2]
|
||||
@ CHECK: vshr.s32 q8, q8, #32 @ encoding: [0x70,0x00,0xe0,0xf2]
|
||||
vshr.s32 q8, q8, #32
|
||||
// CHECK: vshr.s64 q8, q8, #64 @ encoding: [0xf0,0x00,0xc0,0xf2]
|
||||
@ CHECK: vshr.s64 q8, q8, #64 @ encoding: [0xf0,0x00,0xc0,0xf2]
|
||||
vshr.s64 q8, q8, #64
|
||||
// CHECK: vshll.s8 q8, d16, #7 @ encoding: [0x30,0x0a,0xcf,0xf2]
|
||||
@ CHECK: vshll.s8 q8, d16, #7 @ encoding: [0x30,0x0a,0xcf,0xf2]
|
||||
vshll.s8 q8, d16, #7
|
||||
// CHECK: vshll.s16 q8, d16, #15 @ encoding: [0x30,0x0a,0xdf,0xf2]
|
||||
@ CHECK: vshll.s16 q8, d16, #15 @ encoding: [0x30,0x0a,0xdf,0xf2]
|
||||
vshll.s16 q8, d16, #15
|
||||
// CHECK: vshll.s32 q8, d16, #31 @ encoding: [0x30,0x0a,0xff,0xf2]
|
||||
@ CHECK: vshll.s32 q8, d16, #31 @ encoding: [0x30,0x0a,0xff,0xf2]
|
||||
vshll.s32 q8, d16, #31
|
||||
// CHECK: vshll.u8 q8, d16, #7 @ encoding: [0x30,0x0a,0xcf,0xf3]
|
||||
@ CHECK: vshll.u8 q8, d16, #7 @ encoding: [0x30,0x0a,0xcf,0xf3]
|
||||
vshll.u8 q8, d16, #7
|
||||
// CHECK: vshll.u16 q8, d16, #15 @ encoding: [0x30,0x0a,0xdf,0xf3]
|
||||
@ CHECK: vshll.u16 q8, d16, #15 @ encoding: [0x30,0x0a,0xdf,0xf3]
|
||||
vshll.u16 q8, d16, #15
|
||||
// CHECK: vshll.u32 q8, d16, #31 @ encoding: [0x30,0x0a,0xff,0xf3]
|
||||
@ CHECK: vshll.u32 q8, d16, #31 @ encoding: [0x30,0x0a,0xff,0xf3]
|
||||
vshll.u32 q8, d16, #31
|
||||
// CHECK: vshll.i8 q8, d16, #8 @ encoding: [0x20,0x03,0xf2,0xf3]
|
||||
@ CHECK: vshll.i8 q8, d16, #8 @ encoding: [0x20,0x03,0xf2,0xf3]
|
||||
vshll.i8 q8, d16, #8
|
||||
// CHECK: vshll.i16 q8, d16, #16 @ encoding: [0x20,0x03,0xf6,0xf3]
|
||||
@ CHECK: vshll.i16 q8, d16, #16 @ encoding: [0x20,0x03,0xf6,0xf3]
|
||||
vshll.i16 q8, d16, #16
|
||||
// CHECK: vshll.i32 q8, d16, #32 @ encoding: [0x20,0x03,0xfa,0xf3]
|
||||
@ CHECK: vshll.i32 q8, d16, #32 @ encoding: [0x20,0x03,0xfa,0xf3]
|
||||
vshll.i32 q8, d16, #32
|
||||
// CHECK: vshrn.i16 d16, q8, #8 @ encoding: [0x30,0x08,0xc8,0xf2]
|
||||
@ CHECK: vshrn.i16 d16, q8, #8 @ encoding: [0x30,0x08,0xc8,0xf2]
|
||||
vshrn.i16 d16, q8, #8
|
||||
// CHECK: vshrn.i32 d16, q8, #16 @ encoding: [0x30,0x08,0xd0,0xf2]
|
||||
@ CHECK: vshrn.i32 d16, q8, #16 @ encoding: [0x30,0x08,0xd0,0xf2]
|
||||
vshrn.i32 d16, q8, #16
|
||||
// CHECK: vshrn.i64 d16, q8, #32 @ encoding: [0x30,0x08,0xe0,0xf2]
|
||||
@ CHECK: vshrn.i64 d16, q8, #32 @ encoding: [0x30,0x08,0xe0,0xf2]
|
||||
vshrn.i64 d16, q8, #32
|
||||
// CHECK: vrshl.s8 d16, d17, d16 @ encoding: [0xa1,0x05,0x40,0xf2]
|
||||
@ CHECK: vrshl.s8 d16, d17, d16 @ encoding: [0xa1,0x05,0x40,0xf2]
|
||||
vrshl.s8 d16, d17, d16
|
||||
// CHECK: vrshl.s16 d16, d17, d16 @ encoding: [0xa1,0x05,0x50,0xf2]
|
||||
@ CHECK: vrshl.s16 d16, d17, d16 @ encoding: [0xa1,0x05,0x50,0xf2]
|
||||
vrshl.s16 d16, d17, d16
|
||||
// CHECK: vrshl.s32 d16, d17, d16 @ encoding: [0xa1,0x05,0x60,0xf2]
|
||||
@ CHECK: vrshl.s32 d16, d17, d16 @ encoding: [0xa1,0x05,0x60,0xf2]
|
||||
vrshl.s32 d16, d17, d16
|
||||
// CHECK: vrshl.s64 d16, d17, d16 @ encoding: [0xa1,0x05,0x70,0xf2]
|
||||
@ CHECK: vrshl.s64 d16, d17, d16 @ encoding: [0xa1,0x05,0x70,0xf2]
|
||||
vrshl.s64 d16, d17, d16
|
||||
// CHECK: vrshl.u8 d16, d17, d16 @ encoding: [0xa1,0x05,0x40,0xf3]
|
||||
@ CHECK: vrshl.u8 d16, d17, d16 @ encoding: [0xa1,0x05,0x40,0xf3]
|
||||
vrshl.u8 d16, d17, d16
|
||||
// CHECK: vrshl.u16 d16, d17, d16 @ encoding: [0xa1,0x05,0x50,0xf3]
|
||||
@ CHECK: vrshl.u16 d16, d17, d16 @ encoding: [0xa1,0x05,0x50,0xf3]
|
||||
vrshl.u16 d16, d17, d16
|
||||
// CHECK: vrshl.u32 d16, d17, d16 @ encoding: [0xa1,0x05,0x60,0xf3]
|
||||
@ CHECK: vrshl.u32 d16, d17, d16 @ encoding: [0xa1,0x05,0x60,0xf3]
|
||||
vrshl.u32 d16, d17, d16
|
||||
// CHECK: vrshl.u64 d16, d17, d16 @ encoding: [0xa1,0x05,0x70,0xf3]
|
||||
@ CHECK: vrshl.u64 d16, d17, d16 @ encoding: [0xa1,0x05,0x70,0xf3]
|
||||
vrshl.u64 d16, d17, d16
|
||||
// CHECK: vrshl.s8 q8, q9, q8 @ encoding: [0xe2,0x05,0x40,0xf2]
|
||||
@ CHECK: vrshl.s8 q8, q9, q8 @ encoding: [0xe2,0x05,0x40,0xf2]
|
||||
vrshl.s8 q8, q9, q8
|
||||
// CHECK: vrshl.s16 q8, q9, q8 @ encoding: [0xe2,0x05,0x50,0xf2]
|
||||
@ CHECK: vrshl.s16 q8, q9, q8 @ encoding: [0xe2,0x05,0x50,0xf2]
|
||||
vrshl.s16 q8, q9, q8
|
||||
// CHECK: vrshl.s32 q8, q9, q8 @ encoding: [0xe2,0x05,0x60,0xf2]
|
||||
@ CHECK: vrshl.s32 q8, q9, q8 @ encoding: [0xe2,0x05,0x60,0xf2]
|
||||
vrshl.s32 q8, q9, q8
|
||||
// CHECK: vrshl.s64 q8, q9, q8 @ encoding: [0xe2,0x05,0x70,0xf2]
|
||||
@ CHECK: vrshl.s64 q8, q9, q8 @ encoding: [0xe2,0x05,0x70,0xf2]
|
||||
vrshl.s64 q8, q9, q8
|
||||
// CHECK: vrshl.u8 q8, q9, q8 @ encoding: [0xe2,0x05,0x40,0xf3]
|
||||
@ CHECK: vrshl.u8 q8, q9, q8 @ encoding: [0xe2,0x05,0x40,0xf3]
|
||||
vrshl.u8 q8, q9, q8
|
||||
// CHECK: vrshl.u16 q8, q9, q8 @ encoding: [0xe2,0x05,0x50,0xf3]
|
||||
@ CHECK: vrshl.u16 q8, q9, q8 @ encoding: [0xe2,0x05,0x50,0xf3]
|
||||
vrshl.u16 q8, q9, q8
|
||||
// CHECK: vrshl.u32 q8, q9, q8 @ encoding: [0xe2,0x05,0x60,0xf3]
|
||||
@ CHECK: vrshl.u32 q8, q9, q8 @ encoding: [0xe2,0x05,0x60,0xf3]
|
||||
vrshl.u32 q8, q9, q8
|
||||
// CHECK: vrshl.u64 q8, q9, q8 @ encoding: [0xe2,0x05,0x70,0xf3]
|
||||
@ CHECK: vrshl.u64 q8, q9, q8 @ encoding: [0xe2,0x05,0x70,0xf3]
|
||||
vrshl.u64 q8, q9, q8
|
||||
// CHECK: vrshr.s8 d16, d16, #8 @ encoding: [0x30,0x02,0xc8,0xf2]
|
||||
@ CHECK: vrshr.s8 d16, d16, #8 @ encoding: [0x30,0x02,0xc8,0xf2]
|
||||
vrshr.s8 d16, d16, #8
|
||||
// CHECK: vrshr.s16 d16, d16, #16 @ encoding: [0x30,0x02,0xd0,0xf2]
|
||||
@ CHECK: vrshr.s16 d16, d16, #16 @ encoding: [0x30,0x02,0xd0,0xf2]
|
||||
vrshr.s16 d16, d16, #16
|
||||
// CHECK: vrshr.s32 d16, d16, #32 @ encoding: [0x30,0x02,0xe0,0xf2]
|
||||
@ CHECK: vrshr.s32 d16, d16, #32 @ encoding: [0x30,0x02,0xe0,0xf2]
|
||||
vrshr.s32 d16, d16, #32
|
||||
// CHECK: vrshr.s64 d16, d16, #64 @ encoding: [0xb0,0x02,0xc0,0xf2]
|
||||
@ CHECK: vrshr.s64 d16, d16, #64 @ encoding: [0xb0,0x02,0xc0,0xf2]
|
||||
vrshr.s64 d16, d16, #64
|
||||
// CHECK: vrshr.u8 d16, d16, #8 @ encoding: [0x30,0x02,0xc8,0xf3]
|
||||
@ CHECK: vrshr.u8 d16, d16, #8 @ encoding: [0x30,0x02,0xc8,0xf3]
|
||||
vrshr.u8 d16, d16, #8
|
||||
// CHECK: vrshr.u16 d16, d16, #16 @ encoding: [0x30,0x02,0xd0,0xf3]
|
||||
@ CHECK: vrshr.u16 d16, d16, #16 @ encoding: [0x30,0x02,0xd0,0xf3]
|
||||
vrshr.u16 d16, d16, #16
|
||||
// CHECK: vrshr.u32 d16, d16, #32 @ encoding: [0x30,0x02,0xe0,0xf3]
|
||||
@ CHECK: vrshr.u32 d16, d16, #32 @ encoding: [0x30,0x02,0xe0,0xf3]
|
||||
vrshr.u32 d16, d16, #32
|
||||
// CHECK: vrshr.u64 d16, d16, #64 @ encoding: [0xb0,0x02,0xc0,0xf3]
|
||||
@ CHECK: vrshr.u64 d16, d16, #64 @ encoding: [0xb0,0x02,0xc0,0xf3]
|
||||
vrshr.u64 d16, d16, #64
|
||||
// CHECK: vrshr.s8 q8, q8, #8 @ encoding: [0x70,0x02,0xc8,0xf2]
|
||||
@ CHECK: vrshr.s8 q8, q8, #8 @ encoding: [0x70,0x02,0xc8,0xf2]
|
||||
vrshr.s8 q8, q8, #8
|
||||
// CHECK: vrshr.s16 q8, q8, #16 @ encoding: [0x70,0x02,0xd0,0xf2]
|
||||
@ CHECK: vrshr.s16 q8, q8, #16 @ encoding: [0x70,0x02,0xd0,0xf2]
|
||||
vrshr.s16 q8, q8, #16
|
||||
// CHECK: vrshr.s32 q8, q8, #32 @ encoding: [0x70,0x02,0xe0,0xf2]
|
||||
@ CHECK: vrshr.s32 q8, q8, #32 @ encoding: [0x70,0x02,0xe0,0xf2]
|
||||
vrshr.s32 q8, q8, #32
|
||||
// CHECK: vrshr.s64 q8, q8, #64 @ encoding: [0xf0,0x02,0xc0,0xf2]
|
||||
@ CHECK: vrshr.s64 q8, q8, #64 @ encoding: [0xf0,0x02,0xc0,0xf2]
|
||||
vrshr.s64 q8, q8, #64
|
||||
// CHECK: vrshr.u8 q8, q8, #8 @ encoding: [0x70,0x02,0xc8,0xf3]
|
||||
@ CHECK: vrshr.u8 q8, q8, #8 @ encoding: [0x70,0x02,0xc8,0xf3]
|
||||
vrshr.u8 q8, q8, #8
|
||||
// CHECK: vrshr.u16 q8, q8, #16 @ encoding: [0x70,0x02,0xd0,0xf3]
|
||||
@ CHECK: vrshr.u16 q8, q8, #16 @ encoding: [0x70,0x02,0xd0,0xf3]
|
||||
vrshr.u16 q8, q8, #16
|
||||
// CHECK: vrshr.u32 q8, q8, #32 @ encoding: [0x70,0x02,0xe0,0xf3]
|
||||
@ CHECK: vrshr.u32 q8, q8, #32 @ encoding: [0x70,0x02,0xe0,0xf3]
|
||||
vrshr.u32 q8, q8, #32
|
||||
// CHECK: vrshr.u64 q8, q8, #64 @ encoding: [0xf0,0x02,0xc0,0xf3]
|
||||
@ CHECK: vrshr.u64 q8, q8, #64 @ encoding: [0xf0,0x02,0xc0,0xf3]
|
||||
vrshr.u64 q8, q8, #64
|
||||
// CHECK: vrshrn.i16 d16, q8, #8 @ encoding: [0x70,0x08,0xc8,0xf2]
|
||||
@ CHECK: vrshrn.i16 d16, q8, #8 @ encoding: [0x70,0x08,0xc8,0xf2]
|
||||
vrshrn.i16 d16, q8, #8
|
||||
// CHECK: vrshrn.i32 d16, q8, #16 @ encoding: [0x70,0x08,0xd0,0xf2]
|
||||
@ CHECK: vrshrn.i32 d16, q8, #16 @ encoding: [0x70,0x08,0xd0,0xf2]
|
||||
vrshrn.i32 d16, q8, #16
|
||||
// CHECK: vrshrn.i64 d16, q8, #32 @ encoding: [0x70,0x08,0xe0,0xf2]
|
||||
@ CHECK: vrshrn.i64 d16, q8, #32 @ encoding: [0x70,0x08,0xe0,0xf2]
|
||||
vrshrn.i64 d16, q8, #32