[mips] Remove codegen support from some 16 bit instructions

These instructions conflict with their full length variants
for the purposes of FastISel as they cannot be distinguished
based on the number and type of operands and predicates.

Reviewers: atanasyan

Differential Revision: https://reviews.llvm.org/D41285

llvm-svn: 325341
This commit is contained in:
Simon Dardis 2018-02-16 13:34:23 +00:00
parent 1c7211d754
commit b8ae30ecec
7 changed files with 271 additions and 331 deletions

View File

@@ -1068,19 +1068,14 @@ class ADDU16_MMR6_DESC : ArithRMM16<"addu16", GPRMM16Opnd, 1, II_ADDU, add>,
MMR6Arch<"addu16"> {
int AddedComplexity = 1;
}
class AND16_MMR6_DESC : LogicRMM16<"and16", GPRMM16Opnd, II_AND, and>,
MMR6Arch<"and16"> {
int AddedComplexity = 1;
}
class AND16_MMR6_DESC : LogicRMM16<"and16", GPRMM16Opnd, II_AND>,
MMR6Arch<"and16">;
class ANDI16_MMR6_DESC : AndImmMM16<"andi16", GPRMM16Opnd, II_AND>,
MMR6Arch<"andi16">;
class NOT16_MMR6_DESC : NotMM16<"not16", GPRMM16Opnd>, MMR6Arch<"not16"> {
int AddedComplexity = 1;
}
class OR16_MMR6_DESC : LogicRMM16<"or16", GPRMM16Opnd, II_OR, or>,
MMR6Arch<"or16"> {
int AddedComplexity = 1;
}
class OR16_MMR6_DESC : LogicRMM16<"or16", GPRMM16Opnd, II_OR>, MMR6Arch<"or16">;
class SLL16_MMR6_DESC : ShiftIMM16<"sll16", uimm3_shift, GPRMM16Opnd, II_SLL>,
MMR6Arch<"sll16">;
class SRL16_MMR6_DESC : ShiftIMM16<"srl16", uimm3_shift, GPRMM16Opnd, II_SRL>,
@@ -1098,10 +1093,8 @@ class SUBU16_MMR6_DESC : ArithRMM16<"subu16", GPRMM16Opnd, 0, II_SUBU, sub>,
MMR6Arch<"subu16">, MicroMipsR6Inst16 {
int AddedComplexity = 1;
}
class XOR16_MMR6_DESC : LogicRMM16<"xor16", GPRMM16Opnd, II_XOR, xor>,
MMR6Arch<"xor16"> {
int AddedComplexity = 1;
}
class XOR16_MMR6_DESC : LogicRMM16<"xor16", GPRMM16Opnd, II_XOR>,
MMR6Arch<"xor16">;
class LW_MMR6_DESC : MMR6Arch<"lw">, MipsR6Inst {
dag OutOperandList = (outs GPR32Opnd:$rt);

View File

@@ -72,8 +72,7 @@ define signext i1 @and_i1(i1 signext %a, i1 signext %b) {
;
; MM32R6-LABEL: and_i1:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: and16 $4, $5
; MM32R6-NEXT: move $2, $4
; MM32R6-NEXT: and $2, $4, $5
; MM32R6-NEXT: jrc $ra
entry:
%r = and i1 %a, %b
@@ -122,8 +121,7 @@ define signext i8 @and_i8(i8 signext %a, i8 signext %b) {
;
; MM32R6-LABEL: and_i8:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: and16 $4, $5
; MM32R6-NEXT: move $2, $4
; MM32R6-NEXT: and $2, $4, $5
; MM32R6-NEXT: jrc $ra
entry:
%r = and i8 %a, %b
@@ -172,8 +170,7 @@ define signext i16 @and_i16(i16 signext %a, i16 signext %b) {
;
; MM32R6-LABEL: and_i16:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: and16 $4, $5
; MM32R6-NEXT: move $2, $4
; MM32R6-NEXT: and $2, $4, $5
; MM32R6-NEXT: jrc $ra
entry:
%r = and i16 %a, %b
@@ -222,8 +219,7 @@ define signext i32 @and_i32(i32 signext %a, i32 signext %b) {
;
; MM32R6-LABEL: and_i32:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: and16 $4, $5
; MM32R6-NEXT: move $2, $4
; MM32R6-NEXT: and $2, $4, $5
; MM32R6-NEXT: jrc $ra
entry:
%r = and i32 %a, %b
@@ -274,10 +270,8 @@ define signext i64 @and_i64(i64 signext %a, i64 signext %b) {
;
; MM32R6-LABEL: and_i64:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: and16 $4, $6
; MM32R6-NEXT: and16 $5, $7
; MM32R6-NEXT: move $2, $4
; MM32R6-NEXT: move $3, $5
; MM32R6-NEXT: and $2, $4, $6
; MM32R6-NEXT: and $3, $5, $7
; MM32R6-NEXT: jrc $ra
entry:
%r = and i64 %a, %b
@@ -353,14 +347,14 @@ define signext i128 @and_i128(i128 signext %a, i128 signext %b) {
;
; MM32R6-LABEL: and_i128:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: lw $3, 20($sp)
; MM32R6-NEXT: lw $1, 20($sp)
; MM32R6-NEXT: lw $2, 16($sp)
; MM32R6-NEXT: and16 $2, $4
; MM32R6-NEXT: and16 $3, $5
; MM32R6-NEXT: lw $4, 24($sp)
; MM32R6-NEXT: and16 $4, $6
; MM32R6-NEXT: lw $5, 28($sp)
; MM32R6-NEXT: and16 $5, $7
; MM32R6-NEXT: and $2, $4, $2
; MM32R6-NEXT: and $3, $5, $1
; MM32R6-NEXT: lw $1, 24($sp)
; MM32R6-NEXT: and $4, $6, $1
; MM32R6-NEXT: lw $1, 28($sp)
; MM32R6-NEXT: and $5, $7, $1
; MM32R6-NEXT: jrc $ra
entry:
%r = and i128 %a, %b
@@ -1381,8 +1375,8 @@ define signext i16 @and_i16_32768(i16 signext %b) {
;
; MM32R6-LABEL: and_i16_32768:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: addiu $2, $zero, -32768
; MM32R6-NEXT: and16 $2, $4
; MM32R6-NEXT: addiu $1, $zero, -32768
; MM32R6-NEXT: and $2, $4, $1
; MM32R6-NEXT: jrc $ra
entry:

View File

@@ -388,7 +388,7 @@ define signext i64 @ashr_i64(i64 signext %a, i64 signext %b) {
; MMR6-NEXT: not16 $6, $7
; MMR6-NEXT: sll16 $4, $4, 1
; MMR6-NEXT: sllv $4, $4, $6
; MMR6-NEXT: or16 $4, $5
; MMR6-NEXT: or $4, $4, $5
; MMR6-NEXT: seleqz $4, $4, $3
; MMR6-NEXT: selnez $1, $1, $3
; MMR6-NEXT: or $3, $1, $4
@@ -877,104 +877,89 @@ define signext i128 @ashr_i128(i128 signext %a, i128 signext %b) {
;
; MMR6-LABEL: ashr_i128:
; MMR6: # %bb.0: # %entry
; MMR6-NEXT: addiu $sp, $sp, -40
; MMR6-NEXT: .cfi_def_cfa_offset 40
; MMR6-NEXT: sw $17, 36($sp) # 4-byte Folded Spill
; MMR6-NEXT: sw $16, 32($sp) # 4-byte Folded Spill
; MMR6-NEXT: addiu $sp, $sp, -16
; MMR6-NEXT: .cfi_def_cfa_offset 16
; MMR6-NEXT: sw $17, 12($sp) # 4-byte Folded Spill
; MMR6-NEXT: sw $16, 8($sp) # 4-byte Folded Spill
; MMR6-NEXT: .cfi_offset 17, -4
; MMR6-NEXT: .cfi_offset 16, -8
; MMR6-NEXT: move $1, $7
; MMR6-NEXT: sw $6, 28($sp) # 4-byte Folded Spill
; MMR6-NEXT: move $6, $5
; MMR6-NEXT: sw $4, 12($sp) # 4-byte Folded Spill
; MMR6-NEXT: lw $3, 68($sp)
; MMR6-NEXT: lw $3, 44($sp)
; MMR6-NEXT: li16 $2, 64
; MMR6-NEXT: subu16 $7, $2, $3
; MMR6-NEXT: sllv $8, $5, $7
; MMR6-NEXT: andi16 $5, $7, 32
; MMR6-NEXT: selnez $9, $8, $5
; MMR6-NEXT: sllv $16, $4, $7
; MMR6-NEXT: andi16 $2, $7, 32
; MMR6-NEXT: selnez $9, $8, $2
; MMR6-NEXT: sllv $10, $4, $7
; MMR6-NEXT: not16 $7, $7
; MMR6-NEXT: srl16 $17, $6, 1
; MMR6-NEXT: sw $6, 20($sp) # 4-byte Folded Spill
; MMR6-NEXT: srlv $7, $17, $7
; MMR6-NEXT: or16 $7, $16
; MMR6-NEXT: seleqz $7, $7, $5
; MMR6-NEXT: srl16 $16, $5, 1
; MMR6-NEXT: srlv $7, $16, $7
; MMR6-NEXT: or $7, $10, $7
; MMR6-NEXT: seleqz $7, $7, $2
; MMR6-NEXT: or $7, $9, $7
; MMR6-NEXT: srlv $17, $1, $3
; MMR6-NEXT: not16 $2, $3
; MMR6-NEXT: sw $2, 24($sp) # 4-byte Folded Spill
; MMR6-NEXT: lw $4, 28($sp) # 4-byte Folded Reload
; MMR6-NEXT: sll16 $16, $4, 1
; MMR6-NEXT: sllv $16, $16, $2
; MMR6-NEXT: or16 $16, $17
; MMR6-NEXT: srlv $9, $1, $3
; MMR6-NEXT: not16 $16, $3
; MMR6-NEXT: sw $16, 4($sp) # 4-byte Folded Spill
; MMR6-NEXT: sll16 $17, $6, 1
; MMR6-NEXT: sllv $10, $17, $16
; MMR6-NEXT: or $9, $10, $9
; MMR6-NEXT: andi16 $17, $3, 32
; MMR6-NEXT: seleqz $9, $16, $17
; MMR6-NEXT: srlv $10, $4, $3
; MMR6-NEXT: seleqz $9, $9, $17
; MMR6-NEXT: srlv $10, $6, $3
; MMR6-NEXT: selnez $11, $10, $17
; MMR6-NEXT: seleqz $16, $10, $17
; MMR6-NEXT: or16 $16, $7
; MMR6-NEXT: seleqz $2, $8, $5
; MMR6-NEXT: sw $2, 8($sp) # 4-byte Folded Spill
; MMR6-NEXT: or $7, $11, $9
; MMR6-NEXT: seleqz $10, $10, $17
; MMR6-NEXT: or $10, $10, $7
; MMR6-NEXT: seleqz $12, $8, $2
; MMR6-NEXT: or $8, $11, $9
; MMR6-NEXT: addiu $2, $3, -64
; MMR6-NEXT: srlv $4, $6, $2
; MMR6-NEXT: sw $4, 4($sp) # 4-byte Folded Spill
; MMR6-NEXT: lw $5, 12($sp) # 4-byte Folded Reload
; MMR6-NEXT: sll16 $4, $5, 1
; MMR6-NEXT: sw $4, 16($sp) # 4-byte Folded Spill
; MMR6-NEXT: not16 $6, $2
; MMR6-NEXT: sllv $6, $4, $6
; MMR6-NEXT: sltiu $8, $3, 64
; MMR6-NEXT: move $4, $7
; MMR6-NEXT: lw $7, 8($sp) # 4-byte Folded Reload
; MMR6-NEXT: or16 $4, $7
; MMR6-NEXT: selnez $9, $16, $8
; MMR6-NEXT: lw $7, 4($sp) # 4-byte Folded Reload
; MMR6-NEXT: or16 $6, $7
; MMR6-NEXT: srav $7, $5, $2
; MMR6-NEXT: srlv $9, $5, $2
; MMR6-NEXT: sll16 $7, $4, 1
; MMR6-NEXT: not16 $16, $2
; MMR6-NEXT: sllv $11, $7, $16
; MMR6-NEXT: sltiu $13, $3, 64
; MMR6-NEXT: or $8, $8, $12
; MMR6-NEXT: selnez $10, $10, $13
; MMR6-NEXT: or $9, $11, $9
; MMR6-NEXT: srav $11, $4, $2
; MMR6-NEXT: andi16 $2, $2, 32
; MMR6-NEXT: seleqz $10, $7, $2
; MMR6-NEXT: sra $11, $5, 31
; MMR6-NEXT: selnez $12, $11, $2
; MMR6-NEXT: seleqz $6, $6, $2
; MMR6-NEXT: or $10, $12, $10
; MMR6-NEXT: seleqz $10, $10, $8
; MMR6-NEXT: selnez $2, $7, $2
; MMR6-NEXT: seleqz $7, $11, $8
; MMR6-NEXT: or $9, $9, $10
; MMR6-NEXT: selnez $9, $9, $3
; MMR6-NEXT: selnez $4, $4, $8
; MMR6-NEXT: or $2, $2, $6
; MMR6-NEXT: srav $5, $5, $3
; MMR6-NEXT: seleqz $6, $5, $17
; MMR6-NEXT: selnez $10, $11, $17
; MMR6-NEXT: or $6, $10, $6
; MMR6-NEXT: selnez $6, $6, $8
; MMR6-NEXT: seleqz $2, $2, $8
; MMR6-NEXT: lw $16, 28($sp) # 4-byte Folded Reload
; MMR6-NEXT: seleqz $10, $16, $3
; MMR6-NEXT: seleqz $12, $11, $2
; MMR6-NEXT: sra $14, $4, 31
; MMR6-NEXT: selnez $15, $14, $2
; MMR6-NEXT: seleqz $9, $9, $2
; MMR6-NEXT: or $12, $15, $12
; MMR6-NEXT: seleqz $12, $12, $13
; MMR6-NEXT: selnez $2, $11, $2
; MMR6-NEXT: seleqz $11, $14, $13
; MMR6-NEXT: or $10, $10, $12
; MMR6-NEXT: selnez $10, $10, $3
; MMR6-NEXT: selnez $8, $8, $13
; MMR6-NEXT: or $2, $2, $9
; MMR6-NEXT: srav $9, $4, $3
; MMR6-NEXT: seleqz $4, $9, $17
; MMR6-NEXT: selnez $12, $14, $17
; MMR6-NEXT: or $4, $12, $4
; MMR6-NEXT: selnez $12, $4, $13
; MMR6-NEXT: seleqz $2, $2, $13
; MMR6-NEXT: seleqz $4, $6, $3
; MMR6-NEXT: seleqz $1, $1, $3
; MMR6-NEXT: or $2, $4, $2
; MMR6-NEXT: or $2, $8, $2
; MMR6-NEXT: selnez $2, $2, $3
; MMR6-NEXT: or $1, $1, $2
; MMR6-NEXT: or $4, $10, $9
; MMR6-NEXT: or $2, $6, $7
; MMR6-NEXT: lw $6, 20($sp) # 4-byte Folded Reload
; MMR6-NEXT: srlv $3, $6, $3
; MMR6-NEXT: lw $6, 24($sp) # 4-byte Folded Reload
; MMR6-NEXT: lw $16, 16($sp) # 4-byte Folded Reload
; MMR6-NEXT: sllv $6, $16, $6
; MMR6-NEXT: or16 $6, $3
; MMR6-NEXT: seleqz $3, $6, $17
; MMR6-NEXT: selnez $5, $5, $17
; MMR6-NEXT: or $4, $4, $10
; MMR6-NEXT: or $2, $12, $11
; MMR6-NEXT: srlv $3, $5, $3
; MMR6-NEXT: lw $5, 4($sp) # 4-byte Folded Reload
; MMR6-NEXT: sllv $5, $7, $5
; MMR6-NEXT: or $3, $5, $3
; MMR6-NEXT: selnez $3, $3, $8
; MMR6-NEXT: or $3, $3, $7
; MMR6-NEXT: seleqz $3, $3, $17
; MMR6-NEXT: selnez $5, $9, $17
; MMR6-NEXT: or $3, $5, $3
; MMR6-NEXT: selnez $3, $3, $13
; MMR6-NEXT: or $3, $3, $11
; MMR6-NEXT: move $5, $1
; MMR6-NEXT: lw $16, 32($sp) # 4-byte Folded Reload
; MMR6-NEXT: lw $17, 36($sp) # 4-byte Folded Reload
; MMR6-NEXT: addiu $sp, $sp, 40
; MMR6-NEXT: lw $16, 8($sp) # 4-byte Folded Reload
; MMR6-NEXT: lw $17, 12($sp) # 4-byte Folded Reload
; MMR6-NEXT: addiu $sp, $sp, 16
; MMR6-NEXT: jrc $ra
entry:
; o32 shouldn't use TImode helpers.

View File

@@ -403,13 +403,13 @@ define signext i64 @lshr_i64(i64 signext %a, i64 signext %b) {
;
; MMR6-LABEL: lshr_i64:
; MMR6: # %bb.0: # %entry
; MMR6-NEXT: srlv $2, $5, $7
; MMR6-NEXT: not16 $3, $7
; MMR6-NEXT: sll16 $5, $4, 1
; MMR6-NEXT: sllv $3, $5, $3
; MMR6-NEXT: or16 $3, $2
; MMR6-NEXT: srlv $1, $5, $7
; MMR6-NEXT: not16 $2, $7
; MMR6-NEXT: sll16 $3, $4, 1
; MMR6-NEXT: sllv $2, $3, $2
; MMR6-NEXT: or $1, $2, $1
; MMR6-NEXT: andi16 $2, $7, 32
; MMR6-NEXT: seleqz $1, $3, $2
; MMR6-NEXT: seleqz $1, $1, $2
; MMR6-NEXT: srlv $4, $4, $7
; MMR6-NEXT: selnez $3, $4, $2
; MMR6-NEXT: or $3, $3, $1
@@ -906,106 +906,98 @@ define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
;
; MMR6-LABEL: lshr_i128:
; MMR6: # %bb.0: # %entry
; MMR6-NEXT: addiu $sp, $sp, -48
; MMR6-NEXT: .cfi_def_cfa_offset 48
; MMR6-NEXT: sw $17, 44($sp) # 4-byte Folded Spill
; MMR6-NEXT: sw $16, 40($sp) # 4-byte Folded Spill
; MMR6-NEXT: addiu $sp, $sp, -32
; MMR6-NEXT: .cfi_def_cfa_offset 32
; MMR6-NEXT: sw $17, 28($sp) # 4-byte Folded Spill
; MMR6-NEXT: sw $16, 24($sp) # 4-byte Folded Spill
; MMR6-NEXT: .cfi_offset 17, -4
; MMR6-NEXT: .cfi_offset 16, -8
; MMR6-NEXT: move $1, $7
; MMR6-NEXT: sw $5, 8($sp) # 4-byte Folded Spill
; MMR6-NEXT: move $16, $4
; MMR6-NEXT: sw $4, 32($sp) # 4-byte Folded Spill
; MMR6-NEXT: lw $3, 76($sp)
; MMR6-NEXT: srlv $2, $7, $3
; MMR6-NEXT: move $7, $5
; MMR6-NEXT: lw $3, 60($sp)
; MMR6-NEXT: srlv $2, $1, $3
; MMR6-NEXT: not16 $5, $3
; MMR6-NEXT: sw $5, 24($sp) # 4-byte Folded Spill
; MMR6-NEXT: move $4, $6
; MMR6-NEXT: sw $6, 28($sp) # 4-byte Folded Spill
; MMR6-NEXT: sw $5, 12($sp) # 4-byte Folded Spill
; MMR6-NEXT: move $17, $6
; MMR6-NEXT: sw $6, 16($sp) # 4-byte Folded Spill
; MMR6-NEXT: sll16 $6, $6, 1
; MMR6-NEXT: sllv $17, $6, $5
; MMR6-NEXT: or16 $17, $2
; MMR6-NEXT: addiu $7, $3, -64
; MMR6-NEXT: sw $7, 36($sp) # 4-byte Folded Spill
; MMR6-NEXT: lw $5, 8($sp) # 4-byte Folded Reload
; MMR6-NEXT: srlv $6, $5, $7
; MMR6-NEXT: sll16 $2, $16, 1
; MMR6-NEXT: sw $2, 20($sp) # 4-byte Folded Spill
; MMR6-NEXT: not16 $16, $7
; MMR6-NEXT: sllv $7, $2, $16
; MMR6-NEXT: sllv $6, $6, $5
; MMR6-NEXT: or $8, $6, $2
; MMR6-NEXT: addiu $5, $3, -64
; MMR6-NEXT: srlv $9, $7, $5
; MMR6-NEXT: move $6, $4
; MMR6-NEXT: sll16 $2, $4, 1
; MMR6-NEXT: sw $2, 8($sp) # 4-byte Folded Spill
; MMR6-NEXT: not16 $16, $5
; MMR6-NEXT: sllv $10, $2, $16
; MMR6-NEXT: andi16 $16, $3, 32
; MMR6-NEXT: seleqz $8, $17, $16
; MMR6-NEXT: or16 $7, $6
; MMR6-NEXT: srlv $10, $4, $3
; MMR6-NEXT: selnez $9, $10, $16
; MMR6-NEXT: seleqz $8, $8, $16
; MMR6-NEXT: or $9, $10, $9
; MMR6-NEXT: srlv $10, $17, $3
; MMR6-NEXT: selnez $11, $10, $16
; MMR6-NEXT: li16 $17, 64
; MMR6-NEXT: subu16 $6, $17, $3
; MMR6-NEXT: sllv $11, $5, $6
; MMR6-NEXT: move $17, $5
; MMR6-NEXT: andi16 $4, $6, 32
; MMR6-NEXT: lw $2, 36($sp) # 4-byte Folded Reload
; MMR6-NEXT: andi16 $2, $2, 32
; MMR6-NEXT: sw $2, 16($sp) # 4-byte Folded Spill
; MMR6-NEXT: seleqz $12, $7, $2
; MMR6-NEXT: seleqz $2, $11, $4
; MMR6-NEXT: sw $2, 12($sp) # 4-byte Folded Spill
; MMR6-NEXT: or $5, $9, $8
; MMR6-NEXT: selnez $8, $11, $4
; MMR6-NEXT: lw $2, 32($sp) # 4-byte Folded Reload
; MMR6-NEXT: sllv $7, $2, $6
; MMR6-NEXT: sw $7, 4($sp) # 4-byte Folded Spill
; MMR6-NEXT: not16 $6, $6
; MMR6-NEXT: move $7, $17
; MMR6-NEXT: srl16 $17, $17, 1
; MMR6-NEXT: srlv $6, $17, $6
; MMR6-NEXT: lw $17, 4($sp) # 4-byte Folded Reload
; MMR6-NEXT: or16 $6, $17
; MMR6-NEXT: seleqz $4, $6, $4
; MMR6-NEXT: lw $6, 36($sp) # 4-byte Folded Reload
; MMR6-NEXT: srlv $9, $2, $6
; MMR6-NEXT: or $4, $8, $4
; MMR6-NEXT: lw $2, 12($sp) # 4-byte Folded Reload
; MMR6-NEXT: or16 $5, $2
; MMR6-NEXT: srlv $2, $7, $3
; MMR6-NEXT: lw $17, 16($sp) # 4-byte Folded Reload
; MMR6-NEXT: selnez $6, $9, $17
; MMR6-NEXT: subu16 $2, $17, $3
; MMR6-NEXT: sllv $12, $7, $2
; MMR6-NEXT: move $17, $7
; MMR6-NEXT: andi16 $4, $2, 32
; MMR6-NEXT: andi16 $7, $5, 32
; MMR6-NEXT: sw $7, 20($sp) # 4-byte Folded Spill
; MMR6-NEXT: seleqz $9, $9, $7
; MMR6-NEXT: seleqz $13, $12, $4
; MMR6-NEXT: or $8, $11, $8
; MMR6-NEXT: selnez $11, $12, $4
; MMR6-NEXT: sllv $12, $6, $2
; MMR6-NEXT: move $7, $6
; MMR6-NEXT: sw $6, 4($sp) # 4-byte Folded Spill
; MMR6-NEXT: not16 $2, $2
; MMR6-NEXT: srl16 $6, $17, 1
; MMR6-NEXT: srlv $2, $6, $2
; MMR6-NEXT: or $2, $12, $2
; MMR6-NEXT: seleqz $2, $2, $4
; MMR6-NEXT: srlv $4, $7, $5
; MMR6-NEXT: or $11, $11, $2
; MMR6-NEXT: or $5, $8, $13
; MMR6-NEXT: srlv $6, $17, $3
; MMR6-NEXT: lw $2, 20($sp) # 4-byte Folded Reload
; MMR6-NEXT: selnez $7, $4, $2
; MMR6-NEXT: sltiu $8, $3, 64
; MMR6-NEXT: selnez $13, $5, $8
; MMR6-NEXT: or $11, $6, $12
; MMR6-NEXT: lw $5, 24($sp) # 4-byte Folded Reload
; MMR6-NEXT: lw $6, 20($sp) # 4-byte Folded Reload
; MMR6-NEXT: sllv $7, $6, $5
; MMR6-NEXT: seleqz $6, $10, $16
; MMR6-NEXT: li16 $5, 0
; MMR6-NEXT: or16 $6, $4
; MMR6-NEXT: or16 $7, $2
; MMR6-NEXT: seleqz $4, $11, $8
; MMR6-NEXT: seleqz $10, $5, $8
; MMR6-NEXT: lw $2, 32($sp) # 4-byte Folded Reload
; MMR6-NEXT: srlv $11, $2, $3
; MMR6-NEXT: seleqz $5, $11, $16
; MMR6-NEXT: selnez $12, $5, $8
; MMR6-NEXT: or $7, $7, $9
; MMR6-NEXT: lw $5, 12($sp) # 4-byte Folded Reload
; MMR6-NEXT: lw $2, 8($sp) # 4-byte Folded Reload
; MMR6-NEXT: sllv $9, $2, $5
; MMR6-NEXT: seleqz $10, $10, $16
; MMR6-NEXT: li16 $5, 0
; MMR6-NEXT: or $10, $10, $11
; MMR6-NEXT: or $6, $9, $6
; MMR6-NEXT: seleqz $2, $7, $8
; MMR6-NEXT: seleqz $7, $5, $8
; MMR6-NEXT: lw $5, 4($sp) # 4-byte Folded Reload
; MMR6-NEXT: srlv $9, $5, $3
; MMR6-NEXT: seleqz $11, $9, $16
; MMR6-NEXT: selnez $11, $11, $8
; MMR6-NEXT: seleqz $1, $1, $3
; MMR6-NEXT: or $2, $13, $4
; MMR6-NEXT: or $2, $12, $2
; MMR6-NEXT: selnez $2, $2, $3
; MMR6-NEXT: or $5, $1, $2
; MMR6-NEXT: or $2, $10, $12
; MMR6-NEXT: seleqz $1, $7, $16
; MMR6-NEXT: selnez $7, $11, $16
; MMR6-NEXT: lw $4, 28($sp) # 4-byte Folded Reload
; MMR6-NEXT: seleqz $4, $4, $3
; MMR6-NEXT: selnez $6, $6, $8
; MMR6-NEXT: seleqz $9, $9, $17
; MMR6-NEXT: seleqz $9, $9, $8
; MMR6-NEXT: or $6, $6, $9
; MMR6-NEXT: selnez $3, $6, $3
; MMR6-NEXT: or $4, $4, $3
; MMR6-NEXT: or $1, $7, $1
; MMR6-NEXT: or $2, $7, $11
; MMR6-NEXT: seleqz $1, $6, $16
; MMR6-NEXT: selnez $6, $9, $16
; MMR6-NEXT: lw $16, 16($sp) # 4-byte Folded Reload
; MMR6-NEXT: seleqz $9, $16, $3
; MMR6-NEXT: selnez $10, $10, $8
; MMR6-NEXT: lw $16, 20($sp) # 4-byte Folded Reload
; MMR6-NEXT: seleqz $4, $4, $16
; MMR6-NEXT: seleqz $4, $4, $8
; MMR6-NEXT: or $4, $10, $4
; MMR6-NEXT: selnez $3, $4, $3
; MMR6-NEXT: or $4, $9, $3
; MMR6-NEXT: or $1, $6, $1
; MMR6-NEXT: selnez $1, $1, $8
; MMR6-NEXT: or $3, $10, $1
; MMR6-NEXT: lw $16, 40($sp) # 4-byte Folded Reload
; MMR6-NEXT: lw $17, 44($sp) # 4-byte Folded Reload
; MMR6-NEXT: addiu $sp, $sp, 48
; MMR6-NEXT: or $3, $7, $1
; MMR6-NEXT: lw $16, 24($sp) # 4-byte Folded Reload
; MMR6-NEXT: lw $17, 28($sp) # 4-byte Folded Reload
; MMR6-NEXT: addiu $sp, $sp, 32
; MMR6-NEXT: jrc $ra
entry:

View File

@@ -37,8 +37,7 @@ define signext i1 @or_i1(i1 signext %a, i1 signext %b) {
;
; MM32R6-LABEL: or_i1:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: or16 $4, $5
; MM32R6-NEXT: move $2, $4
; MM32R6-NEXT: or $2, $4, $5
; MM32R6-NEXT: jrc $ra
entry:
%r = or i1 %a, %b
@@ -65,8 +64,7 @@ define signext i8 @or_i8(i8 signext %a, i8 signext %b) {
;
; MM32R6-LABEL: or_i8:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: or16 $4, $5
; MM32R6-NEXT: move $2, $4
; MM32R6-NEXT: or $2, $4, $5
; MM32R6-NEXT: jrc $ra
entry:
%r = or i8 %a, %b
@@ -93,8 +91,7 @@ define signext i16 @or_i16(i16 signext %a, i16 signext %b) {
;
; MM32R6-LABEL: or_i16:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: or16 $4, $5
; MM32R6-NEXT: move $2, $4
; MM32R6-NEXT: or $2, $4, $5
; MM32R6-NEXT: jrc $ra
entry:
%r = or i16 %a, %b
@@ -121,8 +118,7 @@ define signext i32 @or_i32(i32 signext %a, i32 signext %b) {
;
; MM32R6-LABEL: or_i32:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: or16 $4, $5
; MM32R6-NEXT: move $2, $4
; MM32R6-NEXT: or $2, $4, $5
; MM32R6-NEXT: jrc $ra
entry:
%r = or i32 %a, %b
@@ -151,10 +147,8 @@ define signext i64 @or_i64(i64 signext %a, i64 signext %b) {
;
; MM32R6-LABEL: or_i64:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: or16 $4, $6
; MM32R6-NEXT: or16 $5, $7
; MM32R6-NEXT: move $2, $4
; MM32R6-NEXT: move $3, $5
; MM32R6-NEXT: or $2, $4, $6
; MM32R6-NEXT: or $3, $5, $7
; MM32R6-NEXT: jrc $ra
entry:
%r = or i64 %a, %b
@@ -194,14 +188,14 @@ define signext i128 @or_i128(i128 signext %a, i128 signext %b) {
;
; MM32R6-LABEL: or_i128:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: lw $3, 20($sp)
; MM32R6-NEXT: lw $1, 20($sp)
; MM32R6-NEXT: lw $2, 16($sp)
; MM32R6-NEXT: or16 $2, $4
; MM32R6-NEXT: or16 $3, $5
; MM32R6-NEXT: lw $4, 24($sp)
; MM32R6-NEXT: or16 $4, $6
; MM32R6-NEXT: lw $5, 28($sp)
; MM32R6-NEXT: or16 $5, $7
; MM32R6-NEXT: or $2, $4, $2
; MM32R6-NEXT: or $3, $5, $1
; MM32R6-NEXT: lw $1, 24($sp)
; MM32R6-NEXT: or $4, $6, $1
; MM32R6-NEXT: lw $1, 28($sp)
; MM32R6-NEXT: or $5, $7, $1
; MM32R6-NEXT: jrc $ra
entry:
%r = or i128 %a, %b
@@ -777,8 +771,8 @@ define signext i16 @or_i16_32768(i16 signext %b) {
;
; MM32R6-LABEL: or_i16_32768:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: addiu $2, $zero, -32768
; MM32R6-NEXT: or16 $2, $4
; MM32R6-NEXT: addiu $1, $zero, -32768
; MM32R6-NEXT: or $2, $4, $1
; MM32R6-NEXT: jrc $ra
entry:
%r = or i16 32768, %b

View File

@@ -435,17 +435,17 @@ define signext i64 @shl_i64(i64 signext %a, i64 signext %b) {
;
; MMR6-LABEL: shl_i64:
; MMR6: # %bb.0: # %entry
; MMR6-NEXT: sllv $2, $4, $7
; MMR6-NEXT: not16 $3, $7
; MMR6-NEXT: srl16 $4, $5, 1
; MMR6-NEXT: srlv $3, $4, $3
; MMR6-NEXT: or16 $3, $2
; MMR6-NEXT: andi16 $4, $7, 32
; MMR6-NEXT: seleqz $1, $3, $4
; MMR6-NEXT: sllv $3, $5, $7
; MMR6-NEXT: selnez $2, $3, $4
; MMR6-NEXT: sllv $1, $4, $7
; MMR6-NEXT: not16 $2, $7
; MMR6-NEXT: srl16 $3, $5, 1
; MMR6-NEXT: srlv $2, $3, $2
; MMR6-NEXT: or $1, $1, $2
; MMR6-NEXT: andi16 $3, $7, 32
; MMR6-NEXT: seleqz $1, $1, $3
; MMR6-NEXT: sllv $4, $5, $7
; MMR6-NEXT: selnez $2, $4, $3
; MMR6-NEXT: or $2, $2, $1
; MMR6-NEXT: seleqz $3, $3, $4
; MMR6-NEXT: seleqz $3, $4, $3
; MMR6-NEXT: jrc $ra
entry:
@@ -937,97 +937,85 @@ define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
;
; MMR6-LABEL: shl_i128:
; MMR6: # %bb.0: # %entry
; MMR6-NEXT: addiu $sp, $sp, -32
; MMR6-NEXT: .cfi_def_cfa_offset 32
; MMR6-NEXT: sw $17, 28($sp) # 4-byte Folded Spill
; MMR6-NEXT: sw $16, 24($sp) # 4-byte Folded Spill
; MMR6-NEXT: addiu $sp, $sp, -16
; MMR6-NEXT: .cfi_def_cfa_offset 16
; MMR6-NEXT: sw $17, 12($sp) # 4-byte Folded Spill
; MMR6-NEXT: sw $16, 8($sp) # 4-byte Folded Spill
; MMR6-NEXT: .cfi_offset 17, -4
; MMR6-NEXT: .cfi_offset 16, -8
; MMR6-NEXT: sw $6, 4($sp) # 4-byte Folded Spill
; MMR6-NEXT: move $1, $4
; MMR6-NEXT: lw $3, 60($sp)
; MMR6-NEXT: sllv $2, $4, $3
; MMR6-NEXT: not16 $4, $3
; MMR6-NEXT: sw $4, 16($sp) # 4-byte Folded Spill
; MMR6-NEXT: sw $5, 20($sp) # 4-byte Folded Spill
; MMR6-NEXT: move $11, $4
; MMR6-NEXT: lw $3, 44($sp)
; MMR6-NEXT: sllv $1, $4, $3
; MMR6-NEXT: not16 $2, $3
; MMR6-NEXT: sw $2, 4($sp) # 4-byte Folded Spill
; MMR6-NEXT: srl16 $16, $5, 1
; MMR6-NEXT: srlv $17, $16, $4
; MMR6-NEXT: or16 $17, $2
; MMR6-NEXT: srlv $8, $16, $2
; MMR6-NEXT: or $1, $1, $8
; MMR6-NEXT: sllv $8, $5, $3
; MMR6-NEXT: andi16 $16, $3, 32
; MMR6-NEXT: seleqz $4, $17, $16
; MMR6-NEXT: seleqz $1, $1, $16
; MMR6-NEXT: selnez $9, $8, $16
; MMR6-NEXT: li16 $17, 64
; MMR6-NEXT: subu16 $17, $17, $3
; MMR6-NEXT: srlv $10, $6, $17
; MMR6-NEXT: andi16 $2, $17, 32
; MMR6-NEXT: seleqz $5, $10, $2
; MMR6-NEXT: sw $5, 8($sp) # 4-byte Folded Spill
; MMR6-NEXT: or $4, $9, $4
; MMR6-NEXT: seleqz $12, $10, $2
; MMR6-NEXT: or $1, $9, $1
; MMR6-NEXT: selnez $9, $10, $2
; MMR6-NEXT: srlv $5, $7, $17
; MMR6-NEXT: sw $5, 12($sp) # 4-byte Folded Spill
; MMR6-NEXT: srlv $10, $7, $17
; MMR6-NEXT: not16 $17, $17
; MMR6-NEXT: sll16 $5, $6, 1
; MMR6-NEXT: sllv $5, $5, $17
; MMR6-NEXT: lw $17, 12($sp) # 4-byte Folded Reload
; MMR6-NEXT: or16 $5, $17
; MMR6-NEXT: seleqz $2, $5, $2
; MMR6-NEXT: addiu $5, $3, -64
; MMR6-NEXT: or $2, $9, $2
; MMR6-NEXT: sw $2, 12($sp) # 4-byte Folded Spill
; MMR6-NEXT: lw $2, 8($sp) # 4-byte Folded Reload
; MMR6-NEXT: or16 $4, $2
; MMR6-NEXT: sllv $2, $6, $5
; MMR6-NEXT: sw $2, 8($sp) # 4-byte Folded Spill
; MMR6-NEXT: srl16 $6, $7, 1
; MMR6-NEXT: not16 $17, $5
; MMR6-NEXT: srlv $2, $6, $17
; MMR6-NEXT: lw $17, 8($sp) # 4-byte Folded Reload
; MMR6-NEXT: or16 $2, $17
; MMR6-NEXT: andi16 $17, $5, 32
; MMR6-NEXT: seleqz $2, $2, $17
; MMR6-NEXT: sllv $12, $7, $5
; MMR6-NEXT: selnez $9, $12, $17
; MMR6-NEXT: sltiu $10, $3, 64
; MMR6-NEXT: selnez $11, $4, $10
; MMR6-NEXT: or $9, $9, $2
; MMR6-NEXT: lw $2, 4($sp) # 4-byte Folded Reload
; MMR6-NEXT: sllv $5, $2, $3
; MMR6-NEXT: lw $4, 16($sp) # 4-byte Folded Reload
; MMR6-NEXT: srlv $6, $6, $4
; MMR6-NEXT: seleqz $4, $8, $16
; MMR6-NEXT: lw $2, 12($sp) # 4-byte Folded Reload
; MMR6-NEXT: or16 $4, $2
; MMR6-NEXT: or16 $6, $5
; MMR6-NEXT: seleqz $2, $9, $10
; MMR6-NEXT: li16 $5, 0
; MMR6-NEXT: seleqz $5, $5, $10
; MMR6-NEXT: sll16 $4, $6, 1
; MMR6-NEXT: sllv $4, $4, $17
; MMR6-NEXT: or $4, $4, $10
; MMR6-NEXT: seleqz $2, $4, $2
; MMR6-NEXT: addiu $4, $3, -64
; MMR6-NEXT: or $10, $9, $2
; MMR6-NEXT: or $1, $1, $12
; MMR6-NEXT: sllv $9, $6, $4
; MMR6-NEXT: srl16 $2, $7, 1
; MMR6-NEXT: not16 $17, $4
; MMR6-NEXT: srlv $12, $2, $17
; MMR6-NEXT: or $9, $9, $12
; MMR6-NEXT: andi16 $17, $4, 32
; MMR6-NEXT: seleqz $9, $9, $17
; MMR6-NEXT: sllv $14, $7, $4
; MMR6-NEXT: selnez $12, $14, $17
; MMR6-NEXT: sltiu $13, $3, 64
; MMR6-NEXT: selnez $1, $1, $13
; MMR6-NEXT: or $9, $12, $9
; MMR6-NEXT: sllv $6, $6, $3
; MMR6-NEXT: lw $4, 4($sp) # 4-byte Folded Reload
; MMR6-NEXT: srlv $2, $2, $4
; MMR6-NEXT: seleqz $8, $8, $16
; MMR6-NEXT: li16 $4, 0
; MMR6-NEXT: or $8, $8, $10
; MMR6-NEXT: or $6, $6, $2
; MMR6-NEXT: seleqz $2, $9, $13
; MMR6-NEXT: seleqz $9, $4, $13
; MMR6-NEXT: sllv $7, $7, $3
; MMR6-NEXT: seleqz $8, $7, $16
; MMR6-NEXT: selnez $8, $8, $10
; MMR6-NEXT: seleqz $1, $1, $3
; MMR6-NEXT: or $2, $11, $2
; MMR6-NEXT: selnez $2, $2, $3
; MMR6-NEXT: or $2, $1, $2
; MMR6-NEXT: or $1, $5, $8
; MMR6-NEXT: seleqz $10, $7, $16
; MMR6-NEXT: selnez $10, $10, $13
; MMR6-NEXT: seleqz $11, $11, $3
; MMR6-NEXT: or $1, $1, $2
; MMR6-NEXT: selnez $1, $1, $3
; MMR6-NEXT: or $2, $11, $1
; MMR6-NEXT: or $1, $9, $10
; MMR6-NEXT: seleqz $6, $6, $16
; MMR6-NEXT: selnez $7, $7, $16
; MMR6-NEXT: lw $16, 20($sp) # 4-byte Folded Reload
; MMR6-NEXT: seleqz $8, $16, $3
; MMR6-NEXT: selnez $4, $4, $10
; MMR6-NEXT: seleqz $9, $12, $17
; MMR6-NEXT: seleqz $9, $9, $10
; MMR6-NEXT: or $4, $4, $9
; MMR6-NEXT: seleqz $5, $5, $3
; MMR6-NEXT: selnez $8, $8, $13
; MMR6-NEXT: seleqz $4, $14, $17
; MMR6-NEXT: seleqz $4, $4, $13
; MMR6-NEXT: or $4, $8, $4
; MMR6-NEXT: selnez $3, $4, $3
; MMR6-NEXT: or $3, $8, $3
; MMR6-NEXT: or $3, $5, $3
; MMR6-NEXT: or $4, $7, $6
; MMR6-NEXT: selnez $4, $4, $10
; MMR6-NEXT: or $4, $5, $4
; MMR6-NEXT: selnez $4, $4, $13
; MMR6-NEXT: or $4, $9, $4
; MMR6-NEXT: move $5, $1
; MMR6-NEXT: lw $16, 24($sp) # 4-byte Folded Reload
; MMR6-NEXT: lw $17, 28($sp) # 4-byte Folded Reload
; MMR6-NEXT: addiu $sp, $sp, 32
; MMR6-NEXT: lw $16, 8($sp) # 4-byte Folded Reload
; MMR6-NEXT: lw $17, 12($sp) # 4-byte Folded Reload
; MMR6-NEXT: addiu $sp, $sp, 16
; MMR6-NEXT: jrc $ra
entry:

View File

@@ -70,8 +70,7 @@ define signext i1 @xor_i1(i1 signext %a, i1 signext %b) {
;
; MM32R6-LABEL: xor_i1:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: xor16 $4, $5
; MM32R6-NEXT: move $2, $4
; MM32R6-NEXT: xor $2, $4, $5
; MM32R6-NEXT: jrc $ra
entry:
%r = xor i1 %a, %b
@@ -120,8 +119,7 @@ define signext i8 @xor_i8(i8 signext %a, i8 signext %b) {
;
; MM32R6-LABEL: xor_i8:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: xor16 $4, $5
; MM32R6-NEXT: move $2, $4
; MM32R6-NEXT: xor $2, $4, $5
; MM32R6-NEXT: jrc $ra
entry:
%r = xor i8 %a, %b
@@ -170,8 +168,7 @@ define signext i16 @xor_i16(i16 signext %a, i16 signext %b) {
;
; MM32R6-LABEL: xor_i16:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: xor16 $4, $5
; MM32R6-NEXT: move $2, $4
; MM32R6-NEXT: xor $2, $4, $5
; MM32R6-NEXT: jrc $ra
entry:
%r = xor i16 %a, %b
@@ -220,8 +217,7 @@ define signext i32 @xor_i32(i32 signext %a, i32 signext %b) {
;
; MM32R6-LABEL: xor_i32:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: xor16 $4, $5
; MM32R6-NEXT: move $2, $4
; MM32R6-NEXT: xor $2, $4, $5
; MM32R6-NEXT: jrc $ra
entry:
%r = xor i32 %a, %b
@@ -272,10 +268,8 @@ define signext i64 @xor_i64(i64 signext %a, i64 signext %b) {
;
; MM32R6-LABEL: xor_i64:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: xor16 $4, $6
; MM32R6-NEXT: xor16 $5, $7
; MM32R6-NEXT: move $2, $4
; MM32R6-NEXT: move $3, $5
; MM32R6-NEXT: xor $2, $4, $6
; MM32R6-NEXT: xor $3, $5, $7
; MM32R6-NEXT: jrc $ra
entry:
%r = xor i64 %a, %b
@@ -351,14 +345,14 @@ define signext i128 @xor_i128(i128 signext %a, i128 signext %b) {
;
; MM32R6-LABEL: xor_i128:
; MM32R6: # %bb.0: # %entry
; MM32R6-NEXT: lw $3, 20($sp)
; MM32R6-NEXT: lw $1, 20($sp)
; MM32R6-NEXT: lw $2, 16($sp)
; MM32R6-NEXT: xor16 $2, $4
; MM32R6-NEXT: xor16 $3, $5
; MM32R6-NEXT: lw $4, 24($sp)
; MM32R6-NEXT: xor16 $4, $6
; MM32R6-NEXT: lw $5, 28($sp)
; MM32R6-NEXT: xor16 $5, $7
; MM32R6-NEXT: xor $2, $4, $2
; MM32R6-NEXT: xor $3, $5, $1
; MM32R6-NEXT: lw $1, 24($sp)
; MM32R6-NEXT: xor $4, $6, $1
; MM32R6-NEXT: lw $1, 28($sp)
; MM32R6-NEXT: xor $5, $7, $1
; MM32R6-NEXT: jrc $ra
entry:
%r = xor i128 %a, %b