Remove some now-unneeded casts from instruction patterns. With the casts
removed, tblgen produces output identical to what it produced with them in.

llvm-svn: 28867
Author: Chris Lattner
Date:   2006-06-20 00:39:56 +00:00
Commit: 868a75bec6 (parent 94d18df658)

2 changed files with 22 additions and 22 deletions
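The before/after pair below, lifted from the first hunk, shows the kind of cast being dropped. The gloss in the comments, that tblgen can recover the result type from the rest of the pattern, is my reading of the commit message, not something it states explicitly.

// Before: the LVX result is wrapped in an explicit v4i32 cast.
def : Pat<(v4i32 (load xoaddr:$src)), (v4i32 (LVX xoaddr:$src))>;

// After: the cast is gone; tblgen infers the same v4i32 result type from
// the source side of the pattern, so the generated matcher is unchanged,
// which is what the commit message reports.
def : Pat<(v4i32 (load xoaddr:$src)), (LVX xoaddr:$src)>;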

lib/Target/PowerPC/PPCInstrAltivec.td

@@ -546,7 +546,7 @@ def : Pat<(v8i16 (undef)), (IMPLICIT_DEF_VRRC)>;
 def : Pat<(v4f32 (undef)), (IMPLICIT_DEF_VRRC)>;

 // Loads.
-def : Pat<(v4i32 (load xoaddr:$src)), (v4i32 (LVX xoaddr:$src))>;
+def : Pat<(v4i32 (load xoaddr:$src)), (LVX xoaddr:$src)>;

 // Stores.
 def : Pat<(store (v4i32 VRRC:$rS), xoaddr:$dst),
@@ -594,29 +594,29 @@ def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGHW_unary_shuffle_mask:$in),
                         (VMRGHW VRRC:$vA, VRRC:$vA)>;

 // Logical Operations
-def : Pat<(v4i32 (vnot VRRC:$vA)), (v4i32 (VNOR VRRC:$vA, VRRC:$vA))>;
-def : Pat<(v4i32 (vnot_conv VRRC:$vA)), (v4i32 (VNOR VRRC:$vA, VRRC:$vA))>;
+def : Pat<(v4i32 (vnot VRRC:$vA)), (VNOR VRRC:$vA, VRRC:$vA)>;
+def : Pat<(v4i32 (vnot_conv VRRC:$vA)), (VNOR VRRC:$vA, VRRC:$vA)>;
 def : Pat<(v4i32 (vnot_conv (or VRRC:$A, VRRC:$B))),
-          (v4i32 (VNOR VRRC:$A, VRRC:$B))>;
+          (VNOR VRRC:$A, VRRC:$B)>;
 def : Pat<(v4i32 (and VRRC:$A, (vnot_conv VRRC:$B))),
-          (v4i32 (VANDC VRRC:$A, VRRC:$B))>;
+          (VANDC VRRC:$A, VRRC:$B)>;
 def : Pat<(fmul VRRC:$vA, VRRC:$vB),
-          (v4f32 (VMADDFP VRRC:$vA, VRRC:$vB, (v4i32 (V_SET0))))>;
+          (VMADDFP VRRC:$vA, VRRC:$vB, (v4i32 (V_SET0)))>;

 // Fused multiply add and multiply sub for packed float. These are represented
 // separately from the real instructions above, for operations that must have
 // the additional precision, such as Newton-Rhapson (used by divide, sqrt)
 def : Pat<(PPCvmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
-          (v4f32 (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C))>;
+          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
 def : Pat<(PPCvnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
-          (v4f32 (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C))>;
+          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;
 def : Pat<(int_ppc_altivec_vmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
-          (v4f32 (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C))>;
+          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
 def : Pat<(int_ppc_altivec_vnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
-          (v4f32 (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C))>;
+          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;
 def : Pat<(PPCvperm (v16i8 VRRC:$vA), VRRC:$vB, VRRC:$vC),
-          (v16i8 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;
+          (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC)>;

lib/Target/PowerPC/PPCInstrInfo.td

@@ -490,7 +490,7 @@ def EQV : XForm_6<31, 284, (ops GPRC:$rA, GPRC:$rS, GPRC:$rB),
                   [(set GPRC:$rA, (not (xor GPRC:$rS, GPRC:$rB)))]>;
 def XOR : XForm_6<31, 316, (ops GPRC:$rA, GPRC:$rS, GPRC:$rB),
                   "xor $rA, $rS, $rB", IntGeneral,
                   [(set GPRC:$rA, (xor GPRC:$rS, GPRC:$rB))]>;
 def SLW : XForm_6<31, 24, (ops GPRC:$rA, GPRC:$rS, GPRC:$rB),
                   "slw $rA, $rS, $rB", IntGeneral,
                   [(set GPRC:$rA, (PPCshl GPRC:$rS, GPRC:$rB))]>;
@@ -936,25 +936,25 @@ def : Pat<(srl GPRC:$rS, GPRC:$rB),
 def : Pat<(shl GPRC:$rS, GPRC:$rB),
           (SLW GPRC:$rS, GPRC:$rB)>;
-def : Pat<(i32 (zextload iaddr:$src, i1)),
+def : Pat<(zextload iaddr:$src, i1),
           (LBZ iaddr:$src)>;
-def : Pat<(i32 (zextload xaddr:$src, i1)),
+def : Pat<(zextload xaddr:$src, i1),
           (LBZX xaddr:$src)>;
-def : Pat<(i32 (extload iaddr:$src, i1)),
+def : Pat<(extload iaddr:$src, i1),
           (LBZ iaddr:$src)>;
-def : Pat<(i32 (extload xaddr:$src, i1)),
+def : Pat<(extload xaddr:$src, i1),
           (LBZX xaddr:$src)>;
-def : Pat<(i32 (extload iaddr:$src, i8)),
+def : Pat<(extload iaddr:$src, i8),
           (LBZ iaddr:$src)>;
-def : Pat<(i32 (extload xaddr:$src, i8)),
+def : Pat<(extload xaddr:$src, i8),
           (LBZX xaddr:$src)>;
-def : Pat<(i32 (extload iaddr:$src, i16)),
+def : Pat<(extload iaddr:$src, i16),
           (LHZ iaddr:$src)>;
-def : Pat<(i32 (extload xaddr:$src, i16)),
+def : Pat<(extload xaddr:$src, i16),
           (LHZX xaddr:$src)>;
-def : Pat<(f64 (extload iaddr:$src, f32)),
+def : Pat<(extload iaddr:$src, f32),
           (FMRSD (LFS iaddr:$src))>;
-def : Pat<(f64 (extload xaddr:$src, f32)),
+def : Pat<(extload xaddr:$src, f32),
           (FMRSD (LFSX xaddr:$src))>;

 include "PPCInstrAltivec.td"