Add 256-bit versions of alignedstore and alignedload, to be stricter
about the alignment checking. This was found by inspection and I don't
have any testcases so far, although the llvm testsuite runs without any
problems.

llvm-svn: 139625

commit 03d6002d68
parent 56d9b51caf
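
Background for why the stricter predicates matter (an illustration, not part of the commit): the VEX-encoded aligned moves vmovaps/vmovapd fault when their 256-bit memory operand is not 32-byte aligned, so a fragment that only checks getAlignment() >= 16 can let instruction selection emit the aligned form for an address that is 16-byte but not 32-byte aligned. A minimal C sketch of that failure mode, assuming a C11 compiler targeting AVX (the buffer layout and names are invented for the example):

#include <immintrin.h>
#include <stdlib.h>

int main(void) {
  /* 32-byte aligned base; p is then 16-byte aligned but NOT 32-byte aligned */
  float *buf = aligned_alloc(32, 32 * sizeof(float));
  float *p = buf + 4;               /* +16 bytes past a 32-byte boundary */

  __m256 v = _mm256_set1_ps(1.0f);
  _mm256_storeu_ps(p, v);           /* vmovups: legal at any alignment */
  /* _mm256_store_ps(p, v); */      /* vmovaps: would fault (#GP) here */

  free(buf);
  return 0;
}

With the new fragments below, a 256-bit store whose known alignment is only 16 no longer matches the vmovaps/vmovapd patterns and falls back to the unaligned forms.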
--- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -204,17 +204,28 @@ def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
 def loadv8i32 : PatFrag<(ops node:$ptr), (v8i32 (load node:$ptr))>;
 def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;
 
-// Like 'store', but always requires vector alignment.
+// Like 'store', but always requires 128-bit vector alignment.
 def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                            (store node:$val, node:$ptr), [{
   return cast<StoreSDNode>(N)->getAlignment() >= 16;
 }]>;
 
-// Like 'load', but always requires vector alignment.
+// Like 'store', but always requires 256-bit vector alignment.
+def alignedstore256 : PatFrag<(ops node:$val, node:$ptr),
+                              (store node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getAlignment() >= 32;
+}]>;
+
+// Like 'load', but always requires 128-bit vector alignment.
 def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
   return cast<LoadSDNode>(N)->getAlignment() >= 16;
 }]>;
 
+// Like 'load', but always requires 256-bit vector alignment.
+def alignedload256 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+  return cast<LoadSDNode>(N)->getAlignment() >= 32;
+}]>;
+
 def alignedloadfsf32 : PatFrag<(ops node:$ptr),
                                (f32 (alignedload node:$ptr))>;
 def alignedloadfsf64 : PatFrag<(ops node:$ptr),
@@ -232,13 +243,13 @@ def alignedloadv2i64 : PatFrag<(ops node:$ptr),
 
 // 256-bit aligned load pattern fragments
 def alignedloadv8f32 : PatFrag<(ops node:$ptr),
-                               (v8f32 (alignedload node:$ptr))>;
+                               (v8f32 (alignedload256 node:$ptr))>;
 def alignedloadv4f64 : PatFrag<(ops node:$ptr),
-                               (v4f64 (alignedload node:$ptr))>;
+                               (v4f64 (alignedload256 node:$ptr))>;
 def alignedloadv8i32 : PatFrag<(ops node:$ptr),
-                               (v8i32 (alignedload node:$ptr))>;
+                               (v8i32 (alignedload256 node:$ptr))>;
 def alignedloadv4i64 : PatFrag<(ops node:$ptr),
-                               (v4i64 (alignedload node:$ptr))>;
+                               (v4i64 (alignedload256 node:$ptr))>;
 
 // Like 'load', but uses special alignment checks suitable for use in
 // memory operands in most SSE instructions, which are required to
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -717,10 +717,10 @@ def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                    [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                    "movaps\t{$src, $dst|$dst, $src}",
-                   [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
+                   [(alignedstore256 (v8f32 VR256:$src), addr:$dst)]>, VEX;
 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                    "movapd\t{$src, $dst|$dst, $src}",
-                   [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
+                   [(alignedstore256 (v4f64 VR256:$src), addr:$dst)]>, VEX;
 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                    "movups\t{$src, $dst|$dst, $src}",
                    [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
@@ -872,13 +872,13 @@ let Predicates = [HasAVX] in {
             (VMOVAPSYrm addr:$src)>;
   def : Pat<(loadv8i32 addr:$src),
             (VMOVUPSYrm addr:$src)>;
-  def : Pat<(alignedstore (v4i64 VR256:$src), addr:$dst),
+  def : Pat<(alignedstore256 (v4i64 VR256:$src), addr:$dst),
             (VMOVAPSYmr addr:$dst, VR256:$src)>;
-  def : Pat<(alignedstore (v8i32 VR256:$src), addr:$dst),
+  def : Pat<(alignedstore256 (v8i32 VR256:$src), addr:$dst),
             (VMOVAPSYmr addr:$dst, VR256:$src)>;
-  def : Pat<(alignedstore (v16i16 VR256:$src), addr:$dst),
+  def : Pat<(alignedstore256 (v16i16 VR256:$src), addr:$dst),
             (VMOVAPSYmr addr:$dst, VR256:$src)>;
-  def : Pat<(alignedstore (v32i8 VR256:$src), addr:$dst),
+  def : Pat<(alignedstore256 (v32i8 VR256:$src), addr:$dst),
             (VMOVAPSYmr addr:$dst, VR256:$src)>;
   def : Pat<(store (v4i64 VR256:$src), addr:$dst),
             (VMOVUPSYmr addr:$dst, VR256:$src)>;
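
Since the commit notes that no testcase exists yet, here is a hedged sketch of what a regression test for the new behavior could look like in LLVM IR of that era (function names are invented; this does not ship with the commit). Run through llc with AVX enabled, only the align-32 store may select vmovaps; the align-16 store must now select vmovups:

define void @store_align32(<8 x float>* %p, <8 x float> %v) {
  store <8 x float> %v, <8 x float>* %p, align 32   ; may select vmovaps
  ret void
}

define void @store_align16(<8 x float>* %p, <8 x float> %v) {
  store <8 x float> %v, <8 x float>* %p, align 16   ; must now select vmovups
  ret void
}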