diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 2f533c28e95d..dc1f4bcf71da 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -126,14 +126,7 @@ void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
     setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
     setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
     setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
-    setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand);
-    setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand);
-    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
-         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
-      setTruncStoreAction(VT.getSimpleVT(),
-                          (MVT::SimpleValueType)InnerVT, Expand);
   }
-  setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);
 
   // Promote all bit-wise operations.
   if (VT.isInteger() && VT != PromotedBitwiseVT) {
@@ -442,6 +435,17 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
   }
 
+  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
+    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
+      setTruncStoreAction((MVT::SimpleValueType)VT,
+                          (MVT::SimpleValueType)InnerVT, Expand);
+    setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
+    setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
+    setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
+  }
+
   if (Subtarget->hasNEON()) {
     addDRTypeForNEON(MVT::v2f32);
     addDRTypeForNEON(MVT::v8i8);
@@ -483,8 +487,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
     setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
     setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
 
-    setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
-
     // Neon does not support some operations on v1i64 and v2i64 types.
     setOperationAction(ISD::MUL, MVT::v1i64, Expand);
     // Custom handling for some quad-vector types to detect VMULL.
diff --git a/llvm/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll b/llvm/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll
new file mode 100644
index 000000000000..af43671791a4
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; PR11319
+
+@i8_res = global <2 x i8>
+@i8_src1 = global <2 x i8>
+@i8_src2 = global <2 x i8>
+
+define void @test_neon_vector_add_2xi8() nounwind {
+; CHECK: test_neon_vector_add_2xi8:
+  %1 = load <2 x i8>* @i8_src1
+  %2 = load <2 x i8>* @i8_src2
+  %3 = add <2 x i8> %1, %2
+  store <2 x i8> %3, <2 x i8>* @i8_res
+  ret void
+}