diff --git a/llvm/test/CodeGen/X86/haddsub-shuf.ll b/llvm/test/CodeGen/X86/haddsub-shuf.ll
index 5b2f3eb7645c..2741ba5af0da 100644
--- a/llvm/test/CodeGen/X86/haddsub-shuf.ll
+++ b/llvm/test/CodeGen/X86/haddsub-shuf.ll
@@ -1116,3 +1116,188 @@ define <4 x i32> @hsub_4i32_v8i32_shuffle(<8 x i32> %a0) {
   %hsub = add <4 x i32> %hsub0, %hsub1
   ret <4 x i32> %hsub
 }
+
+;
+; fold HOP(SHUFFLE(X,Y),SHUFFLE(X,Y)) --> SHUFFLE(HOP(X,Y)).
+;
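+;
+; e.g. for v4f64 with X = <x0,x1,x2,x3> and Y = <y0,y1,y2,y3>:
+;   %shuf0 = <x0,x1,y0,y1>, %shuf1 = <x2,x3,y2,y3>
+;   HADD(%shuf0,%shuf1) = <x0+x1, x2+x3, y0+y1, y2+y3>
+; which is just a lane shuffle of HADD(X,Y) = <x0+x1, y0+y1, x2+x3, y2+y3>, so
+; the fold should allow a single HADD of the original operands (plus a cheap
+; shuffle) to be emitted. The checks below still materialize both input
+; shuffles before the horizontal op.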
+
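+; v4f64: %shuf0/%shuf1 concatenate the low and high 128-bit halves of %a0 and
+; %a1; the even/odd element shuffles plus fadd form the pairwise add.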
+define <4 x double> @hadd_4f64_v4f64_shuffle(<4 x double> %a0, <4 x double> %a1) {
+; SSSE3-LABEL: hadd_4f64_v4f64_shuffle:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    haddpd %xmm1, %xmm0
+; SSSE3-NEXT:    haddpd %xmm3, %xmm2
+; SSSE3-NEXT:    movapd %xmm2, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX-LABEL: hadd_4f64_v4f64_shuffle:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX-NEXT:    vhaddpd %ymm0, %ymm2, %ymm0
+; AVX-NEXT:    retq
+  %shuf0 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+  %shuf1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+  %hadd0 = shufflevector <4 x double> %shuf0, <4 x double> %shuf1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+  %hadd1 = shufflevector <4 x double> %shuf0, <4 x double> %shuf1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %hadd = fadd <4 x double> %hadd0, %hadd1
+  ret <4 x double> %hadd
+}
+
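+; Same shuffle pattern as above, with fsub forming the horizontal subtract.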
+define <4 x double> @hsub_4f64_v4f64_shuffle(<4 x double> %a0, <4 x double> %a1) {
+; SSSE3-LABEL: hsub_4f64_v4f64_shuffle:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    hsubpd %xmm1, %xmm0
+; SSSE3-NEXT:    hsubpd %xmm3, %xmm2
+; SSSE3-NEXT:    movapd %xmm2, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX-LABEL: hsub_4f64_v4f64_shuffle:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX-NEXT:    vhsubpd %ymm0, %ymm2, %ymm0
+; AVX-NEXT:    retq
+  %shuf0 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+  %shuf1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+  %hsub0 = shufflevector <4 x double> %shuf0, <4 x double> %shuf1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+  %hsub1 = shufflevector <4 x double> %shuf0, <4 x double> %shuf1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %hsub = fsub <4 x double> %hsub0, %hsub1
+  ret <4 x double> %hsub
+}
+
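+; v8f32 variant: the low/high 128-bit halves of %a0 and %a1 are concatenated,
+; then adjacent element pairs are added.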
+define <8 x float> @hadd_8f32_v8f32_shuffle(<8 x float> %a0, <8 x float> %a1) {
+; SSSE3-LABEL: hadd_8f32_v8f32_shuffle:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    haddps %xmm1, %xmm0
+; SSSE3-NEXT:    haddps %xmm3, %xmm2
+; SSSE3-NEXT:    movaps %xmm2, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX-LABEL: hadd_8f32_v8f32_shuffle:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX-NEXT:    vhaddps %ymm0, %ymm2, %ymm0
+; AVX-NEXT:    retq
+  %shuf0 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+  %shuf1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
+  %hadd0 = shufflevector <8 x float> %shuf0, <8 x float> %shuf1, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
+  %hadd1 = shufflevector <8 x float> %shuf0, <8 x float> %shuf1, <8 x i32> <i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15>
+  %hadd = fadd <8 x float> %hadd0, %hadd1
+  ret <8 x float> %hadd
+}
+
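+; v8f32 horizontal subtract variant of the test above.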
+define <8 x float> @hsub_8f32_v8f32_shuffle(<8 x float> %a0, <8 x float> %a1) {
+; SSSE3-LABEL: hsub_8f32_v8f32_shuffle:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    hsubps %xmm1, %xmm0
+; SSSE3-NEXT:    hsubps %xmm3, %xmm2
+; SSSE3-NEXT:    movaps %xmm2, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX-LABEL: hsub_8f32_v8f32_shuffle:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX-NEXT:    vhsubps %ymm0, %ymm2, %ymm0
+; AVX-NEXT:    retq
+  %shuf0 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+  %shuf1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
+  %hsub0 = shufflevector <8 x float> %shuf0, <8 x float> %shuf1, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
+  %hsub1 = shufflevector <8 x float> %shuf0, <8 x float> %shuf1, <8 x i32> <i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15>
+  %hsub = fsub <8 x float> %hsub0, %hsub1
+  ret <8 x float> %hsub
+}
+
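+; Integer v8i32 variant; AVX1 lacks 256-bit integer horizontal ops, so it is
+; expected to split into two 128-bit vphaddd instructions.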
+define <8 x i32> @hadd_8i32_v8i32_shuffle(<8 x i32> %a0, <8 x i32> %a1) {
+; SSSE3-LABEL: hadd_8i32_v8i32_shuffle:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    phaddd %xmm1, %xmm0
+; SSSE3-NEXT:    phaddd %xmm3, %xmm2
+; SSSE3-NEXT:    movdqa %xmm2, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: hadd_8i32_v8i32_shuffle:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vphaddd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vphaddd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: hadd_8i32_v8i32_shuffle:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm2
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX2-NEXT:    vphaddd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+  %shuf0 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+  %shuf1 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
+  %hadd0 = shufflevector <8 x i32> %shuf0, <8 x i32> %shuf1, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
+  %hadd1 = shufflevector <8 x i32> %shuf0, <8 x i32> %shuf1, <8 x i32> <i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15>
+  %hadd = add <8 x i32> %hadd0, %hadd1
+  ret <8 x i32> %hadd
+}
+
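+; v8i32 horizontal subtract variant.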
+define <8 x i32> @hsub_8i32_v8i32_shuffle(<8 x i32> %a0, <8 x i32> %a1) {
+; SSSE3-LABEL: hsub_8i32_v8i32_shuffle:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    phsubd %xmm1, %xmm0
+; SSSE3-NEXT:    phsubd %xmm3, %xmm2
+; SSSE3-NEXT:    movdqa %xmm2, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: hsub_8i32_v8i32_shuffle:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vphsubd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vphsubd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: hsub_8i32_v8i32_shuffle:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm2
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX2-NEXT:    vphsubd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+  %shuf0 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+  %shuf1 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
+  %hsub0 = shufflevector <8 x i32> %shuf0, <8 x i32> %shuf1, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
+  %hsub1 = shufflevector <8 x i32> %shuf0, <8 x i32> %shuf1, <8 x i32> <i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15>
+  %hsub = sub <8 x i32> %hsub0, %hsub1
+  ret <8 x i32> %hsub
+}
+
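+; v16i16 variant using phaddw; same low-half/high-half concatenation pattern,
+; with eight elements per 128-bit lane.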
+define <16 x i16> @hadd_16i16_v16i16_shuffle(<16 x i16> %a0, <16 x i16> %a1) {
+; SSSE3-LABEL: hadd_16i16_v16i16_shuffle:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    phaddw %xmm1, %xmm0
+; SSSE3-NEXT:    phaddw %xmm3, %xmm2
+; SSSE3-NEXT:    movdqa %xmm2, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: hadd_16i16_v16i16_shuffle:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vphaddw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vphaddw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: hadd_16i16_v16i16_shuffle:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm2
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX2-NEXT:    vphaddw %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+  %shuf0 = shufflevector <16 x i16> %a0, <16 x i16> %a1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+  %shuf1 = shufflevector <16 x i16> %a0, <16 x i16> %a1, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %hadd0 = shufflevector <16 x i16> %shuf0, <16 x i16> %shuf1, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 16, i32 18, i32 20, i32 22, i32 8, i32 10, i32 12, i32 14, i32 24, i32 26, i32 28, i32 30>
+  %hadd1 = shufflevector <16 x i16> %shuf0, <16 x i16> %shuf1, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 17, i32 19, i32 21, i32 23, i32 9, i32 11, i32 13, i32 15, i32 25, i32 27, i32 29, i32 31>
+  %hadd = add <16 x i16> %hadd0, %hadd1
+  ret <16 x i16> %hadd
+}