author     Nikita Popov <nikita.ppv@gmail.com>   2019-01-14 22:18:39 +0000
committer  Nikita Popov <nikita.ppv@gmail.com>   2019-01-14 22:18:39 +0000
commit     458c10d3a509c171be297dc4deb46038d2b1fab6 (patch)
tree       6203ee3d29b86ff62d6e117299cfd13eb245d4a2 /llvm
parent     81f7c7af1cd6a56690cb17a3b4831e6d20b2c624 (diff)
Revert "[CodeGen][X86] Expand USUBSAT to UMAX+SUB, also for vectors"
This reverts commit r351125.
I missed test changes in an SLPVectorizer test due to the cost model
changes. Reverting for now.
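For context (an illustrative sketch, not text from the commit): the reverted patch taught the generic expandAddSubSat code to lower usub.sat(a, b) as umax(a, b) - b whenever UMAX is legal or custom for the type (pmaxud + psubd on X86, per the cost-table entries removed below), instead of scalarizing vectors. The real expansion operates on SelectionDAG nodes; written out as hand-made LLVM IR, with function names of our choosing, the equivalence looks like this:

declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)

; usub.sat(a, b): a - b, clamped at 0 instead of wrapping.
define <4 x i32> @usubsat_call(<4 x i32> %a, <4 x i32> %b) {
  %z = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %z
}

; The reverted expansion, with umax spelled as icmp+select: if a >= b the
; max is a and the sub yields a - b; if a < b the max is b and the sub
; yields b - b = 0, which is exactly the saturation value.
define <4 x i32> @usubsat_expanded(<4 x i32> %a, <4 x i32> %b) {
  %c = icmp ugt <4 x i32> %a, %b
  %max = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %b
  %z = sub <4 x i32> %max, %b
  ret <4 x i32> %z
}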
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp |   12
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp    |   20
-rw-r--r--  llvm/lib/Target/X86/X86TargetTransformInfo.cpp      |    7
-rw-r--r--  llvm/test/Analysis/CostModel/X86/arith-usat.ll      |  121
-rw-r--r--  llvm/test/CodeGen/X86/usub_sat.ll                   |   40
-rw-r--r--  llvm/test/CodeGen/X86/usub_sat_vec.ll               | 2229
6 files changed, 1651 insertions(+), 778 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 4923a529c21..6e0bc97e92b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -141,7 +141,6 @@ class VectorLegalizer {
   SDValue ExpandFunnelShift(SDValue Op);
   SDValue ExpandROT(SDValue Op);
   SDValue ExpandFMINNUM_FMAXNUM(SDValue Op);
-  SDValue ExpandAddSubSat(SDValue Op);
   SDValue ExpandStrictFPOp(SDValue Op);

   /// Implements vector promotion.
@@ -778,11 +777,6 @@ SDValue VectorLegalizer::Expand(SDValue Op) {
   case ISD::FMINNUM:
   case ISD::FMAXNUM:
     return ExpandFMINNUM_FMAXNUM(Op);
-  case ISD::USUBSAT:
-  case ISD::SSUBSAT:
-  case ISD::UADDSAT:
-  case ISD::SADDSAT:
-    return ExpandAddSubSat(Op);
   case ISD::STRICT_FADD:
   case ISD::STRICT_FSUB:
   case ISD::STRICT_FMUL:
@@ -1212,12 +1206,6 @@ SDValue VectorLegalizer::ExpandFMINNUM_FMAXNUM(SDValue Op) {
   return DAG.UnrollVectorOp(Op.getNode());
 }

-SDValue VectorLegalizer::ExpandAddSubSat(SDValue Op) {
-  if (SDValue Expanded = TLI.expandAddSubSat(Op.getNode(), DAG))
-    return Expanded;
-  return DAG.UnrollVectorOp(Op.getNode());
-}
-
 SDValue VectorLegalizer::ExpandStrictFPOp(SDValue Op) {
   EVT VT = Op.getValueType();
   EVT EltVT = VT.getVectorElementType();
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index a2f05c1e3ce..3c757440367 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -5277,22 +5277,6 @@ SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op,
 SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const {
   unsigned Opcode = Node->getOpcode();
-  SDValue LHS = Node->getOperand(0);
-  SDValue RHS = Node->getOperand(1);
-  EVT VT = LHS.getValueType();
-  SDLoc dl(Node);
-
-  // usub.sat(a, b) -> umax(a, b) - b
-  if (Opcode == ISD::USUBSAT && isOperationLegalOrCustom(ISD::UMAX, VT)) {
-    SDValue Max = DAG.getNode(ISD::UMAX, dl, VT, LHS, RHS);
-    return DAG.getNode(ISD::SUB, dl, VT, Max, RHS);
-  }
-
-  if (VT.isVector()) {
-    // TODO: Consider not scalarizing here.
-    return SDValue();
-  }
-
   unsigned OverflowOp;
   switch (Opcode) {
   case ISD::SADDSAT:
@@ -5311,7 +5295,11 @@ SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const {
     llvm_unreachable("Expected method to receive signed or unsigned saturation "
                      "addition or subtraction node.");
   }
+  assert(Node->getNumOperands() == 2 && "Expected node to have 2 operands.");
+  SDLoc dl(Node);
+  SDValue LHS = Node->getOperand(0);
+  SDValue RHS = Node->getOperand(1);
   assert(LHS.getValueType().isScalarInteger() &&
          "Expected operands to be integers. Vector of int arguments should "
          "already be unrolled.");
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index 36929a4f543..a3592565c0f 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -1780,10 +1780,6 @@ int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
     { ISD::CTPOP, MVT::v16i32, 24 },
     { ISD::CTTZ, MVT::v8i64, 20 },
     { ISD::CTTZ, MVT::v16i32, 28 },
-    { ISD::USUBSAT, MVT::v16i32, 2 }, // pmaxud + psubd
-    { ISD::USUBSAT, MVT::v2i64, 2 }, // pmaxuq + psubq
-    { ISD::USUBSAT, MVT::v4i64, 2 }, // pmaxuq + psubq
-    { ISD::USUBSAT, MVT::v8i64, 2 }, // pmaxuq + psubq
   };
   static const CostTblEntry XOPCostTbl[] = {
     { ISD::BITREVERSE, MVT::v4i64, 4 },
@@ -1827,7 +1823,6 @@ int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
     { ISD::UADDSAT, MVT::v32i8, 1 },
     { ISD::USUBSAT, MVT::v16i16, 1 },
     { ISD::USUBSAT, MVT::v32i8, 1 },
-    { ISD::USUBSAT, MVT::v8i32, 2 }, // pmaxud + psubd
     { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/
     { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
     { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
@@ -1863,7 +1858,6 @@ int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
     { ISD::UADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
     { ISD::USUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
     { ISD::USUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
-    { ISD::USUBSAT, MVT::v8i32, 6 }, // 2 x 128-bit Op + extract/insert
     { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/
     { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
     { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
@@ -1884,7 +1878,6 @@ int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
     { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
   };
   static const CostTblEntry SSE42CostTbl[] = {
-    { ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd
     { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/
     { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
   };
diff --git a/llvm/test/Analysis/CostModel/X86/arith-usat.ll b/llvm/test/Analysis/CostModel/X86/arith-usat.ll
index 4d0df9d8f98..03bd5b09e2b 100644
--- a/llvm/test/Analysis/CostModel/X86/arith-usat.ll
+++ b/llvm/test/Analysis/CostModel/X86/arith-usat.ll
@@ -250,43 +250,24 @@ declare <32 x i8> @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>)
declare <64 x i8> @llvm.usub.sat.v64i8(<64 x i8>, <64 x i8>)

define i32 @sub(i32 %arg) {
-; SSSE3-LABEL: 'sub'
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; SSE42-LABEL: 'sub'
-; SSE42-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
-; SSE42-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; SSE42-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; SSE42-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; SSE42-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; SSE42-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; SSE42-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; SSE42-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; SSE42-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
-; SSE42-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; SSE42-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; SSE42-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; SSE42-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
-; SSE42-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; SSE42-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; SSE42-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; SSE42-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+; SSE-LABEL: 'sub'
+; SSE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
+; SSE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; SSE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; SSE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; SSE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
+; SSE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; SSE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; SSE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; SSE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
+; SSE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; SSE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; SSE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; SSE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
+; SSE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; SSE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; SSE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; SSE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; AVX1-LABEL: 'sub'
; AVX1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
@@ -294,9 +275,9 @@ define i32 @sub(i32 %arg) {
; AVX1-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
; AVX1-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; AVX1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; AVX1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; AVX1-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; AVX1-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX1-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX1-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX1-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; AVX1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
; AVX1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; AVX1-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
@@ -313,9 +294,9 @@ define i32 @sub(i32 %arg) {
; AVX2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
; AVX2-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; AVX2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; AVX2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; AVX2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; AVX2-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX2-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX2-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; AVX2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
; AVX2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; AVX2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
@@ -328,13 +309,13 @@ define i32 @sub(i32 %arg) {
;
; AVX512F-LABEL: 'sub'
; AVX512F-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
-; AVX512F-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; AVX512F-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; AVX512F-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX512F-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX512F-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX512F-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; AVX512F-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; AVX512F-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; AVX512F-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; AVX512F-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX512F-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX512F-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX512F-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; AVX512F-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
; AVX512F-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; AVX512F-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
@@ -347,13 +328,13 @@ define i32 @sub(i32 %arg) {
;
; AVX512BW-LABEL: 'sub'
; AVX512BW-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
-; AVX512BW-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; AVX512BW-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; AVX512BW-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX512BW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX512BW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX512BW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; AVX512BW-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; AVX512BW-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; AVX512BW-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; AVX512BW-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX512BW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX512BW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX512BW-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; AVX512BW-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
; AVX512BW-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; AVX512BW-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
@@ -366,13 +347,13 @@ define i32 @sub(i32 %arg) {
;
; AVX512DQ-LABEL: 'sub'
; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
-; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
@@ -389,9 +370,9 @@ define i32 @sub(i32 %arg) {
; SLM-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
; SLM-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; SLM-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; SLM-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; SLM-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; SLM-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; SLM-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; SLM-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; SLM-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; SLM-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
; SLM-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; SLM-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
@@ -408,9 +389,9 @@ define i32 @sub(i32 %arg) {
; GLM-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
; GLM-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; GLM-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; GLM-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; GLM-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; GLM-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; GLM-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; GLM-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; GLM-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; GLM-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
; GLM-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; GLM-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
@@ -427,9 +408,9 @@ define i32 @sub(i32 %arg) {
; BTVER2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
; BTVER2-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; BTVER2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; BTVER2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; BTVER2-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; BTVER2-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; BTVER2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; BTVER2-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; BTVER2-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; BTVER2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
; BTVER2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; BTVER2-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
diff --git a/llvm/test/CodeGen/X86/usub_sat.ll b/llvm/test/CodeGen/X86/usub_sat.ll
index ef822faafeb..a1efc2ca570 100644
--- a/llvm/test/CodeGen/X86/usub_sat.ll
+++ b/llvm/test/CodeGen/X86/usub_sat.ll
@@ -112,15 +112,37 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
;
; X64-LABEL: vec:
; X64: # %bb.0:
-; X64-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
-; X64-NEXT: movdqa %xmm1, %xmm3
-; X64-NEXT: pxor %xmm2, %xmm3
-; X64-NEXT: pxor %xmm0, %xmm2
-; X64-NEXT: pcmpgtd %xmm3, %xmm2
-; X64-NEXT: pand %xmm2, %xmm0
-; X64-NEXT: pandn %xmm1, %xmm2
-; X64-NEXT: por %xmm2, %xmm0
-; X64-NEXT: psubd %xmm1, %xmm0
+; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; X64-NEXT: movd %xmm2, %eax
+; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
+; X64-NEXT: movd %xmm2, %ecx
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: subl %eax, %ecx
+; X64-NEXT: cmovbl %edx, %ecx
+; X64-NEXT: movd %ecx, %xmm2
+; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; X64-NEXT: movd %xmm3, %eax
+; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; X64-NEXT: movd %xmm3, %ecx
+; X64-NEXT: subl %eax, %ecx
+; X64-NEXT: cmovbl %edx, %ecx
+; X64-NEXT: movd %ecx, %xmm3
+; X64-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X64-NEXT: movd %xmm1, %eax
+; X64-NEXT: movd %xmm0, %ecx
+; X64-NEXT: subl %eax, %ecx
+; X64-NEXT: cmovbl %edx, %ecx
+; X64-NEXT: movd %ecx, %xmm2
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; X64-NEXT: movd %xmm1, %eax
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X64-NEXT: movd %xmm0, %ecx
+; X64-NEXT: subl %eax, %ecx
+; X64-NEXT: cmovbl %edx, %ecx
+; X64-NEXT: movd %ecx, %xmm0
+; X64-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; X64-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; X64-NEXT: movdqa %xmm2, %xmm0
; X64-NEXT: retq
  %tmp = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %y);
  ret <4 x i32> %tmp;
diff --git a/llvm/test/CodeGen/X86/usub_sat_vec.ll b/llvm/test/CodeGen/X86/usub_sat_vec.ll
index 72c0c51ab74..24713a4bdb7 100644
--- a/llvm/test/CodeGen/X86/usub_sat_vec.ll
+++ b/llvm/test/CodeGen/X86/usub_sat_vec.ll
@@ -634,106 +634,84 @@ define <2 x i32> @v2i32(<2 x i32> %x, <2 x i32> %y) nounwind {
; SSE2-LABEL: v2i32:
; SSE2: # %bb.0:
; SSE2-NEXT: psllq $32, %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: movq %xmm1, %rax
; SSE2-NEXT: psllq $32, %xmm0
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm3
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: psubq %xmm1, %xmm0
-; SSE2-NEXT: psrlq $32, %xmm0
+; SSE2-NEXT: movq %xmm0, %rcx
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: subq %rax, %rcx
+; SSE2-NEXT: cmovbq %rdx, %rcx
+; SSE2-NEXT: movq %rcx, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE2-NEXT: movq %xmm1, %rax
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movq %xmm0, %rcx
+; SSE2-NEXT: subq %rax, %rcx
+; SSE2-NEXT: cmovbq %rdx, %rcx
+; SSE2-NEXT: movq %rcx, %xmm0
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
+; SSE2-NEXT: psrlq $32, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v2i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: psllq $32, %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
-; SSSE3-NEXT: movdqa %xmm1, %xmm3
-; SSSE3-NEXT: pxor %xmm2, %xmm3
+; SSSE3-NEXT: movq %xmm1, %rax
; SSSE3-NEXT: psllq $32, %xmm0
-; SSSE3-NEXT: pxor %xmm0, %xmm2
-; SSSE3-NEXT: movdqa %xmm2, %xmm4
-; SSSE3-NEXT: pcmpgtd %xmm3, %xmm4
-; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSSE3-NEXT: pand %xmm5, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSSE3-NEXT: por %xmm2, %xmm3
-; SSSE3-NEXT: pand %xmm3, %xmm0
-; SSSE3-NEXT: pandn %xmm1, %xmm3
-; SSSE3-NEXT: por %xmm3, %xmm0
-; SSSE3-NEXT: psubq %xmm1, %xmm0
-; SSSE3-NEXT: psrlq $32, %xmm0
+; SSSE3-NEXT: movq %xmm0, %rcx
+; SSSE3-NEXT: xorl %edx, %edx
+; SSSE3-NEXT: subq %rax, %rcx
+; SSSE3-NEXT: cmovbq %rdx, %rcx
+; SSSE3-NEXT: movq %rcx, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSSE3-NEXT: movq %xmm1, %rax
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSSE3-NEXT: movq %xmm0, %rcx
+; SSSE3-NEXT: subq %rax, %rcx
+; SSSE3-NEXT: cmovbq %rdx, %rcx
+; SSSE3-NEXT: movq %rcx, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
+; SSSE3-NEXT: psrlq $32, %xmm2
+; SSSE3-NEXT: movdqa %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: v2i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psllq $32, %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: pxor %xmm0, %xmm3
-; SSE41-NEXT: psllq $32, %xmm2
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: pcmpgtd %xmm3, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT: pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT: pand %xmm5, %xmm0
-; SSE41-NEXT: por %xmm4, %xmm0
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm3
-; SSE41-NEXT: psubq %xmm1, %xmm3
-; SSE41-NEXT: psrlq $32, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pextrq $1, %xmm1, %rax
+; SSE41-NEXT: psllq $32, %xmm0
+; SSE41-NEXT: pextrq $1, %xmm0, %rcx
+; SSE41-NEXT: xorl %edx, %edx
+; SSE41-NEXT: subq %rax, %rcx
+; SSE41-NEXT: cmovbq %rdx, %rcx
+; SSE41-NEXT: movq %rcx, %xmm2
+; SSE41-NEXT: movq %xmm1, %rax
+; SSE41-NEXT: movq %xmm0, %rcx
+; SSE41-NEXT: subq %rax, %rcx
+; SSE41-NEXT: cmovbq %rdx, %rcx
+; SSE41-NEXT: movq %rcx, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE41-NEXT: psrlq $32, %xmm0
; SSE41-NEXT: retq
;
-; AVX1-LABEL: v2i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
-; AVX1-NEXT: vpsllq $32, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: v2i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpsllq $32, %xmm1, %xmm1
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
-; AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
-; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2
-; AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: v2i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsllq $32, %xmm1, %xmm1
-; AVX512-NEXT: vpsllq $32, %xmm0, %xmm0
-; AVX512-NEXT: vpmaxuq %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsubq %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlq $32, %xmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX-LABEL: v2i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpsllq $32, %xmm1, %xmm1
+; AVX-NEXT: vpextrq $1, %xmm1, %rax
+; AVX-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX-NEXT: xorl %edx, %edx
+; AVX-NEXT: subq %rax, %rcx
+; AVX-NEXT: cmovbq %rdx, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm2
+; AVX-NEXT: vmovq %xmm1, %rax
+; AVX-NEXT: vmovq %xmm0, %rcx
+; AVX-NEXT: subq %rax, %rcx
+; AVX-NEXT: cmovbq %rdx, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm0
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
+; AVX-NEXT: retq
  %z = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %x, <2 x i32> %y)
  ret <2 x i32> %z
}
@@ -741,40 +719,123 @@ define <2 x i32> @v2i32(<2 x i32> %x, <2 x i32> %y) nounwind {
define <4 x i32> @v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; SSE2-LABEL: v4i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: psubd %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: subl %eax, %ecx
+; SSE2-NEXT: cmovbl %edx, %ecx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; SSE2-NEXT: movd %xmm3, %eax
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm3, %ecx
+; SSE2-NEXT: subl %eax, %ecx
+; SSE2-NEXT: cmovbl %edx, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: subl %eax, %ecx
+; SSE2-NEXT: cmovbl %edx, %ecx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: subl %eax, %ecx
+; SSE2-NEXT: cmovbl %edx, %ecx
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v4i32:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
-; SSSE3-NEXT: movdqa %xmm1, %xmm3
-; SSSE3-NEXT: pxor %xmm2, %xmm3
-; SSSE3-NEXT: pxor %xmm0, %xmm2
-; SSSE3-NEXT: pcmpgtd %xmm3, %xmm2
-; SSSE3-NEXT: pand %xmm2, %xmm0
-; SSSE3-NEXT: pandn %xmm1, %xmm2
-; SSSE3-NEXT: por %xmm2, %xmm0
-; SSSE3-NEXT: psubd %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; SSSE3-NEXT: movd %xmm2, %eax
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
+; SSSE3-NEXT: movd %xmm2, %ecx
+; SSSE3-NEXT: xorl %edx, %edx
+; SSSE3-NEXT: subl %eax, %ecx
+; SSSE3-NEXT: cmovbl %edx, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; SSSE3-NEXT: movd %xmm3, %eax
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; SSSE3-NEXT: movd %xmm3, %ecx
+; SSSE3-NEXT: subl %eax, %ecx
+; SSSE3-NEXT: cmovbl %edx, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm3
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSSE3-NEXT: movd %xmm1, %eax
+; SSSE3-NEXT: movd %xmm0, %ecx
+; SSSE3-NEXT: subl %eax, %ecx
+; SSSE3-NEXT: cmovbl %edx, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSSE3-NEXT: movd %xmm1, %eax
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSSE3-NEXT: movd %xmm0, %ecx
+; SSSE3-NEXT: subl %eax, %ecx
+; SSSE3-NEXT: cmovbl %edx, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm0
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSSE3-NEXT: movdqa %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: v4i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxud %xmm1, %xmm0
-; SSE41-NEXT: psubd %xmm1, %xmm0
+; SSE41-NEXT: pextrd $1, %xmm1, %eax
+; SSE41-NEXT: pextrd $1, %xmm0, %ecx
+; SSE41-NEXT: xorl %edx, %edx
+; SSE41-NEXT: subl %eax, %ecx
+; SSE41-NEXT: cmovbl %edx, %ecx
+; SSE41-NEXT: movd %xmm1, %eax
+; SSE41-NEXT: movd %xmm0, %esi
+; SSE41-NEXT: subl %eax, %esi
+; SSE41-NEXT: cmovbl %edx, %esi
+; SSE41-NEXT: movd %esi, %xmm2
+; SSE41-NEXT: pinsrd $1, %ecx, %xmm2
+; SSE41-NEXT: pextrd $2, %xmm1, %eax
+; SSE41-NEXT: pextrd $2, %xmm0, %ecx
+; SSE41-NEXT: subl %eax, %ecx
+; SSE41-NEXT: cmovbl %edx, %ecx
+; SSE41-NEXT: pinsrd $2, %ecx, %xmm2
+; SSE41-NEXT: pextrd $3, %xmm1, %eax
+; SSE41-NEXT: pextrd $3, %xmm0, %ecx
+; SSE41-NEXT: subl %eax, %ecx
+; SSE41-NEXT: cmovbl %edx, %ecx
+; SSE41-NEXT: pinsrd $3, %ecx, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: v4i32:
; AVX: # %bb.0:
-; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpextrd $1, %xmm1, %eax
+; AVX-NEXT: vpextrd $1, %xmm0, %ecx
+; AVX-NEXT: xorl %edx, %edx
+; AVX-NEXT: subl %eax, %ecx
+; AVX-NEXT: cmovbl %edx, %ecx
+; AVX-NEXT: vmovd %xmm1, %eax
+; AVX-NEXT: vmovd %xmm0, %esi
+; AVX-NEXT: subl %eax, %esi
+; AVX-NEXT: cmovbl %edx, %esi
+; AVX-NEXT: vmovd %esi, %xmm2
+; AVX-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX-NEXT: vpextrd $2, %xmm1, %eax
+; AVX-NEXT: vpextrd $2, %xmm0, %ecx
+; AVX-NEXT: subl %eax, %ecx
+; AVX-NEXT: cmovbl %edx, %ecx
+; AVX-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2
+; AVX-NEXT: vpextrd $3, %xmm1, %eax
+; AVX-NEXT: vpextrd $3, %xmm0, %ecx
+; AVX-NEXT: subl %eax, %ecx
+; AVX-NEXT: cmovbl %edx, %ecx
+; AVX-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm0
; AVX-NEXT: retq
  %z = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
  ret <4 x i32> %z
@@ -783,79 +844,323 @@ define <4 x i32> @v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
define <8 x i32> @v8i32(<8 x i32> %x, <8 x i32> %y) nounwind {
; SSE2-LABEL: v8i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: pxor %xmm5, %xmm6
; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm6, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm4
-; SSE2-NEXT: por %xmm0, %xmm4
-; SSE2-NEXT: psubd %xmm2, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pxor %xmm5, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm5
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: psubd %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[3,1,2,3]
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,1,2,3]
+; SSE2-NEXT: movd %xmm0, %edx
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
+; SSE2-NEXT: movd %xmm5, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,0,1]
+; SSE2-NEXT: movd %xmm5, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: movd %xmm4, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,2,3]
+; SSE2-NEXT: movd %xmm2, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3]
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; SSE2-NEXT: movd %xmm2, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,3,0,1]
+; SSE2-NEXT: movd %xmm4, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; SSE2-NEXT: movd %xmm4, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSE2-NEXT: movd %xmm3, %ecx
+; SSE2-NEXT: movd %xmm1, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; SSE2-NEXT: movd %xmm3, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE2-NEXT: movd %xmm1, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm1
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v8i32:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
-; SSSE3-NEXT: movdqa %xmm2, %xmm6
-; SSSE3-NEXT: pxor %xmm5, %xmm6
; SSSE3-NEXT: movdqa %xmm0, %xmm4
-; SSSE3-NEXT: pxor %xmm5, %xmm4
-; SSSE3-NEXT: pcmpgtd %xmm6, %xmm4
-; SSSE3-NEXT: pand %xmm4, %xmm0
-; SSSE3-NEXT: pandn %xmm2, %xmm4
-; SSSE3-NEXT: por %xmm0, %xmm4
-; SSSE3-NEXT: psubd %xmm2, %xmm4
-; SSSE3-NEXT: movdqa %xmm3, %xmm0
-; SSSE3-NEXT: pxor %xmm5, %xmm0
-; SSSE3-NEXT: pxor %xmm1, %xmm5
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm5
-; SSSE3-NEXT: pand %xmm5, %xmm1
-; SSSE3-NEXT: pandn %xmm3, %xmm5
-; SSSE3-NEXT: por %xmm5, %xmm1
-; SSSE3-NEXT: psubd %xmm3, %xmm1
-; SSSE3-NEXT: movdqa %xmm4, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[3,1,2,3]
+; SSSE3-NEXT: movd %xmm0, %ecx
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,1,2,3]
+; SSSE3-NEXT: movd %xmm0, %edx
+; SSSE3-NEXT: xorl %eax, %eax
+; SSSE3-NEXT: subl %ecx, %edx
+; SSSE3-NEXT: cmovbl %eax, %edx
+; SSSE3-NEXT: movd %edx, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
+; SSSE3-NEXT: movd %xmm5, %ecx
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,0,1]
+; SSSE3-NEXT: movd %xmm5, %edx
+; SSSE3-NEXT: subl %ecx, %edx
+; SSSE3-NEXT: cmovbl %eax, %edx
+; SSSE3-NEXT: movd %edx, %xmm5
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
+; SSSE3-NEXT: movd %xmm2, %ecx
+; SSSE3-NEXT: movd %xmm4, %edx
+; SSSE3-NEXT: subl %ecx, %edx
+; SSSE3-NEXT: cmovbl %eax, %edx
+; SSSE3-NEXT: movd %edx, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; SSSE3-NEXT: movd %xmm2, %ecx
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,2,3]
+; SSSE3-NEXT: movd %xmm2, %edx
+; SSSE3-NEXT: subl %ecx, %edx
+; SSSE3-NEXT: cmovbl %eax, %edx
+; SSSE3-NEXT: movd %edx, %xmm2
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3]
+; SSSE3-NEXT: movd %xmm2, %ecx
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; SSSE3-NEXT: movd %xmm2, %edx
+; SSSE3-NEXT: subl %ecx, %edx
+; SSSE3-NEXT: cmovbl %eax, %edx
+; SSSE3-NEXT: movd %edx, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,3,0,1]
+; SSSE3-NEXT: movd %xmm4, %ecx
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; SSSE3-NEXT: movd %xmm4, %edx
+; SSSE3-NEXT: subl %ecx, %edx
+; SSSE3-NEXT: cmovbl %eax, %edx
+; SSSE3-NEXT: movd %edx, %xmm4
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSSE3-NEXT: movd %xmm3, %ecx
+; SSSE3-NEXT: movd %xmm1, %edx
+; SSSE3-NEXT: subl %ecx, %edx
+; SSSE3-NEXT: cmovbl %eax, %edx
+; SSSE3-NEXT: movd %edx, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; SSSE3-NEXT: movd %xmm3, %ecx
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSSE3-NEXT: movd %xmm1, %edx
+; SSSE3-NEXT: subl %ecx, %edx
+; SSSE3-NEXT: cmovbl %eax, %edx
+; SSSE3-NEXT: movd %edx, %xmm1
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; SSSE3-NEXT: movdqa %xmm2, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: v8i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxud %xmm2, %xmm0
-; SSE41-NEXT: psubd %xmm2, %xmm0
-; SSE41-NEXT: pmaxud %xmm3, %xmm1
-; SSE41-NEXT: psubd %xmm3, %xmm1
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: pextrd $1, %xmm2, %ecx
+; SSE41-NEXT: pextrd $1, %xmm0, %edx
+; SSE41-NEXT: xorl %eax, %eax
+; SSE41-NEXT: subl %ecx, %edx
+; SSE41-NEXT: cmovbl %eax, %edx
+; SSE41-NEXT: movd %xmm2, %ecx
+; SSE41-NEXT: movd %xmm0, %esi
+; SSE41-NEXT: subl %ecx, %esi
+; SSE41-NEXT: cmovbl %eax, %esi
+; SSE41-NEXT: movd %esi, %xmm0
+; SSE41-NEXT: pinsrd $1, %edx, %xmm0
+; SSE41-NEXT: pextrd $2, %xmm2, %ecx
+; SSE41-NEXT: pextrd $2, %xmm4, %edx
+; SSE41-NEXT: subl %ecx, %edx
+; SSE41-NEXT: cmovbl %eax, %edx
+; SSE41-NEXT: pinsrd $2, %edx, %xmm0
+; SSE41-NEXT: pextrd $3, %xmm2, %ecx
+; SSE41-NEXT: pextrd $3, %xmm4, %edx
+; SSE41-NEXT: subl %ecx, %edx
+; SSE41-NEXT: cmovbl %eax, %edx
+; SSE41-NEXT: pinsrd $3, %edx, %xmm0
+; SSE41-NEXT: pextrd $1, %xmm3, %ecx
+; SSE41-NEXT: pextrd $1, %xmm1, %edx
+; SSE41-NEXT: subl %ecx, %edx
+; SSE41-NEXT: cmovbl %eax, %edx
+; SSE41-NEXT: movd %xmm3, %ecx
+; SSE41-NEXT: movd %xmm1, %esi
+; SSE41-NEXT: subl %ecx, %esi
+; SSE41-NEXT: cmovbl %eax, %esi
+; SSE41-NEXT: movd %esi, %xmm2
+; SSE41-NEXT: pinsrd $1, %edx, %xmm2
+; SSE41-NEXT: pextrd $2, %xmm3, %ecx
+; SSE41-NEXT: pextrd $2, %xmm1, %edx
+; SSE41-NEXT: subl %ecx, %edx
+; SSE41-NEXT: cmovbl %eax, %edx
+; SSE41-NEXT: pinsrd $2, %edx, %xmm2
+; SSE41-NEXT: pextrd $3, %xmm3, %ecx
+; SSE41-NEXT: pextrd $3, %xmm1, %edx
+; SSE41-NEXT: subl %ecx, %edx
+; SSE41-NEXT: cmovbl %eax, %edx
+; SSE41-NEXT: pinsrd $3, %edx, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpextrd $1, %xmm2, %ecx
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm3
-; AVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrd $1, %xmm3, %edx
+; AVX1-NEXT: xorl %eax, %eax
+; AVX1-NEXT: subl %ecx, %edx
+; AVX1-NEXT: cmovbl %eax, %edx
+; AVX1-NEXT: vmovd %xmm2, %ecx
+; AVX1-NEXT: vmovd %xmm3, %esi
+; AVX1-NEXT: subl %ecx, %esi
+; AVX1-NEXT: cmovbl %eax, %esi
+; AVX1-NEXT: vmovd %esi, %xmm4
+; AVX1-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
+; AVX1-NEXT: vpextrd $2, %xmm2, %ecx
+; AVX1-NEXT: vpextrd $2, %xmm3, %edx
+; AVX1-NEXT: subl %ecx, %edx
+; AVX1-NEXT: cmovbl %eax, %edx
+; AVX1-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
+; AVX1-NEXT: vpextrd $3, %xmm2, %ecx
+; AVX1-NEXT: vpextrd $3, %xmm3, %edx
+; AVX1-NEXT: subl %ecx, %edx
+; AVX1-NEXT: cmovbl %eax, %edx
+; AVX1-NEXT: vpinsrd $3, %edx, %xmm4, %xmm2
+; AVX1-NEXT: vpextrd $1, %xmm1, %ecx
+; AVX1-NEXT: vpextrd $1, %xmm0, %edx
+; AVX1-NEXT: subl %ecx, %edx
+; AVX1-NEXT: cmovbl %eax, %edx
+; AVX1-NEXT: vmovd %xmm1, %ecx
+; AVX1-NEXT: vmovd %xmm0, %esi
+; AVX1-NEXT: subl %ecx, %esi
+; AVX1-NEXT: cmovbl %eax, %esi
+; AVX1-NEXT: vmovd %esi, %xmm3
+; AVX1-NEXT: vpinsrd $1, %edx, %xmm3, %xmm3
+; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
+; AVX1-NEXT: vpextrd $2, %xmm0, %edx
+; AVX1-NEXT: subl %ecx, %edx
+; AVX1-NEXT: cmovbl %eax, %edx
+; AVX1-NEXT: vpinsrd $2, %edx, %xmm3, %xmm3
+; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
+; AVX1-NEXT: vpextrd $3, %xmm0, %edx
+; AVX1-NEXT: subl %ecx, %edx
+; AVX1-NEXT: cmovbl %eax, %edx
+; AVX1-NEXT: vpinsrd $3, %edx, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpextrd $1, %xmm2, %ecx
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX2-NEXT: vpextrd $1, %xmm3, %edx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: subl %ecx, %edx
+; AVX2-NEXT: cmovbl %eax, %edx
+; AVX2-NEXT: vmovd %xmm2, %ecx
+; AVX2-NEXT: vmovd %xmm3, %esi
+; AVX2-NEXT: subl %ecx, %esi
+; AVX2-NEXT: cmovbl %eax, %esi
+; AVX2-NEXT: vmovd %esi, %xmm4
+; AVX2-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
+; AVX2-NEXT: vpextrd $2, %xmm2, %ecx
+; AVX2-NEXT: vpextrd $2, %xmm3, %edx
+; AVX2-NEXT: subl %ecx, %edx
+; AVX2-NEXT: cmovbl %eax, %edx
+; AVX2-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
+; AVX2-NEXT: vpextrd $3, %xmm2, %ecx
+; AVX2-NEXT: vpextrd $3, %xmm3, %edx
+; AVX2-NEXT: subl %ecx, %edx
+; AVX2-NEXT: cmovbl %eax, %edx
+; AVX2-NEXT: vpinsrd $3, %edx, %xmm4, %xmm2
+; AVX2-NEXT: vpextrd $1, %xmm1, %ecx
+; AVX2-NEXT: vpextrd $1, %xmm0, %edx
+; AVX2-NEXT: subl %ecx, %edx
+; AVX2-NEXT: cmovbl %eax, %edx
+; AVX2-NEXT: vmovd %xmm1, %ecx
+; AVX2-NEXT: vmovd %xmm0, %esi
+; AVX2-NEXT: subl %ecx, %esi
+; AVX2-NEXT: cmovbl %eax, %esi
+; AVX2-NEXT: vmovd %esi, %xmm3
+; AVX2-NEXT: vpinsrd $1, %edx, %xmm3, %xmm3
+; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
+; AVX2-NEXT: vpextrd $2, %xmm0, %edx
+; AVX2-NEXT: subl %ecx, %edx
+; AVX2-NEXT: cmovbl %eax, %edx
+; AVX2-NEXT: vpinsrd $2, %edx, %xmm3, %xmm3
+; AVX2-NEXT: vpextrd $3, %xmm1, %ecx
+; AVX2-NEXT: vpextrd $3, %xmm0, %edx
+; AVX2-NEXT: subl %ecx, %edx
+; AVX2-NEXT: cmovbl %eax, %edx
+; AVX2-NEXT: vpinsrd $3, %edx, %xmm3, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: v8i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512-NEXT: vpextrd $1, %xmm2, %ecx
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX512-NEXT: vpextrd $1, %xmm3, %edx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: subl %ecx, %edx
+; AVX512-NEXT: cmovbl %eax, %edx
+; AVX512-NEXT: vmovd %xmm2, %ecx
+; AVX512-NEXT: vmovd %xmm3, %esi
+; AVX512-NEXT: subl %ecx, %esi
+; AVX512-NEXT: cmovbl %eax, %esi
+; AVX512-NEXT: vmovd %esi, %xmm4
+; AVX512-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
+; AVX512-NEXT: vpextrd $2, %xmm2, %ecx
+; AVX512-NEXT: vpextrd $2, %xmm3, %edx
+; AVX512-NEXT: subl %ecx, %edx
+; AVX512-NEXT: cmovbl %eax, %edx
+; AVX512-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
+; AVX512-NEXT: vpextrd $3, %xmm2, %ecx
+; AVX512-NEXT: vpextrd $3, %xmm3, %edx
+; AVX512-NEXT: subl %ecx, %edx
+; AVX512-NEXT: cmovbl %eax, %edx
+; AVX512-NEXT: vpinsrd $3, %edx, %xmm4, %xmm2
+; AVX512-NEXT: vpextrd $1, %xmm1, %ecx
+; AVX512-NEXT: vpextrd $1, %xmm0, %edx
+; AVX512-NEXT: subl %ecx, %edx
+; AVX512-NEXT: cmovbl %eax, %edx
+; AVX512-NEXT: vmovd %xmm1, %ecx
+; AVX512-NEXT: vmovd %xmm0, %esi
+; AVX512-NEXT: subl %ecx, %esi
+; AVX512-NEXT: cmovbl %eax, %esi
+; AVX512-NEXT: vmovd %esi, %xmm3
+; AVX512-NEXT: vpinsrd $1, %edx, %xmm3, %xmm3
+; AVX512-NEXT: vpextrd $2, %xmm1, %ecx
+; AVX512-NEXT: vpextrd $2, %xmm0, %edx
+; AVX512-NEXT: subl %ecx, %edx
+; AVX512-NEXT: cmovbl %eax, %edx
+; AVX512-NEXT: vpinsrd $2, %edx, %xmm3, %xmm3
+; AVX512-NEXT: vpextrd $3, %xmm1, %ecx
+; AVX512-NEXT: vpextrd $3, %xmm0, %edx
+; AVX512-NEXT: subl %ecx, %edx
+; AVX512-NEXT: cmovbl %eax, %edx
+; AVX512-NEXT: vpinsrd $3, %edx, %xmm3, %xmm0
+; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; AVX512-NEXT: retq
  %z = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %x, <8 x i32> %y)
  ret <8 x i32> %z
@@ -865,129 +1170,616 @@ define <16 x i32> @v16i32(<16 x i32> %x, <16 x i32> %y) nounwind {
; SSE2-LABEL: v16i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm8
-; SSE2-NEXT: movdqa %xmm0, %xmm10
-; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm4, %xmm1
-; SSE2-NEXT: pxor %xmm9, %xmm1
-; SSE2-NEXT: pxor %xmm9, %xmm0
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm10
-; SSE2-NEXT: pandn %xmm4, %xmm0
-; SSE2-NEXT: por %xmm10, %xmm0
-; SSE2-NEXT: psubd %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm5, %xmm4
-; SSE2-NEXT: pxor %xmm9, %xmm4
-; SSE2-NEXT: movdqa %xmm8, %xmm1
-; SSE2-NEXT: pxor %xmm9, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm8
-; SSE2-NEXT: pandn %xmm5, %xmm1
-; SSE2-NEXT: por %xmm8, %xmm1
-; SSE2-NEXT: psubd %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm6, %xmm5
-; SSE2-NEXT: pxor %xmm9, %xmm5
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pxor %xmm9, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: pandn %xmm6, %xmm4
-; SSE2-NEXT: por %xmm2, %xmm4
-; SSE2-NEXT: psubd %xmm6, %xmm4
-; SSE2-NEXT: movdqa %xmm7, %xmm2
-; SSE2-NEXT: pxor %xmm9, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm9
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm9
-; SSE2-NEXT: pand %xmm9, %xmm3
-; SSE2-NEXT: pandn %xmm7, %xmm9
-; SSE2-NEXT: por %xmm9, %xmm3
-; SSE2-NEXT: psubd %xmm7, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,1,2,3]
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSE2-NEXT: movd %xmm0, %edx
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm9
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm10
+; SSE2-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
+; SSE2-NEXT: movd %xmm4, %ecx
+; SSE2-NEXT: movd %xmm1, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
+; SSE2-NEXT: movd %xmm4, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE2-NEXT: movd %xmm1, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm1
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm5[3,1,2,3]
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm8[3,1,2,3]
+; SSE2-NEXT: movd %xmm1, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1]
+; SSE2-NEXT: movd %xmm4, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,3,0,1]
+; SSE2-NEXT: movd %xmm4, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE2-NEXT: movd %xmm5, %ecx
+; SSE2-NEXT: movd %xmm8, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
+; SSE2-NEXT: movd %xmm5, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm8[1,1,2,3]
+; SSE2-NEXT: movd %xmm5, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm6[3,1,2,3]
+; SSE2-NEXT: movd %xmm4, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[3,1,2,3]
+; SSE2-NEXT: movd %xmm4, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,0,1]
+; SSE2-NEXT: movd %xmm5, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
+; SSE2-NEXT: movd %xmm5, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
+; SSE2-NEXT: movd %xmm6, %ecx
+; SSE2-NEXT: movd %xmm2, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,2,3]
+; SSE2-NEXT: movd %xmm6, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; SSE2-NEXT: movd %xmm2, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm7[3,1,2,3]
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3]
+; SSE2-NEXT: movd %xmm2, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[2,3,0,1]
+; SSE2-NEXT: movd %xmm5, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,3,0,1]
+; SSE2-NEXT: movd %xmm5, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm6
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
+; SSE2-NEXT: movd %xmm7, %ecx
+; SSE2-NEXT: movd %xmm3, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm7[1,1,2,3]
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,2,3]
+; SSE2-NEXT: movd %xmm2, %edx
+; SSE2-NEXT: subl %ecx, %edx
+; SSE2-NEXT: cmovbl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
; SSE2-NEXT: movdqa %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm5, %xmm3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v16i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa %xmm1, %xmm8
-; SSSE3-NEXT: movdqa %xmm0, %xmm10
-; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [2147483648,2147483648,2147483648,2147483648]
-; SSSE3-NEXT: movdqa %xmm4, %xmm1
-; SSSE3-NEXT: pxor %xmm9, %xmm1
-; SSSE3-NEXT: pxor %xmm9, %xmm0
-; SSSE3-NEXT:
pcmpgtd %xmm1, %xmm0 -; SSSE3-NEXT: pand %xmm0, %xmm10 -; SSSE3-NEXT: pandn %xmm4, %xmm0 -; SSSE3-NEXT: por %xmm10, %xmm0 -; SSSE3-NEXT: psubd %xmm4, %xmm0 -; SSSE3-NEXT: movdqa %xmm5, %xmm4 -; SSSE3-NEXT: pxor %xmm9, %xmm4 -; SSSE3-NEXT: movdqa %xmm8, %xmm1 -; SSSE3-NEXT: pxor %xmm9, %xmm1 -; SSSE3-NEXT: pcmpgtd %xmm4, %xmm1 -; SSSE3-NEXT: pand %xmm1, %xmm8 -; SSSE3-NEXT: pandn %xmm5, %xmm1 -; SSSE3-NEXT: por %xmm8, %xmm1 -; SSSE3-NEXT: psubd %xmm5, %xmm1 -; SSSE3-NEXT: movdqa %xmm6, %xmm5 -; SSSE3-NEXT: pxor %xmm9, %xmm5 -; SSSE3-NEXT: movdqa %xmm2, %xmm4 -; SSSE3-NEXT: pxor %xmm9, %xmm4 -; SSSE3-NEXT: pcmpgtd %xmm5, %xmm4 -; SSSE3-NEXT: pand %xmm4, %xmm2 -; SSSE3-NEXT: pandn %xmm6, %xmm4 -; SSSE3-NEXT: por %xmm2, %xmm4 -; SSSE3-NEXT: psubd %xmm6, %xmm4 -; SSSE3-NEXT: movdqa %xmm7, %xmm2 -; SSSE3-NEXT: pxor %xmm9, %xmm2 -; SSSE3-NEXT: pxor %xmm3, %xmm9 -; SSSE3-NEXT: pcmpgtd %xmm2, %xmm9 -; SSSE3-NEXT: pand %xmm9, %xmm3 -; SSSE3-NEXT: pandn %xmm7, %xmm9 -; SSSE3-NEXT: por %xmm9, %xmm3 -; SSSE3-NEXT: psubd %xmm7, %xmm3 +; SSSE3-NEXT: movdqa %xmm0, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,1,2,3] +; SSSE3-NEXT: movd %xmm0, %ecx +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3] +; SSSE3-NEXT: movd %xmm0, %edx +; SSSE3-NEXT: xorl %eax, %eax +; SSSE3-NEXT: subl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm9 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1] +; SSSE3-NEXT: movd %xmm0, %ecx +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSSE3-NEXT: movd %xmm0, %edx +; SSSE3-NEXT: subl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm10 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1] +; SSSE3-NEXT: movd %xmm4, %ecx +; SSSE3-NEXT: movd %xmm1, %edx +; SSSE3-NEXT: subl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3] +; SSSE3-NEXT: movd %xmm4, %ecx +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] +; SSSE3-NEXT: movd %xmm1, %edx +; SSSE3-NEXT: subl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm1 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0] +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm5[3,1,2,3] +; SSSE3-NEXT: movd %xmm1, %ecx +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm8[3,1,2,3] +; SSSE3-NEXT: movd %xmm1, %edx +; SSSE3-NEXT: subl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1] +; SSSE3-NEXT: movd %xmm4, %ecx +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,3,0,1] +; SSSE3-NEXT: movd %xmm4, %edx +; SSSE3-NEXT: subl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm4 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1] +; SSSE3-NEXT: movd %xmm5, %ecx +; SSSE3-NEXT: movd %xmm8, %edx +; SSSE3-NEXT: subl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3] +; SSSE3-NEXT: movd %xmm5, %ecx +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm8[1,1,2,3] +; SSSE3-NEXT: movd %xmm5, %edx +; SSSE3-NEXT: subl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm5 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1] +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0] +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm6[3,1,2,3] +; SSSE3-NEXT: movd %xmm4, %ecx +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[3,1,2,3] 
+; SSSE3-NEXT: movd %xmm4, %edx +; SSSE3-NEXT: subl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,0,1] +; SSSE3-NEXT: movd %xmm5, %ecx +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1] +; SSSE3-NEXT: movd %xmm5, %edx +; SSSE3-NEXT: subl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm5 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1] +; SSSE3-NEXT: movd %xmm6, %ecx +; SSSE3-NEXT: movd %xmm2, %edx +; SSSE3-NEXT: subl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,2,3] +; SSSE3-NEXT: movd %xmm6, %ecx +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3] +; SSSE3-NEXT: movd %xmm2, %edx +; SSSE3-NEXT: subl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm2 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0] +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm7[3,1,2,3] +; SSSE3-NEXT: movd %xmm2, %ecx +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3] +; SSSE3-NEXT: movd %xmm2, %edx +; SSSE3-NEXT: subl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm7[2,3,0,1] +; SSSE3-NEXT: movd %xmm5, %ecx +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,3,0,1] +; SSSE3-NEXT: movd %xmm5, %edx +; SSSE3-NEXT: subl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm6 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1] +; SSSE3-NEXT: movd %xmm7, %ecx +; SSSE3-NEXT: movd %xmm3, %edx +; SSSE3-NEXT: subl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm5 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm7[1,1,2,3] +; SSSE3-NEXT: movd %xmm2, %ecx +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,2,3] +; SSSE3-NEXT: movd %xmm2, %edx +; SSSE3-NEXT: subl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm2 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1] +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0] ; SSSE3-NEXT: movdqa %xmm4, %xmm2 +; SSSE3-NEXT: movdqa %xmm5, %xmm3 ; SSSE3-NEXT: retq ; ; SSE41-LABEL: v16i32: ; SSE41: # %bb.0: -; SSE41-NEXT: pmaxud %xmm4, %xmm0 -; SSE41-NEXT: psubd %xmm4, %xmm0 -; SSE41-NEXT: pmaxud %xmm5, %xmm1 -; SSE41-NEXT: psubd %xmm5, %xmm1 -; SSE41-NEXT: pmaxud %xmm6, %xmm2 -; SSE41-NEXT: psubd %xmm6, %xmm2 -; SSE41-NEXT: pmaxud %xmm7, %xmm3 -; SSE41-NEXT: psubd %xmm7, %xmm3 +; SSE41-NEXT: movdqa %xmm1, %xmm8 +; SSE41-NEXT: movdqa %xmm0, %xmm1 +; SSE41-NEXT: pextrd $1, %xmm4, %ecx +; SSE41-NEXT: pextrd $1, %xmm0, %edx +; SSE41-NEXT: xorl %eax, %eax +; SSE41-NEXT: subl %ecx, %edx +; SSE41-NEXT: cmovbl %eax, %edx +; SSE41-NEXT: movd %xmm4, %ecx +; SSE41-NEXT: movd %xmm0, %esi +; SSE41-NEXT: subl %ecx, %esi +; SSE41-NEXT: cmovbl %eax, %esi +; SSE41-NEXT: movd %esi, %xmm0 +; SSE41-NEXT: pinsrd $1, %edx, %xmm0 +; SSE41-NEXT: pextrd $2, %xmm4, %ecx +; SSE41-NEXT: pextrd $2, %xmm1, %edx +; SSE41-NEXT: subl %ecx, %edx +; SSE41-NEXT: cmovbl %eax, %edx +; SSE41-NEXT: pinsrd $2, %edx, %xmm0 +; SSE41-NEXT: pextrd $3, %xmm4, %ecx +; SSE41-NEXT: pextrd $3, %xmm1, %edx +; SSE41-NEXT: subl %ecx, %edx +; SSE41-NEXT: cmovbl %eax, %edx +; SSE41-NEXT: pinsrd $3, %edx, %xmm0 +; SSE41-NEXT: pextrd $1, %xmm5, %ecx +; SSE41-NEXT: pextrd $1, %xmm8, %edx +; SSE41-NEXT: subl %ecx, %edx +; SSE41-NEXT: cmovbl %eax, %edx +; SSE41-NEXT: movd %xmm5, %ecx +; 
SSE41-NEXT: movd %xmm8, %esi +; SSE41-NEXT: subl %ecx, %esi +; SSE41-NEXT: cmovbl %eax, %esi +; SSE41-NEXT: movd %esi, %xmm1 +; SSE41-NEXT: pinsrd $1, %edx, %xmm1 +; SSE41-NEXT: pextrd $2, %xmm5, %ecx +; SSE41-NEXT: pextrd $2, %xmm8, %edx +; SSE41-NEXT: subl %ecx, %edx +; SSE41-NEXT: cmovbl %eax, %edx +; SSE41-NEXT: pinsrd $2, %edx, %xmm1 +; SSE41-NEXT: pextrd $3, %xmm5, %ecx +; SSE41-NEXT: pextrd $3, %xmm8, %edx +; SSE41-NEXT: subl %ecx, %edx +; SSE41-NEXT: cmovbl %eax, %edx +; SSE41-NEXT: pinsrd $3, %edx, %xmm1 +; SSE41-NEXT: pextrd $1, %xmm6, %ecx +; SSE41-NEXT: pextrd $1, %xmm2, %edx +; SSE41-NEXT: subl %ecx, %edx +; SSE41-NEXT: cmovbl %eax, %edx +; SSE41-NEXT: movd %xmm6, %ecx +; SSE41-NEXT: movd %xmm2, %esi +; SSE41-NEXT: subl %ecx, %esi +; SSE41-NEXT: cmovbl %eax, %esi +; SSE41-NEXT: movd %esi, %xmm4 +; SSE41-NEXT: pinsrd $1, %edx, %xmm4 +; SSE41-NEXT: pextrd $2, %xmm6, %ecx +; SSE41-NEXT: pextrd $2, %xmm2, %edx +; SSE41-NEXT: subl %ecx, %edx +; SSE41-NEXT: cmovbl %eax, %edx +; SSE41-NEXT: pinsrd $2, %edx, %xmm4 +; SSE41-NEXT: pextrd $3, %xmm6, %ecx +; SSE41-NEXT: pextrd $3, %xmm2, %edx +; SSE41-NEXT: subl %ecx, %edx +; SSE41-NEXT: cmovbl %eax, %edx +; SSE41-NEXT: pinsrd $3, %edx, %xmm4 +; SSE41-NEXT: pextrd $1, %xmm7, %ecx +; SSE41-NEXT: pextrd $1, %xmm3, %edx +; SSE41-NEXT: subl %ecx, %edx +; SSE41-NEXT: cmovbl %eax, %edx +; SSE41-NEXT: movd %xmm7, %ecx +; SSE41-NEXT: movd %xmm3, %esi +; SSE41-NEXT: subl %ecx, %esi +; SSE41-NEXT: cmovbl %eax, %esi +; SSE41-NEXT: movd %esi, %xmm5 +; SSE41-NEXT: pinsrd $1, %edx, %xmm5 +; SSE41-NEXT: pextrd $2, %xmm7, %ecx +; SSE41-NEXT: pextrd $2, %xmm3, %edx +; SSE41-NEXT: subl %ecx, %edx +; SSE41-NEXT: cmovbl %eax, %edx +; SSE41-NEXT: pinsrd $2, %edx, %xmm5 +; SSE41-NEXT: pextrd $3, %xmm7, %ecx +; SSE41-NEXT: pextrd $3, %xmm3, %edx +; SSE41-NEXT: subl %ecx, %edx +; SSE41-NEXT: cmovbl %eax, %edx +; SSE41-NEXT: pinsrd $3, %edx, %xmm5 +; SSE41-NEXT: movdqa %xmm4, %xmm2 +; SSE41-NEXT: movdqa %xmm5, %xmm3 ; SSE41-NEXT: retq ; ; AVX1-LABEL: v16i32: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 +; AVX1-NEXT: vpextrd $1, %xmm4, %ecx ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5 -; AVX1-NEXT: vpmaxud %xmm4, %xmm5, %xmm5 -; AVX1-NEXT: vpsubd %xmm4, %xmm5, %xmm4 -; AVX1-NEXT: vpmaxud %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpsubd %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpextrd $1, %xmm5, %edx +; AVX1-NEXT: xorl %eax, %eax +; AVX1-NEXT: subl %ecx, %edx +; AVX1-NEXT: cmovbl %eax, %edx +; AVX1-NEXT: vmovd %xmm4, %ecx +; AVX1-NEXT: vmovd %xmm5, %esi +; AVX1-NEXT: subl %ecx, %esi +; AVX1-NEXT: cmovbl %eax, %esi +; AVX1-NEXT: vmovd %esi, %xmm6 +; AVX1-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6 +; AVX1-NEXT: vpextrd $2, %xmm4, %ecx +; AVX1-NEXT: vpextrd $2, %xmm5, %edx +; AVX1-NEXT: subl %ecx, %edx +; AVX1-NEXT: cmovbl %eax, %edx +; AVX1-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6 +; AVX1-NEXT: vpextrd $3, %xmm4, %ecx +; AVX1-NEXT: vpextrd $3, %xmm5, %edx +; AVX1-NEXT: subl %ecx, %edx +; AVX1-NEXT: cmovbl %eax, %edx +; AVX1-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4 +; AVX1-NEXT: vpextrd $1, %xmm2, %ecx +; AVX1-NEXT: vpextrd $1, %xmm0, %edx +; AVX1-NEXT: subl %ecx, %edx +; AVX1-NEXT: cmovbl %eax, %edx +; AVX1-NEXT: vmovd %xmm2, %ecx +; AVX1-NEXT: vmovd %xmm0, %esi +; AVX1-NEXT: subl %ecx, %esi +; AVX1-NEXT: cmovbl %eax, %esi +; AVX1-NEXT: vmovd %esi, %xmm5 +; AVX1-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5 +; AVX1-NEXT: vpextrd $2, %xmm2, %ecx +; AVX1-NEXT: vpextrd $2, %xmm0, %edx +; AVX1-NEXT: subl %ecx, %edx +; AVX1-NEXT: cmovbl %eax, %edx +; AVX1-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5 +; 
AVX1-NEXT: vpextrd $3, %xmm2, %ecx +; AVX1-NEXT: vpextrd $3, %xmm0, %edx +; AVX1-NEXT: subl %ecx, %edx +; AVX1-NEXT: cmovbl %eax, %edx +; AVX1-NEXT: vpinsrd $3, %edx, %xmm5, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2 +; AVX1-NEXT: vpextrd $1, %xmm2, %ecx ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4 -; AVX1-NEXT: vpmaxud %xmm2, %xmm4, %xmm4 -; AVX1-NEXT: vpsubd %xmm2, %xmm4, %xmm2 -; AVX1-NEXT: vpmaxud %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpextrd $1, %xmm4, %edx +; AVX1-NEXT: subl %ecx, %edx +; AVX1-NEXT: cmovbl %eax, %edx +; AVX1-NEXT: vmovd %xmm2, %ecx +; AVX1-NEXT: vmovd %xmm4, %esi +; AVX1-NEXT: subl %ecx, %esi +; AVX1-NEXT: cmovbl %eax, %esi +; AVX1-NEXT: vmovd %esi, %xmm5 +; AVX1-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5 +; AVX1-NEXT: vpextrd $2, %xmm2, %ecx +; AVX1-NEXT: vpextrd $2, %xmm4, %edx +; AVX1-NEXT: subl %ecx, %edx +; AVX1-NEXT: cmovbl %eax, %edx +; AVX1-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5 +; AVX1-NEXT: vpextrd $3, %xmm2, %ecx +; AVX1-NEXT: vpextrd $3, %xmm4, %edx +; AVX1-NEXT: subl %ecx, %edx +; AVX1-NEXT: cmovbl %eax, %edx +; AVX1-NEXT: vpinsrd $3, %edx, %xmm5, %xmm2 +; AVX1-NEXT: vpextrd $1, %xmm3, %ecx +; AVX1-NEXT: vpextrd $1, %xmm1, %edx +; AVX1-NEXT: subl %ecx, %edx +; AVX1-NEXT: cmovbl %eax, %edx +; AVX1-NEXT: vmovd %xmm3, %ecx +; AVX1-NEXT: vmovd %xmm1, %esi +; AVX1-NEXT: subl %ecx, %esi +; AVX1-NEXT: cmovbl %eax, %esi +; AVX1-NEXT: vmovd %esi, %xmm4 +; AVX1-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrd $2, %xmm3, %ecx +; AVX1-NEXT: vpextrd $2, %xmm1, %edx +; AVX1-NEXT: subl %ecx, %edx +; AVX1-NEXT: cmovbl %eax, %edx +; AVX1-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrd $3, %xmm3, %ecx +; AVX1-NEXT: vpextrd $3, %xmm1, %edx +; AVX1-NEXT: subl %ecx, %edx +; AVX1-NEXT: cmovbl %eax, %edx +; AVX1-NEXT: vpinsrd $3, %edx, %xmm4, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 ; AVX1-NEXT: retq ; ; AVX2-LABEL: v16i32: ; AVX2: # %bb.0: -; AVX2-NEXT: vpmaxud %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpsubd %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpmaxud %ymm3, %ymm1, %ymm1 -; AVX2-NEXT: vpsubd %ymm3, %ymm1, %ymm1 +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4 +; AVX2-NEXT: vpextrd $1, %xmm4, %ecx +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5 +; AVX2-NEXT: vpextrd $1, %xmm5, %edx +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: subl %ecx, %edx +; AVX2-NEXT: cmovbl %eax, %edx +; AVX2-NEXT: vmovd %xmm4, %ecx +; AVX2-NEXT: vmovd %xmm5, %esi +; AVX2-NEXT: subl %ecx, %esi +; AVX2-NEXT: cmovbl %eax, %esi +; AVX2-NEXT: vmovd %esi, %xmm6 +; AVX2-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6 +; AVX2-NEXT: vpextrd $2, %xmm4, %ecx +; AVX2-NEXT: vpextrd $2, %xmm5, %edx +; AVX2-NEXT: subl %ecx, %edx +; AVX2-NEXT: cmovbl %eax, %edx +; AVX2-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6 +; AVX2-NEXT: vpextrd $3, %xmm4, %ecx +; AVX2-NEXT: vpextrd $3, %xmm5, %edx +; AVX2-NEXT: subl %ecx, %edx +; AVX2-NEXT: cmovbl %eax, %edx +; AVX2-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4 +; AVX2-NEXT: vpextrd $1, %xmm2, %ecx +; AVX2-NEXT: vpextrd $1, %xmm0, %edx +; AVX2-NEXT: subl %ecx, %edx +; AVX2-NEXT: cmovbl %eax, %edx +; AVX2-NEXT: vmovd %xmm2, %ecx +; AVX2-NEXT: vmovd %xmm0, %esi +; AVX2-NEXT: subl %ecx, %esi +; AVX2-NEXT: cmovbl %eax, %esi +; AVX2-NEXT: vmovd %esi, %xmm5 +; AVX2-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5 +; AVX2-NEXT: vpextrd $2, %xmm2, %ecx +; AVX2-NEXT: vpextrd $2, %xmm0, %edx +; AVX2-NEXT: subl %ecx, %edx +; AVX2-NEXT: cmovbl %eax, %edx +; AVX2-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5 +; AVX2-NEXT: vpextrd $3, %xmm2, 
%ecx +; AVX2-NEXT: vpextrd $3, %xmm0, %edx +; AVX2-NEXT: subl %ecx, %edx +; AVX2-NEXT: cmovbl %eax, %edx +; AVX2-NEXT: vpinsrd $3, %edx, %xmm5, %xmm0 +; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2 +; AVX2-NEXT: vpextrd $1, %xmm2, %ecx +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4 +; AVX2-NEXT: vpextrd $1, %xmm4, %edx +; AVX2-NEXT: subl %ecx, %edx +; AVX2-NEXT: cmovbl %eax, %edx +; AVX2-NEXT: vmovd %xmm2, %ecx +; AVX2-NEXT: vmovd %xmm4, %esi +; AVX2-NEXT: subl %ecx, %esi +; AVX2-NEXT: cmovbl %eax, %esi +; AVX2-NEXT: vmovd %esi, %xmm5 +; AVX2-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5 +; AVX2-NEXT: vpextrd $2, %xmm2, %ecx +; AVX2-NEXT: vpextrd $2, %xmm4, %edx +; AVX2-NEXT: subl %ecx, %edx +; AVX2-NEXT: cmovbl %eax, %edx +; AVX2-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5 +; AVX2-NEXT: vpextrd $3, %xmm2, %ecx +; AVX2-NEXT: vpextrd $3, %xmm4, %edx +; AVX2-NEXT: subl %ecx, %edx +; AVX2-NEXT: cmovbl %eax, %edx +; AVX2-NEXT: vpinsrd $3, %edx, %xmm5, %xmm2 +; AVX2-NEXT: vpextrd $1, %xmm3, %ecx +; AVX2-NEXT: vpextrd $1, %xmm1, %edx +; AVX2-NEXT: subl %ecx, %edx +; AVX2-NEXT: cmovbl %eax, %edx +; AVX2-NEXT: vmovd %xmm3, %ecx +; AVX2-NEXT: vmovd %xmm1, %esi +; AVX2-NEXT: subl %ecx, %esi +; AVX2-NEXT: cmovbl %eax, %esi +; AVX2-NEXT: vmovd %esi, %xmm4 +; AVX2-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrd $2, %xmm3, %ecx +; AVX2-NEXT: vpextrd $2, %xmm1, %edx +; AVX2-NEXT: subl %ecx, %edx +; AVX2-NEXT: cmovbl %eax, %edx +; AVX2-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrd $3, %xmm3, %ecx +; AVX2-NEXT: vpextrd $3, %xmm1, %edx +; AVX2-NEXT: subl %ecx, %edx +; AVX2-NEXT: cmovbl %eax, %edx +; AVX2-NEXT: vpinsrd $3, %edx, %xmm4, %xmm1 +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 ; AVX2-NEXT: retq ; ; AVX512-LABEL: v16i32: ; AVX512: # %bb.0: -; AVX512-NEXT: vpmaxud %zmm1, %zmm0, %zmm0 -; AVX512-NEXT: vpsubd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm2 +; AVX512-NEXT: vpextrd $1, %xmm2, %ecx +; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm3 +; AVX512-NEXT: vpextrd $1, %xmm3, %edx +; AVX512-NEXT: xorl %eax, %eax +; AVX512-NEXT: subl %ecx, %edx +; AVX512-NEXT: cmovbl %eax, %edx +; AVX512-NEXT: vmovd %xmm2, %ecx +; AVX512-NEXT: vmovd %xmm3, %esi +; AVX512-NEXT: subl %ecx, %esi +; AVX512-NEXT: cmovbl %eax, %esi +; AVX512-NEXT: vmovd %esi, %xmm4 +; AVX512-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4 +; AVX512-NEXT: vpextrd $2, %xmm2, %ecx +; AVX512-NEXT: vpextrd $2, %xmm3, %edx +; AVX512-NEXT: subl %ecx, %edx +; AVX512-NEXT: cmovbl %eax, %edx +; AVX512-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4 +; AVX512-NEXT: vpextrd $3, %xmm2, %ecx +; AVX512-NEXT: vpextrd $3, %xmm3, %edx +; AVX512-NEXT: subl %ecx, %edx +; AVX512-NEXT: cmovbl %eax, %edx +; AVX512-NEXT: vpinsrd $3, %edx, %xmm4, %xmm2 +; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm3 +; AVX512-NEXT: vpextrd $1, %xmm3, %ecx +; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm4 +; AVX512-NEXT: vpextrd $1, %xmm4, %edx +; AVX512-NEXT: subl %ecx, %edx +; AVX512-NEXT: cmovbl %eax, %edx +; AVX512-NEXT: vmovd %xmm3, %ecx +; AVX512-NEXT: vmovd %xmm4, %esi +; AVX512-NEXT: subl %ecx, %esi +; AVX512-NEXT: cmovbl %eax, %esi +; AVX512-NEXT: vmovd %esi, %xmm5 +; AVX512-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5 +; AVX512-NEXT: vpextrd $2, %xmm3, %ecx +; AVX512-NEXT: vpextrd $2, %xmm4, %edx +; AVX512-NEXT: subl %ecx, %edx +; AVX512-NEXT: cmovbl %eax, %edx +; AVX512-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5 +; AVX512-NEXT: vpextrd $3, %xmm3, %ecx +; AVX512-NEXT: vpextrd $3, %xmm4, %edx +; AVX512-NEXT: subl %ecx, %edx +; AVX512-NEXT: 
cmovbl %eax, %edx +; AVX512-NEXT: vpinsrd $3, %edx, %xmm5, %xmm3 +; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2 +; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm3 +; AVX512-NEXT: vpextrd $1, %xmm3, %ecx +; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm4 +; AVX512-NEXT: vpextrd $1, %xmm4, %edx +; AVX512-NEXT: subl %ecx, %edx +; AVX512-NEXT: cmovbl %eax, %edx +; AVX512-NEXT: vmovd %xmm3, %ecx +; AVX512-NEXT: vmovd %xmm4, %esi +; AVX512-NEXT: subl %ecx, %esi +; AVX512-NEXT: cmovbl %eax, %esi +; AVX512-NEXT: vmovd %esi, %xmm5 +; AVX512-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5 +; AVX512-NEXT: vpextrd $2, %xmm3, %ecx +; AVX512-NEXT: vpextrd $2, %xmm4, %edx +; AVX512-NEXT: subl %ecx, %edx +; AVX512-NEXT: cmovbl %eax, %edx +; AVX512-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5 +; AVX512-NEXT: vpextrd $3, %xmm3, %ecx +; AVX512-NEXT: vpextrd $3, %xmm4, %edx +; AVX512-NEXT: subl %ecx, %edx +; AVX512-NEXT: cmovbl %eax, %edx +; AVX512-NEXT: vpinsrd $3, %edx, %xmm5, %xmm3 +; AVX512-NEXT: vpextrd $1, %xmm1, %ecx +; AVX512-NEXT: vpextrd $1, %xmm0, %edx +; AVX512-NEXT: subl %ecx, %edx +; AVX512-NEXT: cmovbl %eax, %edx +; AVX512-NEXT: vmovd %xmm1, %ecx +; AVX512-NEXT: vmovd %xmm0, %esi +; AVX512-NEXT: subl %ecx, %esi +; AVX512-NEXT: cmovbl %eax, %esi +; AVX512-NEXT: vmovd %esi, %xmm4 +; AVX512-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4 +; AVX512-NEXT: vpextrd $2, %xmm1, %ecx +; AVX512-NEXT: vpextrd $2, %xmm0, %edx +; AVX512-NEXT: subl %ecx, %edx +; AVX512-NEXT: cmovbl %eax, %edx +; AVX512-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4 +; AVX512-NEXT: vpextrd $3, %xmm1, %ecx +; AVX512-NEXT: vpextrd $3, %xmm0, %edx +; AVX512-NEXT: subl %ecx, %edx +; AVX512-NEXT: cmovbl %eax, %edx +; AVX512-NEXT: vpinsrd $3, %edx, %xmm4, %xmm0 +; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0 +; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 ; AVX512-NEXT: retq %z = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %x, <16 x i32> %y) ret <16 x i32> %z @@ -996,89 +1788,73 @@ define <16 x i32> @v16i32(<16 x i32> %x, <16 x i32> %y) nounwind { define <2 x i64> @v2i64(<2 x i64> %x, <2 x i64> %y) nounwind { ; SSE2-LABEL: v2i64: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456] -; SSE2-NEXT: movdqa %xmm1, %xmm3 -; SSE2-NEXT: pxor %xmm2, %xmm3 -; SSE2-NEXT: pxor %xmm0, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm4 -; SSE2-NEXT: pcmpgtd %xmm3, %xmm4 -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2] -; SSE2-NEXT: pcmpeqd %xmm3, %xmm2 -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; SSE2-NEXT: pand %xmm5, %xmm2 -; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3] -; SSE2-NEXT: por %xmm2, %xmm3 -; SSE2-NEXT: pand %xmm3, %xmm0 -; SSE2-NEXT: pandn %xmm1, %xmm3 -; SSE2-NEXT: por %xmm3, %xmm0 -; SSE2-NEXT: psubq %xmm1, %xmm0 +; SSE2-NEXT: movq %xmm1, %rax +; SSE2-NEXT: movq %xmm0, %rcx +; SSE2-NEXT: xorl %edx, %edx +; SSE2-NEXT: subq %rax, %rcx +; SSE2-NEXT: cmovbq %rdx, %rcx +; SSE2-NEXT: movq %rcx, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE2-NEXT: movq %xmm1, %rax +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSE2-NEXT: movq %xmm0, %rcx +; SSE2-NEXT: subq %rax, %rcx +; SSE2-NEXT: cmovbq %rdx, %rcx +; SSE2-NEXT: movq %rcx, %xmm0 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] +; SSE2-NEXT: movdqa %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; SSSE3-LABEL: v2i64: ; SSSE3: # %bb.0: -; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456] -; SSSE3-NEXT: movdqa %xmm1, %xmm3 -; SSSE3-NEXT: pxor %xmm2, %xmm3 -; SSSE3-NEXT: pxor %xmm0, %xmm2 -; SSSE3-NEXT: movdqa %xmm2, %xmm4 -; 
SSSE3-NEXT: pcmpgtd %xmm3, %xmm4 -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2] -; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2 -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; SSSE3-NEXT: pand %xmm5, %xmm2 -; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3] -; SSSE3-NEXT: por %xmm2, %xmm3 -; SSSE3-NEXT: pand %xmm3, %xmm0 -; SSSE3-NEXT: pandn %xmm1, %xmm3 -; SSSE3-NEXT: por %xmm3, %xmm0 -; SSSE3-NEXT: psubq %xmm1, %xmm0 +; SSSE3-NEXT: movq %xmm1, %rax +; SSSE3-NEXT: movq %xmm0, %rcx +; SSSE3-NEXT: xorl %edx, %edx +; SSSE3-NEXT: subq %rax, %rcx +; SSSE3-NEXT: cmovbq %rdx, %rcx +; SSSE3-NEXT: movq %rcx, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSSE3-NEXT: movq %xmm1, %rax +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSSE3-NEXT: movq %xmm0, %rcx +; SSSE3-NEXT: subq %rax, %rcx +; SSSE3-NEXT: cmovbq %rdx, %rcx +; SSSE3-NEXT: movq %rcx, %xmm0 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] +; SSSE3-NEXT: movdqa %xmm2, %xmm0 ; SSSE3-NEXT: retq ; ; SSE41-LABEL: v2i64: ; SSE41: # %bb.0: -; SSE41-NEXT: movdqa %xmm0, %xmm2 -; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456] -; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: pxor %xmm0, %xmm3 -; SSE41-NEXT: pxor %xmm2, %xmm0 -; SSE41-NEXT: movdqa %xmm0, %xmm4 -; SSE41-NEXT: pcmpgtd %xmm3, %xmm4 -; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2] -; SSE41-NEXT: pcmpeqd %xmm3, %xmm0 -; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] -; SSE41-NEXT: pand %xmm5, %xmm0 -; SSE41-NEXT: por %xmm4, %xmm0 -; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm3 -; SSE41-NEXT: psubq %xmm1, %xmm3 -; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pextrq $1, %xmm1, %rax +; SSE41-NEXT: pextrq $1, %xmm0, %rcx +; SSE41-NEXT: xorl %edx, %edx +; SSE41-NEXT: subq %rax, %rcx +; SSE41-NEXT: cmovbq %rdx, %rcx +; SSE41-NEXT: movq %rcx, %xmm2 +; SSE41-NEXT: movq %xmm1, %rax +; SSE41-NEXT: movq %xmm0, %rcx +; SSE41-NEXT: subq %rax, %rcx +; SSE41-NEXT: cmovbq %rdx, %rcx +; SSE41-NEXT: movq %rcx, %xmm0 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] ; SSE41-NEXT: retq ; -; AVX1-LABEL: v2i64: -; AVX1: # %bb.0: -; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] -; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3 -; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2 -; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0 -; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: retq -; -; AVX2-LABEL: v2i64: -; AVX2: # %bb.0: -; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] -; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3 -; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2 -; AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2 -; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0 -; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: retq -; -; AVX512-LABEL: v2i64: -; AVX512: # %bb.0: -; AVX512-NEXT: vpmaxuq %xmm1, %xmm0, %xmm0 -; AVX512-NEXT: vpsubq %xmm1, %xmm0, %xmm0 -; AVX512-NEXT: retq +; AVX-LABEL: v2i64: +; AVX: # %bb.0: +; AVX-NEXT: vpextrq $1, %xmm1, %rax +; AVX-NEXT: vpextrq $1, %xmm0, %rcx +; AVX-NEXT: xorl %edx, %edx +; AVX-NEXT: subq %rax, %rcx +; AVX-NEXT: cmovbq %rdx, %rcx +; AVX-NEXT: vmovq %rcx, %xmm2 +; AVX-NEXT: vmovq %xmm1, %rax +; AVX-NEXT: vmovq %xmm0, %rcx +; AVX-NEXT: subq %rax, %rcx +; AVX-NEXT: cmovbq %rdx, %rcx +; AVX-NEXT: vmovq %rcx, %xmm0 +; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX-NEXT: retq %z = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %x, <2 x i64> %y) ret <2 x i64> %z } @@ -1086,144 +1862,185 @@ 
define <2 x i64> @v2i64(<2 x i64> %x, <2 x i64> %y) nounwind { define <4 x i64> @v4i64(<4 x i64> %x, <4 x i64> %y) nounwind { ; SSE2-LABEL: v4i64: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456] -; SSE2-NEXT: movdqa %xmm2, %xmm5 -; SSE2-NEXT: pxor %xmm4, %xmm5 -; SSE2-NEXT: movdqa %xmm0, %xmm6 -; SSE2-NEXT: pxor %xmm4, %xmm6 -; SSE2-NEXT: movdqa %xmm6, %xmm7 -; SSE2-NEXT: pcmpgtd %xmm5, %xmm7 -; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2] -; SSE2-NEXT: pcmpeqd %xmm5, %xmm6 -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3] -; SSE2-NEXT: pand %xmm8, %xmm5 -; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3] -; SSE2-NEXT: por %xmm5, %xmm6 -; SSE2-NEXT: pand %xmm6, %xmm0 -; SSE2-NEXT: pandn %xmm2, %xmm6 -; SSE2-NEXT: por %xmm6, %xmm0 -; SSE2-NEXT: psubq %xmm2, %xmm0 -; SSE2-NEXT: movdqa %xmm3, %xmm2 -; SSE2-NEXT: pxor %xmm4, %xmm2 -; SSE2-NEXT: pxor %xmm1, %xmm4 -; SSE2-NEXT: movdqa %xmm4, %xmm5 -; SSE2-NEXT: pcmpgtd %xmm2, %xmm5 -; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2] -; SSE2-NEXT: pcmpeqd %xmm2, %xmm4 -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3] -; SSE2-NEXT: pand %xmm6, %xmm2 -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3] -; SSE2-NEXT: por %xmm2, %xmm4 -; SSE2-NEXT: pand %xmm4, %xmm1 -; SSE2-NEXT: pandn %xmm3, %xmm4 -; SSE2-NEXT: por %xmm4, %xmm1 -; SSE2-NEXT: psubq %xmm3, %xmm1 +; SSE2-NEXT: movq %xmm2, %rax +; SSE2-NEXT: movq %xmm0, %rcx +; SSE2-NEXT: xorl %edx, %edx +; SSE2-NEXT: subq %rax, %rcx +; SSE2-NEXT: cmovbq %rdx, %rcx +; SSE2-NEXT: movq %rcx, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; SSE2-NEXT: movq %xmm2, %rax +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSE2-NEXT: movq %xmm0, %rcx +; SSE2-NEXT: subq %rax, %rcx +; SSE2-NEXT: cmovbq %rdx, %rcx +; SSE2-NEXT: movq %rcx, %xmm0 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0] +; SSE2-NEXT: movq %xmm3, %rax +; SSE2-NEXT: movq %xmm1, %rcx +; SSE2-NEXT: subq %rax, %rcx +; SSE2-NEXT: cmovbq %rdx, %rcx +; SSE2-NEXT: movq %rcx, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1] +; SSE2-NEXT: movq %xmm0, %rax +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE2-NEXT: movq %xmm0, %rcx +; SSE2-NEXT: subq %rax, %rcx +; SSE2-NEXT: cmovbq %rdx, %rcx +; SSE2-NEXT: movq %rcx, %xmm0 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] +; SSE2-NEXT: movdqa %xmm4, %xmm0 +; SSE2-NEXT: movdqa %xmm2, %xmm1 ; SSE2-NEXT: retq ; ; SSSE3-LABEL: v4i64: ; SSSE3: # %bb.0: -; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456] -; SSSE3-NEXT: movdqa %xmm2, %xmm5 -; SSSE3-NEXT: pxor %xmm4, %xmm5 -; SSSE3-NEXT: movdqa %xmm0, %xmm6 -; SSSE3-NEXT: pxor %xmm4, %xmm6 -; SSSE3-NEXT: movdqa %xmm6, %xmm7 -; SSSE3-NEXT: pcmpgtd %xmm5, %xmm7 -; SSSE3-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2] -; SSSE3-NEXT: pcmpeqd %xmm5, %xmm6 -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3] -; SSSE3-NEXT: pand %xmm8, %xmm5 -; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3] -; SSSE3-NEXT: por %xmm5, %xmm6 -; SSSE3-NEXT: pand %xmm6, %xmm0 -; SSSE3-NEXT: pandn %xmm2, %xmm6 -; SSSE3-NEXT: por %xmm6, %xmm0 -; SSSE3-NEXT: psubq %xmm2, %xmm0 -; SSSE3-NEXT: movdqa %xmm3, %xmm2 -; SSSE3-NEXT: pxor %xmm4, %xmm2 -; SSSE3-NEXT: pxor %xmm1, %xmm4 -; SSSE3-NEXT: movdqa %xmm4, %xmm5 -; SSSE3-NEXT: pcmpgtd %xmm2, %xmm5 -; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2] -; SSSE3-NEXT: pcmpeqd %xmm2, %xmm4 -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3] -; SSSE3-NEXT: pand %xmm6, %xmm2 -; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3] -; 
SSSE3-NEXT: por %xmm2, %xmm4 -; SSSE3-NEXT: pand %xmm4, %xmm1 -; SSSE3-NEXT: pandn %xmm3, %xmm4 -; SSSE3-NEXT: por %xmm4, %xmm1 -; SSSE3-NEXT: psubq %xmm3, %xmm1 +; SSSE3-NEXT: movq %xmm2, %rax +; SSSE3-NEXT: movq %xmm0, %rcx +; SSSE3-NEXT: xorl %edx, %edx +; SSSE3-NEXT: subq %rax, %rcx +; SSSE3-NEXT: cmovbq %rdx, %rcx +; SSSE3-NEXT: movq %rcx, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; SSSE3-NEXT: movq %xmm2, %rax +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSSE3-NEXT: movq %xmm0, %rcx +; SSSE3-NEXT: subq %rax, %rcx +; SSSE3-NEXT: cmovbq %rdx, %rcx +; SSSE3-NEXT: movq %rcx, %xmm0 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0] +; SSSE3-NEXT: movq %xmm3, %rax +; SSSE3-NEXT: movq %xmm1, %rcx +; SSSE3-NEXT: subq %rax, %rcx +; SSSE3-NEXT: cmovbq %rdx, %rcx +; SSSE3-NEXT: movq %rcx, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1] +; SSSE3-NEXT: movq %xmm0, %rax +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSSE3-NEXT: movq %xmm0, %rcx +; SSSE3-NEXT: subq %rax, %rcx +; SSSE3-NEXT: cmovbq %rdx, %rcx +; SSSE3-NEXT: movq %rcx, %xmm0 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] +; SSSE3-NEXT: movdqa %xmm4, %xmm0 +; SSSE3-NEXT: movdqa %xmm2, %xmm1 ; SSSE3-NEXT: retq ; ; SSE41-LABEL: v4i64: ; SSE41: # %bb.0: -; SSE41-NEXT: movdqa %xmm0, %xmm4 -; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002259456,9223372039002259456] -; SSE41-NEXT: movdqa %xmm2, %xmm5 -; SSE41-NEXT: pxor %xmm6, %xmm5 -; SSE41-NEXT: movdqa %xmm0, %xmm7 -; SSE41-NEXT: pxor %xmm6, %xmm7 -; SSE41-NEXT: movdqa %xmm7, %xmm0 -; SSE41-NEXT: pcmpgtd %xmm5, %xmm0 -; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm0[0,0,2,2] -; SSE41-NEXT: pcmpeqd %xmm5, %xmm7 -; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3] -; SSE41-NEXT: pand %xmm8, %xmm5 -; SSE41-NEXT: por %xmm5, %xmm0 -; SSE41-NEXT: movdqa %xmm2, %xmm5 -; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm5 -; SSE41-NEXT: psubq %xmm2, %xmm5 -; SSE41-NEXT: movdqa %xmm3, %xmm0 -; SSE41-NEXT: pxor %xmm6, %xmm0 -; SSE41-NEXT: pxor %xmm1, %xmm6 -; SSE41-NEXT: movdqa %xmm6, %xmm2 -; SSE41-NEXT: pcmpgtd %xmm0, %xmm2 -; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2] -; SSE41-NEXT: pcmpeqd %xmm0, %xmm6 -; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3] -; SSE41-NEXT: pand %xmm4, %xmm0 -; SSE41-NEXT: por %xmm2, %xmm0 -; SSE41-NEXT: movdqa %xmm3, %xmm2 -; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2 -; SSE41-NEXT: psubq %xmm3, %xmm2 -; SSE41-NEXT: movdqa %xmm5, %xmm0 -; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: pextrq $1, %xmm2, %rax +; SSE41-NEXT: pextrq $1, %xmm0, %rcx +; SSE41-NEXT: xorl %edx, %edx +; SSE41-NEXT: subq %rax, %rcx +; SSE41-NEXT: cmovbq %rdx, %rcx +; SSE41-NEXT: movq %rcx, %xmm4 +; SSE41-NEXT: movq %xmm2, %rax +; SSE41-NEXT: movq %xmm0, %rcx +; SSE41-NEXT: subq %rax, %rcx +; SSE41-NEXT: cmovbq %rdx, %rcx +; SSE41-NEXT: movq %rcx, %xmm0 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0] +; SSE41-NEXT: pextrq $1, %xmm3, %rax +; SSE41-NEXT: pextrq $1, %xmm1, %rcx +; SSE41-NEXT: subq %rax, %rcx +; SSE41-NEXT: cmovbq %rdx, %rcx +; SSE41-NEXT: movq %rcx, %xmm2 +; SSE41-NEXT: movq %xmm3, %rax +; SSE41-NEXT: movq %xmm1, %rcx +; SSE41-NEXT: subq %rax, %rcx +; SSE41-NEXT: cmovbq %rdx, %rcx +; SSE41-NEXT: movq %rcx, %xmm1 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSE41-NEXT: retq ; ; AVX1-LABEL: v4i64: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808] -; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm4 -; AVX1-NEXT: vextractf128 $1, 
%ymm0, %xmm5 -; AVX1-NEXT: vpxor %xmm3, %xmm5, %xmm5 -; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4 -; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm5 -; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm3 -; AVX1-NEXT: vpcmpgtq %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3 -; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm1, %ymm0 +; AVX1-NEXT: vpextrq $1, %xmm2, %rax ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2 -; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpextrq $1, %xmm3, %rcx +; AVX1-NEXT: xorl %edx, %edx +; AVX1-NEXT: subq %rax, %rcx +; AVX1-NEXT: cmovbq %rdx, %rcx +; AVX1-NEXT: vmovq %rcx, %xmm4 +; AVX1-NEXT: vmovq %xmm2, %rax +; AVX1-NEXT: vmovq %xmm3, %rcx +; AVX1-NEXT: subq %rax, %rcx +; AVX1-NEXT: cmovbq %rdx, %rcx +; AVX1-NEXT: vmovq %rcx, %xmm2 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0] +; AVX1-NEXT: vpextrq $1, %xmm1, %rax +; AVX1-NEXT: vpextrq $1, %xmm0, %rcx +; AVX1-NEXT: subq %rax, %rcx +; AVX1-NEXT: cmovbq %rdx, %rcx +; AVX1-NEXT: vmovq %rcx, %xmm3 +; AVX1-NEXT: vmovq %xmm1, %rax +; AVX1-NEXT: vmovq %xmm0, %rcx +; AVX1-NEXT: subq %rax, %rcx +; AVX1-NEXT: cmovbq %rdx, %rcx +; AVX1-NEXT: vmovq %rcx, %xmm0 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: v4i64: ; AVX2: # %bb.0: -; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808] -; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3 -; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2 -; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2 -; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0 -; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpextrq $1, %xmm2, %rax +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3 +; AVX2-NEXT: vpextrq $1, %xmm3, %rcx +; AVX2-NEXT: xorl %edx, %edx +; AVX2-NEXT: subq %rax, %rcx +; AVX2-NEXT: cmovbq %rdx, %rcx +; AVX2-NEXT: vmovq %rcx, %xmm4 +; AVX2-NEXT: vmovq %xmm2, %rax +; AVX2-NEXT: vmovq %xmm3, %rcx +; AVX2-NEXT: subq %rax, %rcx +; AVX2-NEXT: cmovbq %rdx, %rcx +; AVX2-NEXT: vmovq %rcx, %xmm2 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0] +; AVX2-NEXT: vpextrq $1, %xmm1, %rax +; AVX2-NEXT: vpextrq $1, %xmm0, %rcx +; AVX2-NEXT: subq %rax, %rcx +; AVX2-NEXT: cmovbq %rdx, %rcx +; AVX2-NEXT: vmovq %rcx, %xmm3 +; AVX2-NEXT: vmovq %xmm1, %rax +; AVX2-NEXT: vmovq %xmm0, %rcx +; AVX2-NEXT: subq %rax, %rcx +; AVX2-NEXT: cmovbq %rdx, %rcx +; AVX2-NEXT: vmovq %rcx, %xmm0 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: v4i64: ; AVX512: # %bb.0: -; AVX512-NEXT: vpmaxuq %ymm1, %ymm0, %ymm0 -; AVX512-NEXT: vpsubq %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX512-NEXT: vpextrq $1, %xmm2, %rax +; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm3 +; AVX512-NEXT: vpextrq $1, %xmm3, %rcx +; AVX512-NEXT: xorl %edx, %edx +; AVX512-NEXT: subq %rax, %rcx +; AVX512-NEXT: cmovbq %rdx, %rcx +; AVX512-NEXT: vmovq %rcx, %xmm4 +; AVX512-NEXT: vmovq %xmm2, %rax +; AVX512-NEXT: vmovq %xmm3, %rcx +; AVX512-NEXT: subq %rax, %rcx +; AVX512-NEXT: cmovbq %rdx, %rcx +; AVX512-NEXT: vmovq %rcx, %xmm2 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0] +; AVX512-NEXT: vpextrq $1, %xmm1, %rax +; AVX512-NEXT: vpextrq $1, %xmm0, %rcx +; AVX512-NEXT: subq %rax, %rcx +; AVX512-NEXT: cmovbq %rdx, %rcx +; AVX512-NEXT: vmovq %rcx, %xmm3 +; AVX512-NEXT: vmovq %xmm1, %rax +; AVX512-NEXT: vmovq 
%xmm0, %rcx +; AVX512-NEXT: subq %rax, %rcx +; AVX512-NEXT: cmovbq %rdx, %rcx +; AVX512-NEXT: vmovq %rcx, %xmm0 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 ; AVX512-NEXT: retq %z = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %x, <4 x i64> %y) ret <4 x i64> %z @@ -1232,257 +2049,341 @@ define <4 x i64> @v4i64(<4 x i64> %x, <4 x i64> %y) nounwind { define <8 x i64> @v8i64(<8 x i64> %x, <8 x i64> %y) nounwind { ; SSE2-LABEL: v8i64: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259456,9223372039002259456] -; SSE2-NEXT: movdqa %xmm4, %xmm9 -; SSE2-NEXT: pxor %xmm8, %xmm9 -; SSE2-NEXT: movdqa %xmm0, %xmm10 -; SSE2-NEXT: pxor %xmm8, %xmm10 -; SSE2-NEXT: movdqa %xmm10, %xmm11 -; SSE2-NEXT: pcmpgtd %xmm9, %xmm11 -; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2] -; SSE2-NEXT: pcmpeqd %xmm9, %xmm10 -; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm10[1,1,3,3] -; SSE2-NEXT: pand %xmm12, %xmm9 -; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm11[1,1,3,3] -; SSE2-NEXT: por %xmm9, %xmm10 -; SSE2-NEXT: pand %xmm10, %xmm0 -; SSE2-NEXT: pandn %xmm4, %xmm10 -; SSE2-NEXT: por %xmm10, %xmm0 -; SSE2-NEXT: psubq %xmm4, %xmm0 -; SSE2-NEXT: movdqa %xmm5, %xmm9 -; SSE2-NEXT: pxor %xmm8, %xmm9 -; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: pxor %xmm8, %xmm4 -; SSE2-NEXT: movdqa %xmm4, %xmm10 -; SSE2-NEXT: pcmpgtd %xmm9, %xmm10 -; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2] -; SSE2-NEXT: pcmpeqd %xmm9, %xmm4 -; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm4[1,1,3,3] -; SSE2-NEXT: pand %xmm11, %xmm9 -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm10[1,1,3,3] -; SSE2-NEXT: por %xmm9, %xmm4 -; SSE2-NEXT: pand %xmm4, %xmm1 -; SSE2-NEXT: pandn %xmm5, %xmm4 -; SSE2-NEXT: por %xmm4, %xmm1 -; SSE2-NEXT: psubq %xmm5, %xmm1 -; SSE2-NEXT: movdqa %xmm6, %xmm4 -; SSE2-NEXT: pxor %xmm8, %xmm4 -; SSE2-NEXT: movdqa %xmm2, %xmm5 -; SSE2-NEXT: pxor %xmm8, %xmm5 -; SSE2-NEXT: movdqa %xmm5, %xmm9 -; SSE2-NEXT: pcmpgtd %xmm4, %xmm9 -; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm9[0,0,2,2] -; SSE2-NEXT: pcmpeqd %xmm4, %xmm5 -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3] -; SSE2-NEXT: pand %xmm10, %xmm4 -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm9[1,1,3,3] -; SSE2-NEXT: por %xmm4, %xmm5 -; SSE2-NEXT: pand %xmm5, %xmm2 -; SSE2-NEXT: pandn %xmm6, %xmm5 -; SSE2-NEXT: por %xmm5, %xmm2 -; SSE2-NEXT: psubq %xmm6, %xmm2 -; SSE2-NEXT: movdqa %xmm7, %xmm4 -; SSE2-NEXT: pxor %xmm8, %xmm4 -; SSE2-NEXT: pxor %xmm3, %xmm8 -; SSE2-NEXT: movdqa %xmm8, %xmm5 -; SSE2-NEXT: pcmpgtd %xmm4, %xmm5 -; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2] -; SSE2-NEXT: pcmpeqd %xmm4, %xmm8 -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm8[1,1,3,3] -; SSE2-NEXT: pand %xmm6, %xmm4 -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3] -; SSE2-NEXT: por %xmm4, %xmm5 -; SSE2-NEXT: pand %xmm5, %xmm3 -; SSE2-NEXT: pandn %xmm7, %xmm5 -; SSE2-NEXT: por %xmm5, %xmm3 -; SSE2-NEXT: psubq %xmm7, %xmm3 +; SSE2-NEXT: movdqa %xmm1, %xmm8 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: movq %xmm4, %rcx +; SSE2-NEXT: movq %xmm0, %rdx +; SSE2-NEXT: xorl %eax, %eax +; SSE2-NEXT: subq %rcx, %rdx +; SSE2-NEXT: cmovbq %rax, %rdx +; SSE2-NEXT: movq %rdx, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1] +; SSE2-NEXT: movq %xmm4, %rcx +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE2-NEXT: movq %xmm1, %rdx +; SSE2-NEXT: subq %rcx, %rdx +; SSE2-NEXT: cmovbq %rax, %rdx +; SSE2-NEXT: movq %rdx, %xmm1 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-NEXT: movq %xmm5, %rcx +; SSE2-NEXT: movq %xmm8, %rdx +; 
SSE2-NEXT: subq %rcx, %rdx +; SSE2-NEXT: cmovbq %rax, %rdx +; SSE2-NEXT: movq %rdx, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1] +; SSE2-NEXT: movq %xmm4, %rcx +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,3,0,1] +; SSE2-NEXT: movq %xmm4, %rdx +; SSE2-NEXT: subq %rcx, %rdx +; SSE2-NEXT: cmovbq %rax, %rdx +; SSE2-NEXT: movq %rdx, %xmm4 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0] +; SSE2-NEXT: movq %xmm6, %rcx +; SSE2-NEXT: movq %xmm2, %rdx +; SSE2-NEXT: subq %rcx, %rdx +; SSE2-NEXT: cmovbq %rax, %rdx +; SSE2-NEXT: movq %rdx, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,0,1] +; SSE2-NEXT: movq %xmm5, %rcx +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; SSE2-NEXT: movq %xmm2, %rdx +; SSE2-NEXT: subq %rcx, %rdx +; SSE2-NEXT: cmovbq %rax, %rdx +; SSE2-NEXT: movq %rdx, %xmm2 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm2[0] +; SSE2-NEXT: movq %xmm7, %rcx +; SSE2-NEXT: movq %xmm3, %rdx +; SSE2-NEXT: subq %rcx, %rdx +; SSE2-NEXT: cmovbq %rax, %rdx +; SSE2-NEXT: movq %rdx, %xmm5 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm7[2,3,0,1] +; SSE2-NEXT: movq %xmm2, %rcx +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1] +; SSE2-NEXT: movq %xmm2, %rdx +; SSE2-NEXT: subq %rcx, %rdx +; SSE2-NEXT: cmovbq %rax, %rdx +; SSE2-NEXT: movq %rdx, %xmm2 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm2[0] +; SSE2-NEXT: movdqa %xmm4, %xmm2 +; SSE2-NEXT: movdqa %xmm5, %xmm3 ; SSE2-NEXT: retq ; ; SSSE3-LABEL: v8i64: ; SSSE3: # %bb.0: -; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259456,9223372039002259456] -; SSSE3-NEXT: movdqa %xmm4, %xmm9 -; SSSE3-NEXT: pxor %xmm8, %xmm9 -; SSSE3-NEXT: movdqa %xmm0, %xmm10 -; SSSE3-NEXT: pxor %xmm8, %xmm10 -; SSSE3-NEXT: movdqa %xmm10, %xmm11 -; SSSE3-NEXT: pcmpgtd %xmm9, %xmm11 -; SSSE3-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2] -; SSSE3-NEXT: pcmpeqd %xmm9, %xmm10 -; SSSE3-NEXT: pshufd {{.*#+}} xmm9 = xmm10[1,1,3,3] -; SSSE3-NEXT: pand %xmm12, %xmm9 -; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm11[1,1,3,3] -; SSSE3-NEXT: por %xmm9, %xmm10 -; SSSE3-NEXT: pand %xmm10, %xmm0 -; SSSE3-NEXT: pandn %xmm4, %xmm10 -; SSSE3-NEXT: por %xmm10, %xmm0 -; SSSE3-NEXT: psubq %xmm4, %xmm0 -; SSSE3-NEXT: movdqa %xmm5, %xmm9 -; SSSE3-NEXT: pxor %xmm8, %xmm9 -; SSSE3-NEXT: movdqa %xmm1, %xmm4 -; SSSE3-NEXT: pxor %xmm8, %xmm4 -; SSSE3-NEXT: movdqa %xmm4, %xmm10 -; SSSE3-NEXT: pcmpgtd %xmm9, %xmm10 -; SSSE3-NEXT: pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2] -; SSSE3-NEXT: pcmpeqd %xmm9, %xmm4 -; SSSE3-NEXT: pshufd {{.*#+}} xmm9 = xmm4[1,1,3,3] -; SSSE3-NEXT: pand %xmm11, %xmm9 -; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm10[1,1,3,3] -; SSSE3-NEXT: por %xmm9, %xmm4 -; SSSE3-NEXT: pand %xmm4, %xmm1 -; SSSE3-NEXT: pandn %xmm5, %xmm4 -; SSSE3-NEXT: por %xmm4, %xmm1 -; SSSE3-NEXT: psubq %xmm5, %xmm1 -; SSSE3-NEXT: movdqa %xmm6, %xmm4 -; SSSE3-NEXT: pxor %xmm8, %xmm4 -; SSSE3-NEXT: movdqa %xmm2, %xmm5 -; SSSE3-NEXT: pxor %xmm8, %xmm5 -; SSSE3-NEXT: movdqa %xmm5, %xmm9 -; SSSE3-NEXT: pcmpgtd %xmm4, %xmm9 -; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm9[0,0,2,2] -; SSSE3-NEXT: pcmpeqd %xmm4, %xmm5 -; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3] -; SSSE3-NEXT: pand %xmm10, %xmm4 -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm9[1,1,3,3] -; SSSE3-NEXT: por %xmm4, %xmm5 -; SSSE3-NEXT: pand %xmm5, %xmm2 -; SSSE3-NEXT: pandn %xmm6, %xmm5 -; SSSE3-NEXT: por %xmm5, %xmm2 -; SSSE3-NEXT: psubq %xmm6, %xmm2 -; SSSE3-NEXT: movdqa %xmm7, %xmm4 -; SSSE3-NEXT: pxor %xmm8, %xmm4 -; SSSE3-NEXT: pxor %xmm3, %xmm8 -; SSSE3-NEXT: movdqa %xmm8, %xmm5 -; SSSE3-NEXT: pcmpgtd %xmm4, %xmm5 -; SSSE3-NEXT: 
pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2] -; SSSE3-NEXT: pcmpeqd %xmm4, %xmm8 -; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm8[1,1,3,3] -; SSSE3-NEXT: pand %xmm6, %xmm4 -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3] -; SSSE3-NEXT: por %xmm4, %xmm5 -; SSSE3-NEXT: pand %xmm5, %xmm3 -; SSSE3-NEXT: pandn %xmm7, %xmm5 -; SSSE3-NEXT: por %xmm5, %xmm3 -; SSSE3-NEXT: psubq %xmm7, %xmm3 +; SSSE3-NEXT: movdqa %xmm1, %xmm8 +; SSSE3-NEXT: movdqa %xmm0, %xmm1 +; SSSE3-NEXT: movq %xmm4, %rcx +; SSSE3-NEXT: movq %xmm0, %rdx +; SSSE3-NEXT: xorl %eax, %eax +; SSSE3-NEXT: subq %rcx, %rdx +; SSSE3-NEXT: cmovbq %rax, %rdx +; SSSE3-NEXT: movq %rdx, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1] +; SSSE3-NEXT: movq %xmm4, %rcx +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSSE3-NEXT: movq %xmm1, %rdx +; SSSE3-NEXT: subq %rcx, %rdx +; SSSE3-NEXT: cmovbq %rax, %rdx +; SSSE3-NEXT: movq %rdx, %xmm1 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSSE3-NEXT: movq %xmm5, %rcx +; SSSE3-NEXT: movq %xmm8, %rdx +; SSSE3-NEXT: subq %rcx, %rdx +; SSSE3-NEXT: cmovbq %rax, %rdx +; SSSE3-NEXT: movq %rdx, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1] +; SSSE3-NEXT: movq %xmm4, %rcx +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,3,0,1] +; SSSE3-NEXT: movq %xmm4, %rdx +; SSSE3-NEXT: subq %rcx, %rdx +; SSSE3-NEXT: cmovbq %rax, %rdx +; SSSE3-NEXT: movq %rdx, %xmm4 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0] +; SSSE3-NEXT: movq %xmm6, %rcx +; SSSE3-NEXT: movq %xmm2, %rdx +; SSSE3-NEXT: subq %rcx, %rdx +; SSSE3-NEXT: cmovbq %rax, %rdx +; SSSE3-NEXT: movq %rdx, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,0,1] +; SSSE3-NEXT: movq %xmm5, %rcx +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; SSSE3-NEXT: movq %xmm2, %rdx +; SSSE3-NEXT: subq %rcx, %rdx +; SSSE3-NEXT: cmovbq %rax, %rdx +; SSSE3-NEXT: movq %rdx, %xmm2 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm2[0] +; SSSE3-NEXT: movq %xmm7, %rcx +; SSSE3-NEXT: movq %xmm3, %rdx +; SSSE3-NEXT: subq %rcx, %rdx +; SSSE3-NEXT: cmovbq %rax, %rdx +; SSSE3-NEXT: movq %rdx, %xmm5 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm7[2,3,0,1] +; SSSE3-NEXT: movq %xmm2, %rcx +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1] +; SSSE3-NEXT: movq %xmm2, %rdx +; SSSE3-NEXT: subq %rcx, %rdx +; SSSE3-NEXT: cmovbq %rax, %rdx +; SSSE3-NEXT: movq %rdx, %xmm2 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm2[0] +; SSSE3-NEXT: movdqa %xmm4, %xmm2 +; SSSE3-NEXT: movdqa %xmm5, %xmm3 ; SSSE3-NEXT: retq ; ; SSE41-LABEL: v8i64: ; SSE41: # %bb.0: -; SSE41-NEXT: movdqa %xmm1, %xmm8 -; SSE41-NEXT: movdqa %xmm0, %xmm11 -; SSE41-NEXT: movdqa {{.*#+}} xmm10 = [9223372039002259456,9223372039002259456] -; SSE41-NEXT: movdqa %xmm4, %xmm9 -; SSE41-NEXT: pxor %xmm10, %xmm9 -; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: pxor %xmm10, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm0 -; SSE41-NEXT: pcmpgtd %xmm9, %xmm0 -; SSE41-NEXT: pshufd {{.*#+}} xmm12 = xmm0[0,0,2,2] -; SSE41-NEXT: pcmpeqd %xmm9, %xmm1 -; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; SSE41-NEXT: pand %xmm12, %xmm1 -; SSE41-NEXT: por %xmm1, %xmm0 -; SSE41-NEXT: movdqa %xmm4, %xmm9 -; SSE41-NEXT: blendvpd %xmm0, %xmm11, %xmm9 -; SSE41-NEXT: psubq %xmm4, %xmm9 -; SSE41-NEXT: movdqa %xmm5, %xmm0 -; SSE41-NEXT: pxor %xmm10, %xmm0 -; SSE41-NEXT: movdqa %xmm8, %xmm1 -; SSE41-NEXT: pxor %xmm10, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm4 -; SSE41-NEXT: pcmpgtd %xmm0, %xmm4 -; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm4[0,0,2,2] -; SSE41-NEXT: pcmpeqd %xmm0, %xmm1 -; SSE41-NEXT: pshufd {{.*#+}} xmm0 = 
xmm1[1,1,3,3] -; SSE41-NEXT: pand %xmm11, %xmm0 -; SSE41-NEXT: por %xmm4, %xmm0 -; SSE41-NEXT: movdqa %xmm5, %xmm1 -; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm1 -; SSE41-NEXT: psubq %xmm5, %xmm1 -; SSE41-NEXT: movdqa %xmm6, %xmm0 -; SSE41-NEXT: pxor %xmm10, %xmm0 -; SSE41-NEXT: movdqa %xmm2, %xmm4 -; SSE41-NEXT: pxor %xmm10, %xmm4 -; SSE41-NEXT: movdqa %xmm4, %xmm5 -; SSE41-NEXT: pcmpgtd %xmm0, %xmm5 -; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm5[0,0,2,2] -; SSE41-NEXT: pcmpeqd %xmm0, %xmm4 -; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3] -; SSE41-NEXT: pand %xmm8, %xmm0 -; SSE41-NEXT: por %xmm5, %xmm0 -; SSE41-NEXT: movdqa %xmm6, %xmm4 -; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm4 -; SSE41-NEXT: psubq %xmm6, %xmm4 -; SSE41-NEXT: movdqa %xmm7, %xmm0 -; SSE41-NEXT: pxor %xmm10, %xmm0 -; SSE41-NEXT: pxor %xmm3, %xmm10 -; SSE41-NEXT: movdqa %xmm10, %xmm2 -; SSE41-NEXT: pcmpgtd %xmm0, %xmm2 -; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2] -; SSE41-NEXT: pcmpeqd %xmm0, %xmm10 -; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3] -; SSE41-NEXT: pand %xmm5, %xmm0 -; SSE41-NEXT: por %xmm2, %xmm0 -; SSE41-NEXT: movdqa %xmm7, %xmm5 -; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm5 -; SSE41-NEXT: psubq %xmm7, %xmm5 -; SSE41-NEXT: movdqa %xmm9, %xmm0 -; SSE41-NEXT: movdqa %xmm4, %xmm2 -; SSE41-NEXT: movdqa %xmm5, %xmm3 +; SSE41-NEXT: pextrq $1, %xmm4, %rcx +; SSE41-NEXT: pextrq $1, %xmm0, %rdx +; SSE41-NEXT: xorl %eax, %eax +; SSE41-NEXT: subq %rcx, %rdx +; SSE41-NEXT: cmovbq %rax, %rdx +; SSE41-NEXT: movq %rdx, %xmm8 +; SSE41-NEXT: movq %xmm4, %rcx +; SSE41-NEXT: movq %xmm0, %rdx +; SSE41-NEXT: subq %rcx, %rdx +; SSE41-NEXT: cmovbq %rax, %rdx +; SSE41-NEXT: movq %rdx, %xmm0 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm8[0] +; SSE41-NEXT: pextrq $1, %xmm5, %rcx +; SSE41-NEXT: pextrq $1, %xmm1, %rdx +; SSE41-NEXT: subq %rcx, %rdx +; SSE41-NEXT: cmovbq %rax, %rdx +; SSE41-NEXT: movq %rdx, %xmm4 +; SSE41-NEXT: movq %xmm5, %rcx +; SSE41-NEXT: movq %xmm1, %rdx +; SSE41-NEXT: subq %rcx, %rdx +; SSE41-NEXT: cmovbq %rax, %rdx +; SSE41-NEXT: movq %rdx, %xmm1 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0] +; SSE41-NEXT: pextrq $1, %xmm6, %rcx +; SSE41-NEXT: pextrq $1, %xmm2, %rdx +; SSE41-NEXT: subq %rcx, %rdx +; SSE41-NEXT: cmovbq %rax, %rdx +; SSE41-NEXT: movq %rdx, %xmm4 +; SSE41-NEXT: movq %xmm6, %rcx +; SSE41-NEXT: movq %xmm2, %rdx +; SSE41-NEXT: subq %rcx, %rdx +; SSE41-NEXT: cmovbq %rax, %rdx +; SSE41-NEXT: movq %rdx, %xmm2 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0] +; SSE41-NEXT: pextrq $1, %xmm7, %rcx +; SSE41-NEXT: pextrq $1, %xmm3, %rdx +; SSE41-NEXT: subq %rcx, %rdx +; SSE41-NEXT: cmovbq %rax, %rdx +; SSE41-NEXT: movq %rdx, %xmm4 +; SSE41-NEXT: movq %xmm7, %rcx +; SSE41-NEXT: movq %xmm3, %rdx +; SSE41-NEXT: subq %rcx, %rdx +; SSE41-NEXT: cmovbq %rax, %rdx +; SSE41-NEXT: movq %rdx, %xmm3 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0] ; SSE41-NEXT: retq ; ; AVX1-LABEL: v8i64: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808] -; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm6 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7 -; AVX1-NEXT: vpxor %xmm5, %xmm7, %xmm7 -; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm8 -; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm7 -; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm6 -; AVX1-NEXT: vpcmpgtq %xmm7, %xmm6, %xmm6 -; AVX1-NEXT: vinsertf128 $1, %xmm8, %ymm6, %ymm6 -; AVX1-NEXT: vblendvpd %ymm6, %ymm0, %ymm2, %ymm0 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6 -; AVX1-NEXT: vpsubq 
%xmm4, %xmm6, %xmm4
-; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpextrq $1, %xmm4, %rcx
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpextrq $1, %xmm5, %rdx
+; AVX1-NEXT: xorl %eax, %eax
+; AVX1-NEXT: subq %rcx, %rdx
+; AVX1-NEXT: cmovbq %rax, %rdx
+; AVX1-NEXT: vmovq %rdx, %xmm6
+; AVX1-NEXT: vmovq %xmm4, %rcx
+; AVX1-NEXT: vmovq %xmm5, %rdx
+; AVX1-NEXT: subq %rcx, %rdx
+; AVX1-NEXT: cmovbq %rax, %rdx
+; AVX1-NEXT: vmovq %rdx, %xmm4
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
+; AVX1-NEXT: vpextrq $1, %xmm2, %rcx
+; AVX1-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX1-NEXT: subq %rcx, %rdx
+; AVX1-NEXT: cmovbq %rax, %rdx
+; AVX1-NEXT: vmovq %rdx, %xmm5
+; AVX1-NEXT: vmovq %xmm2, %rcx
+; AVX1-NEXT: vmovq %xmm0, %rdx
+; AVX1-NEXT: subq %rcx, %rdx
+; AVX1-NEXT: cmovbq %rax, %rdx
+; AVX1-NEXT: vmovq %rdx, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
 ; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
-; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm6
-; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm5
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT: vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpextrq $1, %xmm2, %rcx
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX1-NEXT: subq %rcx, %rdx
+; AVX1-NEXT: cmovbq %rax, %rdx
+; AVX1-NEXT: vmovq %rdx, %xmm5
+; AVX1-NEXT: vmovq %xmm2, %rcx
+; AVX1-NEXT: vmovq %xmm4, %rdx
+; AVX1-NEXT: subq %rcx, %rdx
+; AVX1-NEXT: cmovbq %rax, %rdx
+; AVX1-NEXT: vmovq %rdx, %xmm2
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
+; AVX1-NEXT: vpextrq $1, %xmm3, %rcx
+; AVX1-NEXT: vpextrq $1, %xmm1, %rdx
+; AVX1-NEXT: subq %rcx, %rdx
+; AVX1-NEXT: cmovbq %rax, %rdx
+; AVX1-NEXT: vmovq %rdx, %xmm4
+; AVX1-NEXT: vmovq %xmm3, %rcx
+; AVX1-NEXT: vmovq %xmm1, %rdx
+; AVX1-NEXT: subq %rcx, %rdx
+; AVX1-NEXT: cmovbq %rax, %rdx
+; AVX1-NEXT: vmovq %rdx, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: v8i64:
 ; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
-; AVX2-NEXT: vpxor %ymm4, %ymm2, %ymm5
-; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm6
-; AVX2-NEXT: vpcmpgtq %ymm5, %ymm6, %ymm5
-; AVX2-NEXT: vblendvpd %ymm5, %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpxor %ymm4, %ymm3, %ymm2
-; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm4
-; AVX2-NEXT: vpcmpgtq %ymm2, %ymm4, %ymm2
-; AVX2-NEXT: vblendvpd %ymm2, %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsubq %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
+; AVX2-NEXT: vpextrq $1, %xmm4, %rcx
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
+; AVX2-NEXT: vpextrq $1, %xmm5, %rdx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: subq %rcx, %rdx
+; AVX2-NEXT: cmovbq %rax, %rdx
+; AVX2-NEXT: vmovq %rdx, %xmm6
+; AVX2-NEXT: vmovq %xmm4, %rcx
+; AVX2-NEXT: vmovq %xmm5, %rdx
+; AVX2-NEXT: subq %rcx, %rdx
+; AVX2-NEXT: cmovbq %rax, %rdx
+; AVX2-NEXT: vmovq %rdx, %xmm4
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
+; AVX2-NEXT: vpextrq $1, %xmm2, %rcx
+; AVX2-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX2-NEXT: subq %rcx, %rdx
+; AVX2-NEXT: cmovbq %rax, %rdx
+; AVX2-NEXT: vmovq %rdx, %xmm5
+; AVX2-NEXT: vmovq %xmm2, %rcx
+; AVX2-NEXT: vmovq %xmm0, %rdx
+; AVX2-NEXT: subq %rcx, %rdx
+; AVX2-NEXT: cmovbq %rax, %rdx
+; AVX2-NEXT: vmovq %rdx, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
+; AVX2-NEXT: vpextrq $1, %xmm2, %rcx
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX2-NEXT: subq %rcx, %rdx
+; AVX2-NEXT: cmovbq %rax, %rdx
+; AVX2-NEXT: vmovq %rdx, %xmm5
+; AVX2-NEXT: vmovq %xmm2, %rcx
+; AVX2-NEXT: vmovq %xmm4, %rdx
+; AVX2-NEXT: subq %rcx, %rdx
+; AVX2-NEXT: cmovbq %rax, %rdx
+; AVX2-NEXT: vmovq %rdx, %xmm2
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
+; AVX2-NEXT: vpextrq $1, %xmm3, %rcx
+; AVX2-NEXT: vpextrq $1, %xmm1, %rdx
+; AVX2-NEXT: subq %rcx, %rdx
+; AVX2-NEXT: cmovbq %rax, %rdx
+; AVX2-NEXT: vmovq %rdx, %xmm4
+; AVX2-NEXT: vmovq %xmm3, %rcx
+; AVX2-NEXT: vmovq %xmm1, %rdx
+; AVX2-NEXT: subq %rcx, %rdx
+; AVX2-NEXT: cmovbq %rax, %rdx
+; AVX2-NEXT: vmovq %rdx, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: v8i64:
 ; AVX512: # %bb.0:
-; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsubq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm2
+; AVX512-NEXT: vpextrq $1, %xmm2, %rcx
+; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm3
+; AVX512-NEXT: vpextrq $1, %xmm3, %rdx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: subq %rcx, %rdx
+; AVX512-NEXT: cmovbq %rax, %rdx
+; AVX512-NEXT: vmovq %rdx, %xmm4
+; AVX512-NEXT: vmovq %xmm2, %rcx
+; AVX512-NEXT: vmovq %xmm3, %rdx
+; AVX512-NEXT: subq %rcx, %rdx
+; AVX512-NEXT: cmovbq %rax, %rdx
+; AVX512-NEXT: vmovq %rdx, %xmm2
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm3
+; AVX512-NEXT: vpextrq $1, %xmm3, %rcx
+; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm4
+; AVX512-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512-NEXT: subq %rcx, %rdx
+; AVX512-NEXT: cmovbq %rax, %rdx
+; AVX512-NEXT: vmovq %rdx, %xmm5
+; AVX512-NEXT: vmovq %xmm3, %rcx
+; AVX512-NEXT: vmovq %xmm4, %rdx
+; AVX512-NEXT: subq %rcx, %rdx
+; AVX512-NEXT: cmovbq %rax, %rdx
+; AVX512-NEXT: vmovq %rdx, %xmm3
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
+; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX512-NEXT: vpextrq $1, %xmm3, %rcx
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm4
+; AVX512-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512-NEXT: subq %rcx, %rdx
+; AVX512-NEXT: cmovbq %rax, %rdx
+; AVX512-NEXT: vmovq %rdx, %xmm5
+; AVX512-NEXT: vmovq %xmm3, %rcx
+; AVX512-NEXT: vmovq %xmm4, %rdx
+; AVX512-NEXT: subq %rcx, %rdx
+; AVX512-NEXT: cmovbq %rax, %rdx
+; AVX512-NEXT: vmovq %rdx, %xmm3
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
+; AVX512-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512-NEXT: subq %rcx, %rdx
+; AVX512-NEXT: cmovbq %rax, %rdx
+; AVX512-NEXT: vmovq %rdx, %xmm4
+; AVX512-NEXT: vmovq %xmm1, %rcx
+; AVX512-NEXT: vmovq %xmm0, %rdx
+; AVX512-NEXT: subq %rcx, %rdx
+; AVX512-NEXT: cmovbq %rax, %rdx
+; AVX512-NEXT: vmovq %rdx, %xmm0
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512-NEXT: retq
 %z = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %x, <8 x i64> %y)
 ret <8 x i64> %z
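For reference, the two lowerings checked above compute the same value. The removed lines implement usub.sat(a, b) as umax(a, b) - b on whole vectors (vpmaxuq/vpsubq on AVX512, the xor/pcmpgt/blendv max emulation on AVX1/AVX2), while the restored output scalarizes each 64-bit lane into a subq followed by cmovbq against a zeroed register. A minimal scalar sketch of both forms in C, assuming ordinary unsigned 64-bit arithmetic (function names are illustrative, not taken from the test):

#include <stdint.h>

/* The umax identity used by the reverted patch:
   umax(a, b) - b == (a > b ? a : b) - b == (a > b ? a - b : 0). */
static uint64_t usub_sat_umax(uint64_t a, uint64_t b) {
    uint64_t m = a > b ? a : b; /* umax(a, b) */
    return m - b;               /* cannot wrap, since m >= b */
}

/* The scalarized lowering restored by this revert: subtract, then
   select zero on unsigned borrow (the subq + cmovbq pattern above). */
static uint64_t usub_sat_cmov(uint64_t a, uint64_t b) {
    uint64_t d = a - b;         /* wraps when a < b */
    return a < b ? 0 : d;       /* borrow => clamp result to 0 */
}

Both functions return a - b clamped at zero, so the revert only changes which instruction sequence the backend emits for <8 x i64>, not the computed result.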