From 79b1ca8ebdeb2bc8c20fceb53b95959526a0b716 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Tue, 2 Aug 2022 11:38:47 +0100
Subject: [DAG] matchRotateSub - ensure the (pre-extended) shift amount is
 wide enough for the amount mask (PR56859)

matchRotateSub is given shift amounts from which any_extend/zero_extend
nodes have already been stripped - so make sure those values are wide
enough to take a mask.

(cherry picked from commit b651fdff79027064071db7c1d0250553e3e6a232)
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 26 ++++++++++--------
 llvm/test/CodeGen/X86/rotate5.ll              | 39 +++++++++++++++++++++++++++
 2 files changed, 54 insertions(+), 11 deletions(-)
 create mode 100644 llvm/test/CodeGen/X86/rotate5.ll

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 654879115ff9..a367ea51be30 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -7301,12 +7301,14 @@ static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize,
   unsigned MaskLoBits = 0;
   if (IsRotate && isPowerOf2_64(EltSize)) {
     unsigned Bits = Log2_64(EltSize);
-    APInt DemandedBits =
-        APInt::getLowBitsSet(Neg.getScalarValueSizeInBits(), Bits);
-    if (SDValue Inner =
-            TLI.SimplifyMultipleUseDemandedBits(Neg, DemandedBits, DAG)) {
-      Neg = Inner;
-      MaskLoBits = Bits;
+    unsigned NegBits = Neg.getScalarValueSizeInBits();
+    if (NegBits >= Bits) {
+      APInt DemandedBits = APInt::getLowBitsSet(NegBits, Bits);
+      if (SDValue Inner =
+              TLI.SimplifyMultipleUseDemandedBits(Neg, DemandedBits, DAG)) {
+        Neg = Inner;
+        MaskLoBits = Bits;
+      }
     }
   }
 
@@ -7322,11 +7324,13 @@ static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize,
   // affect Mask's demanded bits, just replace Pos with Pos'. These operations
   // are redundant for the purpose of the equality.
   if (MaskLoBits) {
-    APInt DemandedBits =
-        APInt::getLowBitsSet(Pos.getScalarValueSizeInBits(), MaskLoBits);
-    if (SDValue Inner =
-            TLI.SimplifyMultipleUseDemandedBits(Pos, DemandedBits, DAG)) {
-      Pos = Inner;
+    unsigned PosBits = Pos.getScalarValueSizeInBits();
+    if (PosBits >= MaskLoBits) {
+      APInt DemandedBits = APInt::getLowBitsSet(PosBits, MaskLoBits);
+      if (SDValue Inner =
+              TLI.SimplifyMultipleUseDemandedBits(Pos, DemandedBits, DAG)) {
+        Pos = Inner;
+      }
     }
   }
 
diff --git a/llvm/test/CodeGen/X86/rotate5.ll b/llvm/test/CodeGen/X86/rotate5.ll
new file mode 100644
index 000000000000..b1f38f87ec89
--- /dev/null
+++ b/llvm/test/CodeGen/X86/rotate5.ll
@@ -0,0 +1,39 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64
+
+; Ensure that the (pre-extended) shift amount type is wide enough to take any mask.
+define void @PR56859() {
+; X86-LABEL: PR56859:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl (%eax), %ecx
+; X86-NEXT:    testl %ecx, %ecx
+; X86-NEXT:    setne %al
+; X86-NEXT:    movl $1, %edx
+; X86-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-NEXT:    shrl %cl, %edx
+; X86-NEXT:    btsl %eax, %edx
+; X86-NEXT:    movl %edx, (%eax)
+; X86-NEXT:    retl
+;
+; X64-LABEL: PR56859:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movl (%rax), %ecx
+; X64-NEXT:    testl %ecx, %ecx
+; X64-NEXT:    setne %al
+; X64-NEXT:    movl $1, %edx
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrl %cl, %edx
+; X64-NEXT:    btsl %eax, %edx
+; X64-NEXT:    movl %edx, (%rax)
+; X64-NEXT:    retq
+entry:
+  %0 = load i32, ptr undef, align 4
+  %tobool = icmp ne i32 %0, 0
+  %lor.ext = zext i1 %tobool to i32
+  %shr = lshr i32 1, %0
+  %shl = shl i32 1, %lor.ext
+  %or = or i32 %shl, %shr
+  store i32 %or, ptr undef, align 4
+  ret void
+}
-- 
cgit v1.2.3
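
For readers unfamiliar with the APInt API, here is a minimal standalone sketch
(a hypothetical helper, not DAGCombiner code) of the failure mode the new
NegBits >= Bits / PosBits >= MaskLoBits guards avoid: APInt::getLowBitsSet(numBits,
loBitsSet) asserts loBitsSet <= numBits, and once any_extend/zero_extend nodes
have been stripped the shift amount can be narrower than the Log2_64(EltSize)
mask width. The helper name getRotateAmountMask and the widths used are
assumptions chosen to mirror the PR56859 repro (an i32 rotate pattern whose
amount narrows to i1).

// Hypothetical illustration of the guard added above -- not DAGCombiner code.
// Builds the demanded-bits mask for a rotate of EltSize bits whose shift
// amount is AmtBits wide, declining (instead of asserting) when the amount
// is too narrow to hold the mask.
#include "llvm/ADT/APInt.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>

using namespace llvm;

static bool getRotateAmountMask(unsigned EltSize, unsigned AmtBits,
                                APInt &Mask) {
  assert(isPowerOf2_64(EltSize) && "rotate width must be a power of two");
  unsigned Bits = Log2_64(EltSize); // EltSize = 32 -> Bits = 5
  // Pre-patch equivalent: no width check, so AmtBits < Bits (e.g. an i1
  // amount for an i32 rotate, as in PR56859) trips the
  // "loBitsSet <= numBits" assertion inside APInt::getLowBitsSet.
  if (AmtBits < Bits)
    return false;
  Mask = APInt::getLowBitsSet(AmtBits, Bits);
  return true;
}

With EltSize = 32 and AmtBits = 1 (the zext i1 amount in the test above), the
unguarded path is equivalent to APInt::getLowBitsSet(1, 5), which asserts in a
debug build; the guarded path simply declines to treat the shift pair as a
rotate, which is why the test checks the ordinary shift/bts code that llc now
emits instead of crashing.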