author     Filipp Zhinkin <filipp.zhinkin@gmail.com>   2022-08-05 10:20:59 -0400
committer  Sanjay Patel <spatel@rotateright.com>        2022-08-05 10:45:43 -0400
commit     249a7ed750729b79341fb22003a0f4537883d928
tree       c0a4b67b4628fafa2ad481c9c849beb794f21edc
parent     542977d43841820614a32823c33415042430e230
[x86] add tests for bitwise logic of funnel shifts; NFC
Baseline tests for D130994
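
The fold these tests appear to baseline for (D130994) would hoist a funnel shift out of a bitwise logic operation when both operands are funnel shifts in the same direction with the same shift amount, since shifts distribute over or/and/xor. A minimal sketch of the expected transform in LLVM IR (illustrative only, not part of this patch; the post-fold value names are made up):

  ; before: two funnel shifts with a common shift amount feed the logic op
  %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %s)
  %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 %s)
  %res    = or i64 %fshl.0, %fshl.1
  ; after: the logic op is applied to the inputs and a single funnel shift remains
  %or.lhs = or i64 %a, %c
  %or.rhs = or i64 %b, %d
  %res    = call i64 @llvm.fshl.i64(i64 %or.lhs, i64 %or.rhs, i64 %s)

The tests with mismatched constant shift amounts (fshl_or_with_different_shift_value, fshr_or_with_different_shift_value) cover cases where such a fold would not apply.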
-rw-r--r--  llvm/test/CodeGen/X86/funnel-shift-logic-fold.ll  154
1 file changed, 154 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/funnel-shift-logic-fold.ll b/llvm/test/CodeGen/X86/funnel-shift-logic-fold.ll
new file mode 100644
index 000000000000..b20cca63aa80
--- /dev/null
+++ b/llvm/test/CodeGen/X86/funnel-shift-logic-fold.ll
@@ -0,0 +1,154 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64
+
+declare i64 @llvm.fshl.i64(i64, i64, i64) nounwind readnone
+declare i64 @llvm.fshr.i64(i64, i64, i64) nounwind readnone
+
+define i64 @hoist_fshl_from_or(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
+; X64-LABEL: hoist_fshl_from_or:
+; X64: # %bb.0:
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movl %r8d, %ecx
+; X64-NEXT: shldq %cl, %rsi, %rdi
+; X64-NEXT: shldq %cl, %rax, %rdx
+; X64-NEXT: orq %rdi, %rdx
+; X64-NEXT: movq %rdx, %rax
+; X64-NEXT: retq
+ %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %s)
+ %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 %s)
+ %res = or i64 %fshl.0, %fshl.1
+ ret i64 %res
+}
+
+define i64 @hoist_fshl_from_and(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
+; X64-LABEL: hoist_fshl_from_and:
+; X64: # %bb.0:
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movl %r8d, %ecx
+; X64-NEXT: shldq %cl, %rsi, %rdi
+; X64-NEXT: shldq %cl, %rax, %rdx
+; X64-NEXT: andq %rdi, %rdx
+; X64-NEXT: movq %rdx, %rax
+; X64-NEXT: retq
+ %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %s)
+ %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 %s)
+ %res = and i64 %fshl.0, %fshl.1
+ ret i64 %res
+}
+
+define i64 @hoist_fshl_from_xor(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
+; X64-LABEL: hoist_fshl_from_xor:
+; X64: # %bb.0:
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movl %r8d, %ecx
+; X64-NEXT: shldq %cl, %rsi, %rdi
+; X64-NEXT: shldq %cl, %rax, %rdx
+; X64-NEXT: xorq %rdi, %rdx
+; X64-NEXT: movq %rdx, %rax
+; X64-NEXT: retq
+ %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %s)
+ %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 %s)
+ %res = xor i64 %fshl.0, %fshl.1
+ ret i64 %res
+}
+
+define i64 @fshl_or_with_different_shift_value(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
+; X64-LABEL: fshl_or_with_different_shift_value:
+; X64: # %bb.0:
+; X64-NEXT: movq %rdx, %rax
+; X64-NEXT: shldq $12, %rsi, %rdi
+; X64-NEXT: shldq $13, %rcx, %rax
+; X64-NEXT: orq %rdi, %rax
+; X64-NEXT: retq
+ %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 12)
+ %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 13)
+ %res = or i64 %fshl.0, %fshl.1
+ ret i64 %res
+}
+
+define i64 @hoist_fshl_from_or_const_shift(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
+; X64-LABEL: hoist_fshl_from_or_const_shift:
+; X64: # %bb.0:
+; X64-NEXT: movq %rdx, %rax
+; X64-NEXT: shldq $15, %rsi, %rdi
+; X64-NEXT: shldq $15, %rcx, %rax
+; X64-NEXT: orq %rdi, %rax
+; X64-NEXT: retq
+ %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 15)
+ %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 15)
+ %res = or i64 %fshl.0, %fshl.1
+ ret i64 %res
+}
+
+define i64 @hoist_fshr_from_or(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
+; X64-LABEL: hoist_fshr_from_or:
+; X64: # %bb.0:
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movl %r8d, %ecx
+; X64-NEXT: shrdq %cl, %rdi, %rsi
+; X64-NEXT: shrdq %cl, %rdx, %rax
+; X64-NEXT: orq %rsi, %rax
+; X64-NEXT: retq
+ %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %s)
+ %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 %s)
+ %res = or i64 %fshr.0, %fshr.1
+ ret i64 %res
+}
+
+define i64 @hoist_fshr_from_and(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
+; X64-LABEL: hoist_fshr_from_and:
+; X64: # %bb.0:
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movl %r8d, %ecx
+; X64-NEXT: shrdq %cl, %rdi, %rsi
+; X64-NEXT: shrdq %cl, %rdx, %rax
+; X64-NEXT: andq %rsi, %rax
+; X64-NEXT: retq
+ %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %s)
+ %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 %s)
+ %res = and i64 %fshr.0, %fshr.1
+ ret i64 %res
+}
+
+define i64 @hoist_fshr_from_xor(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
+; X64-LABEL: hoist_fshr_from_xor:
+; X64: # %bb.0:
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movl %r8d, %ecx
+; X64-NEXT: shrdq %cl, %rdi, %rsi
+; X64-NEXT: shrdq %cl, %rdx, %rax
+; X64-NEXT: xorq %rsi, %rax
+; X64-NEXT: retq
+ %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %s)
+ %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 %s)
+ %res = xor i64 %fshr.0, %fshr.1
+ ret i64 %res
+}
+
+define i64 @fshr_or_with_different_shift_value(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
+; X64-LABEL: fshr_or_with_different_shift_value:
+; X64: # %bb.0:
+; X64-NEXT: movq %rdx, %rax
+; X64-NEXT: shldq $52, %rsi, %rdi
+; X64-NEXT: shldq $51, %rcx, %rax
+; X64-NEXT: orq %rdi, %rax
+; X64-NEXT: retq
+ %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 12)
+ %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 13)
+ %res = or i64 %fshr.0, %fshr.1
+ ret i64 %res
+}
+
+define i64 @hoist_fshr_from_or_const_shift(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
+; X64-LABEL: hoist_fshr_from_or_const_shift:
+; X64: # %bb.0:
+; X64-NEXT: movq %rdx, %rax
+; X64-NEXT: shldq $49, %rsi, %rdi
+; X64-NEXT: shldq $49, %rcx, %rax
+; X64-NEXT: orq %rdi, %rax
+; X64-NEXT: retq
+ %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 15)
+ %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 15)
+ %res = or i64 %fshr.0, %fshr.1
+ ret i64 %res
+}