[X86] vector-shift tests - regenerate VPTERNLOG comments
RKSimon committed Dec 17, 2024
1 parent 8bbbcad commit 2a92290
Showing 6 changed files with 30 additions and 30 deletions.
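
The regenerated check lines replace the raw VPTERNLOG immediate (e.g. $108) with a decoded boolean expression. Each result bit of VPTERNLOG is looked up in the 8-bit immediate, indexed by the corresponding bits of the three operands: bit 2 of the index comes from the destination (A), bit 1 from the first source (B), bit 0 from the second source (C). A minimal Python sketch of that decoding (the ternlog_imm helper is illustrative, not part of LLVM's tooling):

def ternlog_imm(f):
    # Truth table of a 3-input boolean function f(a, b, c), packed into an
    # 8-bit immediate; index bit 2 = A (dst), bit 1 = B (src1), bit 0 = C (src2).
    imm = 0
    for a in (0, 1):
        for b in (0, 1):
            for c in (0, 1):
                imm |= f(a, b, c) << ((a << 2) | (b << 1) | c)
    return imm

# 108 (0x6C) is B ^ (A & C), matching the new comments below, e.g.
# "vpternlogq {{.*#+}} xmm0 = xmm1 ^ (xmm0 & mem)" for
# "vpternlogq $108, mem, %xmm1, %xmm0" (AT&T order: src3, src2, dst).
assert ternlog_imm(lambda a, b, c: b ^ (a & c)) == 108
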
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
@@ -1553,7 +1553,7 @@ define <8 x i16> @constant_shift_v8i16_pairs(<8 x i16> %a) nounwind {
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512DQVL-NEXT: vmovdqa {{.*#+}} xmm1 = [256,256,16384,16384,4096,4096,512,512]
-; AVX512DQVL-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0
+; AVX512DQVL-NEXT: vpternlogq {{.*#+}} xmm0 = xmm1 ^ (xmm0 & mem)
; AVX512DQVL-NEXT: vpsubw %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: retq
;
@@ -1734,15 +1734,15 @@ define <16 x i8> @constant_shift_v16i8_pairs(<16 x i8> %a) nounwind {
; AVX512DQVL-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [32768,4096,512,8192,16384,u,2048,1024]
; AVX512DQVL-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5],xmm1[6,7]
; AVX512DQVL-NEXT: vmovdqa {{.*#+}} xmm1 = [64,64,8,8,1,1,16,16,32,32,128,128,4,4,2,2]
-; AVX512DQVL-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0
+; AVX512DQVL-NEXT: vpternlogq {{.*#+}} xmm0 = xmm1 ^ (xmm0 & mem)
; AVX512DQVL-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: constant_shift_v16i8_pairs:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm1 = [64,64,8,8,1,1,16,16,32,32,128,128,4,4,2,2]
-; AVX512BWVL-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0
+; AVX512BWVL-NEXT: vpternlogq {{.*#+}} xmm0 = xmm1 ^ (xmm0 & mem)
; AVX512BWVL-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: retq
;
@@ -1822,7 +1822,7 @@ define <16 x i8> @constant_shift_v16i8_quads(<16 x i8> %a) nounwind {
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,64,64,64,64,16,16,16,16,32,32,32,32]
-; AVX512VL-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0
+; AVX512VL-NEXT: vpternlogq {{.*#+}} xmm0 = xmm1 ^ (xmm0 & mem)
; AVX512VL-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
@@ -2021,15 +2021,15 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX512DQVL-NEXT: vpbroadcastd {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512DQVL-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; AVX512DQVL-NEXT: vpternlogd {{.*#+}} xmm0 = xmm1 ^ (xmm0 & mem)
; AVX512DQVL-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: splatconstant_shift_v16i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpbroadcastb {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512BWVL-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; AVX512BWVL-NEXT: vpternlogd {{.*#+}} xmm0 = xmm1 ^ (xmm0 & mem)
; AVX512BWVL-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: retq
;
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
@@ -952,7 +952,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512DQVL-NEXT: vpsrlw %xmm1, %xmm3, %xmm1
; AVX512DQVL-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX512DQVL-NEXT: vpternlogq $108, %ymm0, %ymm2, %ymm1
+; AVX512DQVL-NEXT: vpternlogq {{.*#+}} ymm1 = ymm2 ^ (ymm1 & ymm0)
; AVX512DQVL-NEXT: vpsubb %ymm2, %ymm1, %ymm0
; AVX512DQVL-NEXT: retq
;
@@ -1321,7 +1321,7 @@ define <32 x i8> @splatvar_modulo_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwi
; AVX512DQVL-NEXT: vpsrlw %xmm1, %xmm3, %xmm1
; AVX512DQVL-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX512DQVL-NEXT: vpternlogq $108, %ymm0, %ymm2, %ymm1
+; AVX512DQVL-NEXT: vpternlogq {{.*#+}} ymm1 = ymm2 ^ (ymm1 & ymm0)
; AVX512DQVL-NEXT: vpsubb %ymm2, %ymm1, %ymm0
; AVX512DQVL-NEXT: retq
;
@@ -1681,7 +1681,7 @@ define <16 x i16> @constant_shift_v16i16_pairs(<16 x i16> %a) nounwind {
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX512DQVL-NEXT: vmovdqa {{.*#+}} ymm1 = [32768,32768,16384,16384,4096,4096,8192,8192,512,512,256,256,1024,1024,2048,2048]
-; AVX512DQVL-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512DQVL-NEXT: vpternlogq {{.*#+}} ymm0 = ymm1 ^ (ymm0 & mem)
; AVX512DQVL-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: retq
;
@@ -1926,15 +1926,15 @@ define <32 x i8> @constant_shift_v32i8_pairs(<32 x i8> %a) nounwind {
; AVX512DQVL-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6],xmm0[7]
; AVX512DQVL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX512DQVL-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,2,2,4,4,8,8,32,32,2,2,32,32,128,128,1,1,32,32,8,8,2,2,16,16,4,4,8,8,64,64]
-; AVX512DQVL-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512DQVL-NEXT: vpternlogq {{.*#+}} ymm0 = ymm1 ^ (ymm0 & mem)
; AVX512DQVL-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: constant_shift_v32i8_pairs:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,2,2,4,4,8,8,32,32,2,2,32,32,128,128,1,1,32,32,8,8,2,2,16,16,4,4,8,8,64,64]
-; AVX512BWVL-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512BWVL-NEXT: vpternlogq {{.*#+}} ymm0 = ymm1 ^ (ymm0 & mem)
; AVX512BWVL-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX512BWVL-NEXT: retq
;
@@ -2025,7 +2025,7 @@ define <32 x i8> @constant_shift_v32i8_quads(<32 x i8> %a) nounwind {
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,64,64,64,64,16,16,16,16,32,32,32,32,2,2,2,2,1,1,1,1,4,4,4,4,8,8,8,8]
-; AVX512VL-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512VL-NEXT: vpternlogq {{.*#+}} ymm0 = ymm1 ^ (ymm0 & mem)
; AVX512VL-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
@@ -2295,15 +2295,15 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpbroadcastd {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512DQVL-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm0
+; AVX512DQVL-NEXT: vpternlogd {{.*#+}} ymm0 = ymm1 ^ (ymm0 & mem)
; AVX512DQVL-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: splatconstant_shift_v32i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX512BWVL-NEXT: vpbroadcastb {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512BWVL-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm0
+; AVX512BWVL-NEXT: vpternlogd {{.*#+}} ymm0 = ymm1 ^ (ymm0 & mem)
; AVX512BWVL-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX512BWVL-NEXT: retq
;
14 changes: 7 additions & 7 deletions llvm/test/CodeGen/X86/vector-shift-ashr-512.ll
@@ -218,7 +218,7 @@ define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512BW-NEXT: vpsrlw %xmm1, %xmm3, %xmm1
; AVX512BW-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX512BW-NEXT: vpbroadcastb %xmm1, %zmm1
-; AVX512BW-NEXT: vpternlogq $108, %zmm0, %zmm2, %zmm1
+; AVX512BW-NEXT: vpternlogq {{.*#+}} zmm1 = zmm2 ^ (zmm1 & zmm0)
; AVX512BW-NEXT: vpsubb %zmm2, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%splat = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
@@ -307,7 +307,7 @@ define <64 x i8> @splatvar_modulo_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwi
; AVX512BW-NEXT: vpsrlw %xmm1, %xmm3, %xmm1
; AVX512BW-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX512BW-NEXT: vpbroadcastb %xmm1, %zmm1
-; AVX512BW-NEXT: vpternlogq $108, %zmm0, %zmm2, %zmm1
+; AVX512BW-NEXT: vpternlogq {{.*#+}} zmm1 = zmm2 ^ (zmm1 & zmm0)
; AVX512BW-NEXT: vpsubb %zmm2, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%mod = and <64 x i8> %b, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
@@ -365,7 +365,7 @@ define <32 x i16> @constant_shift_v32i16_pairs(<32 x i16> %a) nounwind {
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [128,128,128,128,64,64,64,64,32,32,32,32,16,16,16,16,8,8,8,8,4,4,4,4,2,2,2,2,1,1,1,1]
-; AVX512DQ-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm1 ^ (zmm0 & mem)
; AVX512DQ-NEXT: vpsubw %ymm1, %ymm0, %ymm1
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; AVX512DQ-NEXT: vpsubw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
@@ -450,7 +450,7 @@ define <64 x i8> @constant_shift_v64i8_pairs(<64 x i8> %a) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [2,2,2,2,32,32,32,32,2,2,8,8,8,8,4,4,2,2,64,64,16,16,32,32,8,8,1,1,4,4,128,128,1,1,64,64,128,128,1,1,8,8,128,128,64,64,16,16,64,64,8,8,8,8,16,16,2,2,2,2,4,4,2,2]
-; AVX512BW-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512BW-NEXT: vpternlogq {{.*#+}} zmm0 = zmm1 ^ (zmm0 & mem)
; AVX512BW-NEXT: vpsubb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%shift = ashr <64 x i8> %a, <i8 6, i8 6, i8 6, i8 6, i8 2, i8 2, i8 2, i8 2, i8 6, i8 6, i8 4, i8 4, i8 4, i8 4, i8 5, i8 5, i8 6, i8 6, i8 1, i8 1, i8 3, i8 3, i8 2, i8 2, i8 4, i8 4, i8 7, i8 7, i8 5, i8 5, i8 0, i8 0, i8 7, i8 7, i8 1, i8 1, i8 0, i8 0, i8 7, i8 7, i8 4, i8 4, i8 0, i8 0, i8 1, i8 1, i8 3, i8 3, i8 1, i8 1, i8 4, i8 4, i8 4, i8 4, i8 3, i8 3, i8 6, i8 6, i8 6, i8 6, i8 5, i8 5, i8 6, i8 6>
@@ -462,7 +462,7 @@ define <64 x i8> @constant_shift_v64i8_quads(<64 x i8> %a) nounwind {
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [16,16,16,16,4,4,4,4,32,32,32,32,1,1,1,1,1,1,1,1,4,4,4,4,1,1,1,1,4,4,4,4,8,8,8,8,16,16,16,16,16,16,16,16,2,2,2,2,64,64,64,64,4,4,4,4,32,32,32,32,128,128,128,128]
-; AVX512DQ-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm1 ^ (zmm0 & mem)
; AVX512DQ-NEXT: vpsubb %ymm1, %ymm0, %ymm1
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; AVX512DQ-NEXT: vpsubb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
@@ -473,7 +473,7 @@ define <64 x i8> @constant_shift_v64i8_quads(<64 x i8> %a) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [16,16,16,16,4,4,4,4,32,32,32,32,1,1,1,1,1,1,1,1,4,4,4,4,1,1,1,1,4,4,4,4,8,8,8,8,16,16,16,16,16,16,16,16,2,2,2,2,64,64,64,64,4,4,4,4,32,32,32,32,128,128,128,128]
-; AVX512BW-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512BW-NEXT: vpternlogq {{.*#+}} zmm0 = zmm1 ^ (zmm0 & mem)
; AVX512BW-NEXT: vpsubb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%shift = ashr <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 5, i8 5, i8 5, i8 5, i8 2, i8 2, i8 2, i8 2, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 5, i8 5, i8 5, i8 5, i8 7, i8 7, i8 7, i8 7, i8 5, i8 5, i8 5, i8 5, i8 4, i8 4, i8 4, i8 4, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 6, i8 6, i8 6, i8 6, i8 1, i8 1, i8 1, i8 1, i8 5, i8 5, i8 5, i8 5, i8 2, i8 2, i8 2, i8 2, i8 0, i8 0, i8 0, i8 0>
@@ -540,7 +540,7 @@ define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512BW-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm0
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm0 = zmm1 ^ (zmm0 & mem)
; AVX512BW-NEXT: vpsubb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%shift = ashr <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll
@@ -2344,15 +2344,15 @@ define <8 x i8> @splatconstant_shift_v8i8(<8 x i8> %a) nounwind {
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX512DQVL-NEXT: vpbroadcastd {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512DQVL-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; AVX512DQVL-NEXT: vpternlogd {{.*#+}} xmm0 = xmm1 ^ (xmm0 & mem)
; AVX512DQVL-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: splatconstant_shift_v8i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpbroadcastb {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512BWVL-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; AVX512BWVL-NEXT: vpternlogd {{.*#+}} xmm0 = xmm1 ^ (xmm0 & mem)
; AVX512BWVL-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: retq
;
@@ -2414,15 +2414,15 @@ define <4 x i8> @splatconstant_shift_v4i8(<4 x i8> %a) nounwind {
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX512DQVL-NEXT: vpbroadcastd {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512DQVL-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; AVX512DQVL-NEXT: vpternlogd {{.*#+}} xmm0 = xmm1 ^ (xmm0 & mem)
; AVX512DQVL-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: splatconstant_shift_v4i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpbroadcastb {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512BWVL-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; AVX512BWVL-NEXT: vpternlogd {{.*#+}} xmm0 = xmm1 ^ (xmm0 & mem)
; AVX512BWVL-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: retq
;
@@ -2484,15 +2484,15 @@ define <2 x i8> @splatconstant_shift_v2i8(<2 x i8> %a) nounwind {
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX512DQVL-NEXT: vpbroadcastd {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512DQVL-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; AVX512DQVL-NEXT: vpternlogd {{.*#+}} xmm0 = xmm1 ^ (xmm0 & mem)
; AVX512DQVL-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: splatconstant_shift_v2i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpbroadcastb {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512BWVL-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; AVX512BWVL-NEXT: vpternlogd {{.*#+}} xmm0 = xmm1 ^ (xmm0 & mem)
; AVX512BWVL-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: retq
;
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/X86/vector-shift-shl-256.ll
@@ -1371,7 +1371,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX512DQVL-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0]
; AVX512DQVL-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1]
; AVX512DQVL-NEXT: vpsllw $8, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm0
+; AVX512DQVL-NEXT: vpternlogd {{.*#+}} ymm0 = ymm0 | (ymm1 & mem)
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: constant_shift_v32i8:
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/X86/vector-shift-shl-512.ll
@@ -324,15 +324,15 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-NEXT: vpmaddubsw %ymm3, %ymm1, %ymm1
; AVX512DQ-NEXT: vpsllw $8, %ymm1, %ymm1
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm0
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 | (zmm2 & mem)
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0]
; AVX512BW-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1]
; AVX512BW-NEXT: vpsllw $8, %zmm0, %zmm0
-; AVX512BW-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm0
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 | (zmm1 & mem)
; AVX512BW-NEXT: retq
%shift = shl <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <64 x i8> %shift
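
The shl tests above use immediate 248 (0xF8) instead; the same decoding (reusing the illustrative ternlog_imm sketch near the top of this page) gives A | (B & C), hence comments such as "zmm0 = zmm0 | (zmm1 & mem)":

# 248 (0xF8) is A | (B & C); in the shl tests this merges the shifted
# odd-byte results (dst) with the masked even-byte products (src1 & mem).
assert ternlog_imm(lambda a, b, c: a | (b & c)) == 248
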
