Commit c653284

Add necessary LLVM patches
committed
1 parent 4753fed commit c653284

File tree

3 files changed: +149 -0 lines changed


deps/llvm.mk

Lines changed: 2 additions & 0 deletions
@@ -494,6 +494,8 @@ $(eval $(call LLVM_PATCH,llvm-PR278923)) # Issue #19976, Remove for 4.0
 $(eval $(call LLVM_PATCH,llvm-D28759-loopclearance))
 $(eval $(call LLVM_PATCH,llvm-D28786-callclearance))
 $(eval $(call LLVM_PATCH,llvm-rL293230-icc17-cmake)) # Remove for 4.0
+$(eval $(call LLVM_PATCH,llvm-D32593))
+$(eval $(call LLVM_PATCH,llvm-D33179))
 endif # LLVM_VER
 
 ifeq ($(LLVM_VER),3.7.1)

deps/patches/llvm-D32593.patch

Lines changed: 83 additions & 0 deletions
@@ -0,0 +1,83 @@
From 5eeab81d22e07b6e12821067fced590f534c251a Mon Sep 17 00:00:00 2001
From: Keno Fischer <[email protected]>
Date: Thu, 27 Apr 2017 14:33:33 -0400
Subject: [PATCH] [SROA] Fix crash due to bad bitcast

Summary:
As shown in the test case, SROA was crashing when trying to split
stores (to the alloca) of loads (from anywhere), because it assumed
the pointer operand to the loads and stores had to have the same
address space. This isn't the case. Make sure to use the correct
pointer type for both the load and the store.

Reviewers: chandlerc, majnemer, sanjoy

Subscribers: arsenm, llvm-commits

Differential Revision: https://reviews.llvm.org/D32593
---
 lib/Transforms/Scalar/SROA.cpp         |  7 ++++---
 test/Transforms/SROA/address-spaces.ll | 18 ++++++++++++++++++
 2 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index d01e91a..610d5a8 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -3697,7 +3697,8 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
     int Idx = 0, Size = Offsets.Splits.size();
     for (;;) {
       auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
-      auto *PartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace());
+      auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace());
+      auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace());
 
       // Either lookup a split load or create one.
       LoadInst *PLoad;
@@ -3708,7 +3709,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
         PLoad = IRB.CreateAlignedLoad(
             getAdjustedPtr(IRB, DL, LoadBasePtr,
                            APInt(DL.getPointerSizeInBits(), PartOffset),
-                           PartPtrTy, LoadBasePtr->getName() + "."),
+                           LoadPartPtrTy, LoadBasePtr->getName() + "."),
             getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
             LI->getName());
       }
@@ -3718,7 +3719,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
       StoreInst *PStore = IRB.CreateAlignedStore(
           PLoad, getAdjustedPtr(IRB, DL, StoreBasePtr,
                                 APInt(DL.getPointerSizeInBits(), PartOffset),
-                                PartPtrTy, StoreBasePtr->getName() + "."),
+                                StorePartPtrTy, StoreBasePtr->getName() + "."),
           getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);
 
       // Now build a new slice for the alloca.
diff --git a/test/Transforms/SROA/address-spaces.ll b/test/Transforms/SROA/address-spaces.ll
index 119f225..8fba30c 100644
--- a/test/Transforms/SROA/address-spaces.ll
+++ b/test/Transforms/SROA/address-spaces.ll
@@ -83,3 +83,21 @@ define void @pr27557() {
   store i32 addrspace(3)* @l, i32 addrspace(3)** %3, align 8
   ret void
 }
+
+; Make sure pre-splitting doesn't try to introduce an illegal bitcast
+define float @presplit(i64 addrspace(1)* %p) {
+entry:
+; CHECK-LABEL: @presplit(
+; CHECK: %[[CAST:.*]] = bitcast i64 addrspace(1)* {{.*}} to i32 addrspace(1)*
+; CHECK: load i32, i32 addrspace(1)* %[[CAST]]
+  %b = alloca i64
+  %b.cast = bitcast i64* %b to [2 x float]*
+  %b.gep1 = getelementptr [2 x float], [2 x float]* %b.cast, i32 0, i32 0
+  %b.gep2 = getelementptr [2 x float], [2 x float]* %b.cast, i32 0, i32 1
+  %l = load i64, i64 addrspace(1)* %p
+  store i64 %l, i64* %b
+  %f1 = load float, float* %b.gep1
+  %f2 = load float, float* %b.gep2
+  %ret = fadd float %f1, %f2
+  ret float %ret
+}
--
2.9.3

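Note (not part of the commit): a minimal standalone C++ sketch of the type distinction the SROA fix above introduces. It assumes the LLVM C++ API of this release series (Type::getIntNTy, Type::getPointerTo, raw_ostream printing); the address-space numbers mirror the test case, where the pre-split load reads from addrspace(1) while the store targets the alloca in the default address space 0. Reusing one PartPtrTy derived from the store for the load is what produced the illegal bitcast.

// Hedged illustration only -- not code from the patch itself.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  // The integer "part" type SROA builds while pre-splitting, e.g. 32 bits.
  Type *PartTy = Type::getIntNTy(Ctx, 32);

  // After the fix, the load and store each get a pointer type in their own
  // address space instead of sharing one derived from the store.
  PointerType *LoadPartPtrTy = PartTy->getPointerTo(/*AddrSpace=*/1);
  PointerType *StorePartPtrTy = PartTy->getPointerTo(/*AddrSpace=*/0);

  LoadPartPtrTy->print(outs());   // i32 addrspace(1)*
  outs() << "\n";
  StorePartPtrTy->print(outs());  // i32*
  outs() << "\n";
  return 0;
}

The two printed pointer types differ only in address space, and a plain bitcast between pointers in different address spaces is not legal IR, which is why the pass needs one pointer type per side.
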
deps/patches/llvm-D33179.patch

Lines changed: 64 additions & 0 deletions
@@ -0,0 +1,64 @@
From b1a005ba688397ca360e89cd6c6f51f232d6c25e Mon Sep 17 00:00:00 2001
From: Keno Fischer <[email protected]>
Date: Fri, 19 May 2017 18:42:20 -0400
Subject: [PATCH] [Sink] Fix predicate in legality check

Summary:
isSafeToSpeculativelyExecute is the wrong predicate to use here.
All that checks for is whether it is safe to hoist a value due to
unaligned/un-dereferencable accesses. However, not only are we doing
sinking rather than hoisting, our concern is that the location
we're loading from may have been modified. Instead forbid sinking
any load across a critical edge.

Reviewers: majnemer

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D33179
---
 lib/Transforms/Scalar/Sink.cpp      |  2 +-
 test/Transforms/Sink/badloadsink.ll | 18 ++++++++++++++++++
 2 files changed, 19 insertions(+), 1 deletion(-)
 create mode 100644 test/Transforms/Sink/badloadsink.ll

diff --git a/lib/Transforms/Scalar/Sink.cpp b/lib/Transforms/Scalar/Sink.cpp
index 102e9ea..5210f16 100644
--- a/lib/Transforms/Scalar/Sink.cpp
+++ b/lib/Transforms/Scalar/Sink.cpp
@@ -114,7 +114,7 @@ static bool IsAcceptableTarget(Instruction *Inst, BasicBlock *SuccToSinkTo,
   if (SuccToSinkTo->getUniquePredecessor() != Inst->getParent()) {
     // We cannot sink a load across a critical edge - there may be stores in
     // other code paths.
-    if (!isSafeToSpeculativelyExecute(Inst))
+    if (isa<LoadInst>(Inst))
      return false;
 
     // We don't want to sink across a critical edge if we don't dominate the
diff --git a/test/Transforms/Sink/badloadsink.ll b/test/Transforms/Sink/badloadsink.ll
new file mode 100644
index 0000000..e3f4884
--- /dev/null
+++ b/test/Transforms/Sink/badloadsink.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -basicaa -sink -S | FileCheck %s
+declare void @foo(i64 *)
+define i64 @sinkload(i1 %cmp) {
+; CHECK-LABEL: @sinkload
+top:
+  %a = alloca i64
+; CHECK: call void @foo(i64* %a)
+; CHECK-NEXT: %x = load i64, i64* %a
+  call void @foo(i64* %a)
+  %x = load i64, i64* %a
+  br i1 %cmp, label %A, label %B
+A:
+  store i64 0, i64 *%a
+  br label %B
+B:
+; CHECK-NOT: load i64, i64 *%a
+  ret i64 %x
+}
--
2.9.3

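Note (not part of the commit): a small C++ sketch of the corrected legality shape in the Sink pass. The helper name and its standalone framing are hypothetical; the real check lives inside IsAcceptableTarget in lib/Transforms/Scalar/Sink.cpp, as the one-line diff above shows. The point of the change is that speculation safety is the wrong question: a perfectly speculatable load still must not be sunk across a critical edge, because another incoming path may store to the loaded location first.

// Hedged illustration only -- helper name and framing are hypothetical.
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// True if the critical-edge concern addressed by this patch does not block
// sinking Inst into SuccToSinkTo.
static bool sinkTargetIsMemorySafe(const Instruction *Inst,
                                   const BasicBlock *SuccToSinkTo) {
  // Non-critical edge: the target block is only reachable from Inst's own
  // block, so memory cannot be modified on another path in between.
  if (SuccToSinkTo->getUniquePredecessor() == Inst->getParent())
    return true;
  // Critical edge: after the fix, any load is rejected outright, regardless
  // of whether it would be safe to *speculate* (the old, wrong predicate).
  return !isa<LoadInst>(Inst);
}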