patch-0001-passthrough-vtd-Don-t-DMA-to-the-stack-in-queue_inva.patch
From 8970834eb95586d87b064e8c7fc49ee8d2875db4 Mon Sep 17 00:00:00 2001
From: Andrew Cooper <[email protected]>
Date: Thu, 19 Oct 2017 11:50:18 +0100
Subject: [PATCH] passthrough/vtd: Don't DMA to the stack in
 queue_invalidate_wait()

DMA-ing to the stack is considered bad practice. In this case, if a
timeout occurs because of a sluggish device which is processing the
request, the completion notification will corrupt the stack of a
subsequent deeper call tree.

Place the poll_slot in a percpu area and DMA to that instead.

Fix the declaration of saddr in struct qinval_entry, to avoid a shift by
two. The requirement here is that the DMA address is dword aligned,
which is covered by poll_slot's type.

This change does not address other issues. Correlating completions
after a timeout with their request is a more complicated change.

Signed-off-by: Andrew Cooper <[email protected]>
Reviewed-by: Jan Beulich <[email protected]>
Reviewed-by: Kevin Tian <[email protected]>
---
 xen/drivers/passthrough/vtd/iommu.h  | 3 +--
 xen/drivers/passthrough/vtd/qinval.c | 9 +++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
index 1a992f72d6db..c9290a399615 100644
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -444,8 +444,7 @@ struct qinval_entry {
sdata : 32;
}lo;
struct {
- u64 res_1 : 2,
- saddr : 62;
+ u64 saddr;
}hi;
}inv_wait_dsc;
}q;
diff --git a/xen/drivers/passthrough/vtd/qinval.c b/xen/drivers/passthrough/vtd/qinval.c
index 01447cf9a84e..09cbd36ebb8c 100644
--- a/xen/drivers/passthrough/vtd/qinval.c
+++ b/xen/drivers/passthrough/vtd/qinval.c
@@ -147,13 +147,15 @@ static int __must_check queue_invalidate_wait(struct iommu *iommu,
u8 iflag, u8 sw, u8 fn,
bool_t flush_dev_iotlb)
{
- volatile u32 poll_slot = QINVAL_STAT_INIT;
+ static DEFINE_PER_CPU(uint32_t, poll_slot);
unsigned int index;
unsigned long flags;
u64 entry_base;
struct qinval_entry *qinval_entry, *qinval_entries;
+ uint32_t *this_poll_slot = &this_cpu(poll_slot);
spin_lock_irqsave(&iommu->register_lock, flags);
+ ACCESS_ONCE(*this_poll_slot) = QINVAL_STAT_INIT;
index = qinval_next_index(iommu);
entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
@@ -166,8 +168,7 @@ static int __must_check queue_invalidate_wait(struct iommu *iommu,
qinval_entry->q.inv_wait_dsc.lo.fn = fn;
qinval_entry->q.inv_wait_dsc.lo.res_1 = 0;
qinval_entry->q.inv_wait_dsc.lo.sdata = QINVAL_STAT_DONE;
- qinval_entry->q.inv_wait_dsc.hi.res_1 = 0;
- qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(&poll_slot) >> 2;
+ qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(this_poll_slot);
unmap_vtd_domain_page(qinval_entries);
qinval_update_qtail(iommu, index);
@@ -182,7 +183,7 @@ static int __must_check queue_invalidate_wait(struct iommu *iommu,
timeout = NOW() + MILLISECS(flush_dev_iotlb ?
iommu_dev_iotlb_timeout : VTD_QI_TIMEOUT);
- while ( poll_slot != QINVAL_STAT_DONE )
+ while ( ACCESS_ONCE(*this_poll_slot) != QINVAL_STAT_DONE )
{
if ( NOW() > timeout )
{
--
2.26.3
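
Illustrative note, not part of the patch above: the sketch below mimics the pattern the
commit message describes -- give the device a statically allocated, per-CPU, dword-aligned
status slot to write into instead of a stack variable, so a completion that arrives after
the wait has timed out lands somewhere harmless. It is plain user-space C, under stated
assumptions: a pthread stands in for the IOMMU's status write, C11 atomics stand in for
ACCESS_ONCE(), and the made-up names fake_cpu_id(), NR_FAKE_CPUS, STAT_INIT, STAT_DONE and
slow_device() replace Xen's DEFINE_PER_CPU()/this_cpu() machinery, which does not exist
outside the hypervisor.

/*
 * Illustrative sketch only -- not Xen code.  The memory a device (here: a
 * worker thread) writes a completion status into must outlive the waiting
 * function, even when the wait times out.  A static per-CPU slot has that
 * property; a stack variable does not.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define NR_FAKE_CPUS 4
#define STAT_INIT    1u
#define STAT_DONE    2u

/* Analogue of DEFINE_PER_CPU(uint32_t, poll_slot): one slot per "CPU".
 * _Alignas(4) mirrors the dword-alignment requirement on the status address. */
static _Alignas(4) _Atomic uint32_t poll_slot[NR_FAKE_CPUS];

/* Stand-in for smp_processor_id()/this_cpu(); always 0 in this sketch. */
static unsigned int fake_cpu_id(void) { return 0; }

/* "Device": completes the request late, after the waiter has given up. */
static void *slow_device(void *arg)
{
    _Atomic uint32_t *slot = arg;

    sleep(2);                          /* slower than the waiter's timeout */
    atomic_store(slot, STAT_DONE);     /* the "DMA" write of the status    */
    return NULL;
}

static int queue_invalidate_wait_sketch(void)
{
    _Atomic uint32_t *slot = &poll_slot[fake_cpu_id()];
    pthread_t dev;
    int spins;

    atomic_store(slot, STAT_INIT);
    pthread_create(&dev, NULL, slow_device, slot);
    pthread_detach(dev);

    /* Poll with a short timeout, like the ACCESS_ONCE() loop in the patch. */
    for ( spins = 0; spins < 10; spins++ )
    {
        if ( atomic_load(slot) == STAT_DONE )
            return 0;
        usleep(100000);
    }

    /*
     * Timeout.  Because the slot is static, the late write from slow_device()
     * lands in a known location instead of clobbering whatever now occupies
     * the old stack frame -- the failure mode the patch removes.
     */
    return -1;
}

int main(void)
{
    printf("wait returned %d\n", queue_invalidate_wait_sketch());
    sleep(3);   /* let the late completion land harmlessly in the static slot */
    printf("slot now %u\n", (unsigned)atomic_load(&poll_slot[0]));
    return 0;
}

The _Alignas(4) on the slot corresponds to the commit message's point about dword alignment:
once the status address is guaranteed to be 4-byte aligned by its type, the full address can
be stored in hi.saddr directly, without the previous shift by two.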