Skip to content

Commit e281570

Browse files
author
Marc Zyngier
committed
Merge branch kvm-arm64/misc-6.10 into kvmarm-master/next
* kvm-arm64/misc-6.10:

  Misc fixes and updates targeting 6.10:

  - Improve boot-time diagnostics when the sysreg tables are not
    correctly sorted

  - Allow FFA_MSG_SEND_DIRECT_REQ in the FFA proxy

  - Fix duplicate XNX field in the ID_AA64MMFR1_EL1 writeable mask

  - Allocate PPIs and SGIs outside of the vcpu structure, allowing
    for smaller EL2 mapping and some flexibility in implementing
    more or less than 32 private IRQs

  - Use bitmap_gather() instead of its open-coded equivalent

  - Make protected mode use hVHE if available

  - Purge stale mpidr_data if a vcpu is created after the MPIDR
    map has been created

  KVM: arm64: Destroy mpidr_data for 'late' vCPU creation
  KVM: arm64: Use hVHE in pKVM by default on CPUs with VHE support
  KVM: arm64: Fix hvhe/nvhe early alias parsing
  KVM: arm64: Convert kvm_mpidr_index() to bitmap_gather()
  KVM: arm64: vgic: Allocate private interrupts on demand
  KVM: arm64: Remove duplicated AA64MMFR1_EL1 XNX
  KVM: arm64: Remove FFA_MSG_SEND_DIRECT_REQ from the denylist
  KVM: arm64: Improve out-of-order sysreg table diagnostics

Signed-off-by: Marc Zyngier <[email protected]>
2 parents 8540bd1 + ce5d244 commit e281570

File tree

7 files changed

+114
-48
lines changed

7 files changed

+114
-48
lines changed

Diff for: arch/arm64/include/asm/kvm_host.h

+3-13
Original file line number | Diff line number | Diff line change
@@ -221,20 +221,10 @@ struct kvm_mpidr_data {
221221

222222
static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
223223
{
224-
unsigned long mask = data->mpidr_mask;
225-
u64 aff = mpidr & MPIDR_HWID_BITMASK;
226-
int nbits, bit, bit_idx = 0;
227-
u16 index = 0;
224+
unsigned long index = 0, mask = data->mpidr_mask;
225+
unsigned long aff = mpidr & MPIDR_HWID_BITMASK;
228226

229-
/*
230-
* If this looks like RISC-V's BEXT or x86's PEXT
231-
* instructions, it isn't by accident.
232-
*/
233-
nbits = fls(mask);
234-
for_each_set_bit(bit, &mask, nbits) {
235-
index |= (aff & BIT(bit)) >> (bit - bit_idx);
236-
bit_idx++;
237-
}
227+
bitmap_gather(&index, &aff, &mask, fls(mask));
238228

239229
return index;
240230
}

Diff for: arch/arm64/kernel/pi/idreg-override.c

+2-2
Original file line number | Diff line number | Diff line change
@@ -209,8 +209,8 @@ static const struct {
209209
char alias[FTR_ALIAS_NAME_LEN];
210210
char feature[FTR_ALIAS_OPTION_LEN];
211211
} aliases[] __initconst = {
212-
{ "kvm_arm.mode=nvhe", "id_aa64mmfr1.vh=0" },
213-
{ "kvm_arm.mode=protected", "id_aa64mmfr1.vh=0" },
212+
{ "kvm_arm.mode=nvhe", "arm64_sw.hvhe=0 id_aa64mmfr1.vh=0" },
213+
{ "kvm_arm.mode=protected", "arm64_sw.hvhe=1" },
214214
{ "arm64.nosve", "id_aa64pfr0.sve=0" },
215215
{ "arm64.nosme", "id_aa64pfr1.sme=0" },
216216
{ "arm64.nobti", "id_aa64pfr1.bt=0" },

Diff for: arch/arm64/kvm/arm.c

+41-9
Original file line number | Diff line number | Diff line change
@@ -218,6 +218,23 @@ void kvm_arch_create_vm_debugfs(struct kvm *kvm)
218218
kvm_sys_regs_create_debugfs(kvm);
219219
}
220220

221+
static void kvm_destroy_mpidr_data(struct kvm *kvm)
222+
{
223+
struct kvm_mpidr_data *data;
224+
225+
mutex_lock(&kvm->arch.config_lock);
226+
227+
data = rcu_dereference_protected(kvm->arch.mpidr_data,
228+
lockdep_is_held(&kvm->arch.config_lock));
229+
if (data) {
230+
rcu_assign_pointer(kvm->arch.mpidr_data, NULL);
231+
synchronize_rcu();
232+
kfree(data);
233+
}
234+
235+
mutex_unlock(&kvm->arch.config_lock);
236+
}
237+
221238
/**
222239
* kvm_arch_destroy_vm - destroy the VM data structure
223240
* @kvm: pointer to the KVM struct
@@ -232,7 +249,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
232249
if (is_protected_kvm_enabled())
233250
pkvm_destroy_hyp_vm(kvm);
234251

235-
kfree(kvm->arch.mpidr_data);
252+
kvm_destroy_mpidr_data(kvm);
253+
236254
kfree(kvm->arch.sysreg_masks);
237255
kvm_destroy_vcpus(kvm);
238256

@@ -450,6 +468,13 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
450468

451469
vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
452470

471+
/*
472+
* This vCPU may have been created after mpidr_data was initialized.
473+
* Throw out the pre-computed mappings if that is the case which forces
474+
* KVM to fall back to iteratively searching the vCPUs.
475+
*/
476+
kvm_destroy_mpidr_data(vcpu->kvm);
477+
453478
err = kvm_vgic_vcpu_init(vcpu);
454479
if (err)
455480
return err;
@@ -687,7 +712,8 @@ static void kvm_init_mpidr_data(struct kvm *kvm)
687712

688713
mutex_lock(&kvm->arch.config_lock);
689714

690-
if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1)
715+
if (rcu_access_pointer(kvm->arch.mpidr_data) ||
716+
atomic_read(&kvm->online_vcpus) == 1)
691717
goto out;
692718

693719
kvm_for_each_vcpu(c, vcpu, kvm) {
@@ -724,7 +750,7 @@ static void kvm_init_mpidr_data(struct kvm *kvm)
724750
data->cmpidr_to_idx[index] = c;
725751
}
726752

727-
kvm->arch.mpidr_data = data;
753+
rcu_assign_pointer(kvm->arch.mpidr_data, data);
728754
out:
729755
mutex_unlock(&kvm->arch.config_lock);
730756
}
@@ -2562,21 +2588,27 @@ static int __init init_hyp_mode(void)
25622588

25632589
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
25642590
{
2565-
struct kvm_vcpu *vcpu;
2591+
struct kvm_vcpu *vcpu = NULL;
2592+
struct kvm_mpidr_data *data;
25662593
unsigned long i;
25672594

25682595
mpidr &= MPIDR_HWID_BITMASK;
25692596

2570-
if (kvm->arch.mpidr_data) {
2571-
u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr);
2597+
rcu_read_lock();
2598+
data = rcu_dereference(kvm->arch.mpidr_data);
25722599

2573-
vcpu = kvm_get_vcpu(kvm,
2574-
kvm->arch.mpidr_data->cmpidr_to_idx[idx]);
2600+
if (data) {
2601+
u16 idx = kvm_mpidr_index(data, mpidr);
2602+
2603+
vcpu = kvm_get_vcpu(kvm, data->cmpidr_to_idx[idx]);
25752604
if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
25762605
vcpu = NULL;
2606+
}
25772607

2608+
rcu_read_unlock();
2609+
2610+
if (vcpu)
25782611
return vcpu;
2579-
}
25802612

25812613
kvm_for_each_vcpu(i, vcpu, kvm) {
25822614
if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))

Diff for: arch/arm64/kvm/hyp/nvhe/ffa.c

-1
Original file line number | Diff line number | Diff line change
@@ -600,7 +600,6 @@ static bool ffa_call_supported(u64 func_id)
600600
case FFA_MSG_POLL:
601601
case FFA_MSG_WAIT:
602602
/* 32-bit variants of 64-bit calls */
603-
case FFA_MSG_SEND_DIRECT_REQ:
604603
case FFA_MSG_SEND_DIRECT_RESP:
605604
case FFA_RXTX_MAP:
606605
case FFA_MEM_DONATE:

Diff for: arch/arm64/kvm/sys_regs.c

+4-3
Original file line number | Diff line number | Diff line change
@@ -2338,7 +2338,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
23382338
ID_AA64MMFR0_EL1_TGRAN16_2)),
23392339
ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
23402340
ID_AA64MMFR1_EL1_HCX |
2341-
ID_AA64MMFR1_EL1_XNX |
23422341
ID_AA64MMFR1_EL1_TWED |
23432342
ID_AA64MMFR1_EL1_XNX |
23442343
ID_AA64MMFR1_EL1_VH |
@@ -3069,12 +3068,14 @@ static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
30693068

30703069
for (i = 0; i < n; i++) {
30713070
if (!is_32 && table[i].reg && !table[i].reset) {
3072-
kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i);
3071+
kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n",
3072+
&table[i], i, table[i].name);
30733073
return false;
30743074
}
30753075

30763076
if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
3077-
kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1);
3077+
kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n",
3078+
&table[i], i, table[i - 1].name, table[i].name);
30783079
return false;
30793080
}
30803081
}

Diff for: arch/arm64/kvm/vgic/vgic-init.c

+63-19
Original file line number | Diff line number | Diff line change
@@ -180,27 +180,22 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
180180
return 0;
181181
}
182182

183-
/**
184-
* kvm_vgic_vcpu_init() - Initialize static VGIC VCPU data
185-
* structures and register VCPU-specific KVM iodevs
186-
*
187-
* @vcpu: pointer to the VCPU being created and initialized
188-
*
189-
* Only do initialization, but do not actually enable the
190-
* VGIC CPU interface
191-
*/
192-
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
183+
static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu)
193184
{
194185
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
195-
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
196-
int ret = 0;
197186
int i;
198187

199-
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
188+
lockdep_assert_held(&vcpu->kvm->arch.config_lock);
200189

201-
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
202-
raw_spin_lock_init(&vgic_cpu->ap_list_lock);
203-
atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);
190+
if (vgic_cpu->private_irqs)
191+
return 0;
192+
193+
vgic_cpu->private_irqs = kcalloc(VGIC_NR_PRIVATE_IRQS,
194+
sizeof(struct vgic_irq),
195+
GFP_KERNEL_ACCOUNT);
196+
197+
if (!vgic_cpu->private_irqs)
198+
return -ENOMEM;
204199

205200
/*
206201
* Enable and configure all SGIs to be edge-triggered and
@@ -225,9 +220,48 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
225220
}
226221
}
227222

223+
return 0;
224+
}
225+
226+
static int vgic_allocate_private_irqs(struct kvm_vcpu *vcpu)
227+
{
228+
int ret;
229+
230+
mutex_lock(&vcpu->kvm->arch.config_lock);
231+
ret = vgic_allocate_private_irqs_locked(vcpu);
232+
mutex_unlock(&vcpu->kvm->arch.config_lock);
233+
234+
return ret;
235+
}
236+
237+
/**
238+
* kvm_vgic_vcpu_init() - Initialize static VGIC VCPU data
239+
* structures and register VCPU-specific KVM iodevs
240+
*
241+
* @vcpu: pointer to the VCPU being created and initialized
242+
*
243+
* Only do initialization, but do not actually enable the
244+
* VGIC CPU interface
245+
*/
246+
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
247+
{
248+
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
249+
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
250+
int ret = 0;
251+
252+
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
253+
254+
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
255+
raw_spin_lock_init(&vgic_cpu->ap_list_lock);
256+
atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);
257+
228258
if (!irqchip_in_kernel(vcpu->kvm))
229259
return 0;
230260

261+
ret = vgic_allocate_private_irqs(vcpu);
262+
if (ret)
263+
return ret;
264+
231265
/*
232266
* If we are creating a VCPU with a GICv3 we must also register the
233267
* KVM io device for the redistributor that belongs to this VCPU.
@@ -283,10 +317,13 @@ int vgic_init(struct kvm *kvm)
283317

284318
/* Initialize groups on CPUs created before the VGIC type was known */
285319
kvm_for_each_vcpu(idx, vcpu, kvm) {
286-
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
320+
ret = vgic_allocate_private_irqs_locked(vcpu);
321+
if (ret)
322+
goto out;
287323

288324
for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
289-
struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
325+
struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, i);
326+
290327
switch (dist->vgic_model) {
291328
case KVM_DEV_TYPE_ARM_VGIC_V3:
292329
irq->group = 1;
@@ -298,8 +335,12 @@ int vgic_init(struct kvm *kvm)
298335
break;
299336
default:
300337
ret = -EINVAL;
301-
goto out;
302338
}
339+
340+
vgic_put_irq(kvm, irq);
341+
342+
if (ret)
343+
goto out;
303344
}
304345
}
305346

@@ -373,6 +414,9 @@ static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
373414
vgic_flush_pending_lpis(vcpu);
374415

375416
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
417+
kfree(vgic_cpu->private_irqs);
418+
vgic_cpu->private_irqs = NULL;
419+
376420
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
377421
vgic_unregister_redist_iodev(vcpu);
378422
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;

Diff for: include/kvm/arm_vgic.h

+1-1
Original file line number | Diff line number | Diff line change
@@ -331,7 +331,7 @@ struct vgic_cpu {
331331
struct vgic_v3_cpu_if vgic_v3;
332332
};
333333

334-
struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
334+
struct vgic_irq *private_irqs;
335335

336336
raw_spinlock_t ap_list_lock; /* Protects the ap_list */
337337

0 commit comments

Comments (0)