Skip to content

Commit

Permalink
KVM: arm64: Destroy mpidr_data for 'late' vCPU creation
Browse files Browse the repository at this point in the history
A particularly annoying userspace could create a vCPU after KVM has
computed mpidr_data for the VM, either by racing against VGIC
initialization or having a userspace irqchip.

In any case, this means mpidr_data no longer fully describes the VM, and
attempts to find the new vCPU with kvm_mpidr_to_vcpu() will fail. The
fix is to discard mpidr_data altogether, as it is only a performance
optimization and not required for correctness. In all likelihood KVM
will recompute the mappings when KVM_RUN is called on the new vCPU.

Note that reads of mpidr_data are not guarded by a lock; promote to RCU
to cope with the possibility of mpidr_data being invalidated at runtime.

Fixes: 54a8006 ("KVM: arm64: Fast-track kvm_mpidr_to_vcpu() when mpidr_data is available")
Signed-off-by: Oliver Upton <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Marc Zyngier <[email protected]>
  • Loading branch information
oupton authored and Marc Zyngier committed May 8, 2024
1 parent 5053c3f commit ce5d244
Showing 1 changed file with 41 additions and 9 deletions.
50 changes: 41 additions & 9 deletions arch/arm64/kvm/arm.c
Original file line number Diff line number Diff line change
Expand Up @@ -195,6 +195,23 @@ void kvm_arch_create_vm_debugfs(struct kvm *kvm)
kvm_sys_regs_create_debugfs(kvm);
}

/*
 * Tear down the VM's cached MPIDR-to-vCPU lookup table, if one exists.
 *
 * The table is only a lookup accelerator for kvm_mpidr_to_vcpu(); dropping
 * it is always safe, as readers fall back to iterating the vCPUs. Readers
 * access the pointer under RCU, so publish NULL first and wait for a grace
 * period before freeing the old table.
 */
static void kvm_destroy_mpidr_data(struct kvm *kvm)
{
	struct kvm_mpidr_data *old;

	mutex_lock(&kvm->arch.config_lock);

	/* config_lock serializes writers; lockdep documents that here. */
	old = rcu_dereference_protected(kvm->arch.mpidr_data,
					lockdep_is_held(&kvm->arch.config_lock));
	if (!old)
		goto out;

	/* Unpublish, then wait out concurrent RCU readers before freeing. */
	rcu_assign_pointer(kvm->arch.mpidr_data, NULL);
	synchronize_rcu();
	kfree(old);

out:
	mutex_unlock(&kvm->arch.config_lock);
}

/**
* kvm_arch_destroy_vm - destroy the VM data structure
* @kvm: pointer to the KVM struct
Expand All @@ -209,7 +226,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
if (is_protected_kvm_enabled())
pkvm_destroy_hyp_vm(kvm);

kfree(kvm->arch.mpidr_data);
kvm_destroy_mpidr_data(kvm);

kfree(kvm->arch.sysreg_masks);
kvm_destroy_vcpus(kvm);

Expand Down Expand Up @@ -395,6 +413,13 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)

vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;

/*
* This vCPU may have been created after mpidr_data was initialized.
* Throw out the pre-computed mappings if that is the case which forces
* KVM to fall back to iteratively searching the vCPUs.
*/
kvm_destroy_mpidr_data(vcpu->kvm);

err = kvm_vgic_vcpu_init(vcpu);
if (err)
return err;
Expand Down Expand Up @@ -594,7 +619,8 @@ static void kvm_init_mpidr_data(struct kvm *kvm)

mutex_lock(&kvm->arch.config_lock);

if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1)
if (rcu_access_pointer(kvm->arch.mpidr_data) ||
atomic_read(&kvm->online_vcpus) == 1)
goto out;

kvm_for_each_vcpu(c, vcpu, kvm) {
Expand Down Expand Up @@ -631,7 +657,7 @@ static void kvm_init_mpidr_data(struct kvm *kvm)
data->cmpidr_to_idx[index] = c;
}

kvm->arch.mpidr_data = data;
rcu_assign_pointer(kvm->arch.mpidr_data, data);
out:
mutex_unlock(&kvm->arch.config_lock);
}
Expand Down Expand Up @@ -2470,21 +2496,27 @@ static int __init init_hyp_mode(void)

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
struct kvm_vcpu *vcpu;
struct kvm_vcpu *vcpu = NULL;
struct kvm_mpidr_data *data;
unsigned long i;

mpidr &= MPIDR_HWID_BITMASK;

if (kvm->arch.mpidr_data) {
u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr);
rcu_read_lock();
data = rcu_dereference(kvm->arch.mpidr_data);

vcpu = kvm_get_vcpu(kvm,
kvm->arch.mpidr_data->cmpidr_to_idx[idx]);
if (data) {
u16 idx = kvm_mpidr_index(data, mpidr);

vcpu = kvm_get_vcpu(kvm, data->cmpidr_to_idx[idx]);
if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
vcpu = NULL;
}

rcu_read_unlock();

if (vcpu)
return vcpu;
}

kvm_for_each_vcpu(i, vcpu, kvm) {
if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
Expand Down

0 comments on commit ce5d244

Please sign in to comment.