Skip to content

Commit

Permalink
KVM: arm64: vgic-its: Use the per-ITS translation cache for injection
Browse files Browse the repository at this point in the history
Everything is in place to switch to per-ITS translation caches. Start
using the per-ITS cache to avoid the lock serialization related to the
global translation cache. Explicitly check for out-of-range device and
event IDs as the cache index is packed based on the range the ITS
actually supports.

Take the RCU read lock to protect against the returned descriptor being
freed while trying to take a reference on it, as it is no longer
necessary to acquire the lpi_list_lock.

Signed-off-by: Oliver Upton <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Marc Zyngier <[email protected]>
  • Loading branch information
oupton authored and Marc Zyngier committed Apr 25, 2024
1 parent dedfcd1 commit e64f291
Showing 1 changed file with 17 additions and 46 deletions.
63 changes: 17 additions & 46 deletions arch/arm64/kvm/vgic/vgic-its.c
Original file line number Diff line number Diff line change
Expand Up @@ -251,8 +251,10 @@ static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,

#define GIC_LPI_OFFSET 8192

/*
 * Width of the device/event ID fields this virtual ITS advertises via
 * GITS_TYPER, and the corresponding maximum in-range ID values.
 * (The stripped diff had duplicated the IDBITS/DEVBITS defines; keep a
 * single copy of each alongside the derived MAX_* bounds.)
 */
#define VITS_TYPER_IDBITS		16
#define VITS_MAX_EVENTID		(BIT(VITS_TYPER_IDBITS) - 1)
#define VITS_TYPER_DEVBITS		16
#define VITS_MAX_DEVID			(BIT(VITS_TYPER_DEVBITS) - 1)
#define VITS_DTE_MAX_DEVID_OFFSET	(BIT(14) - 1)
#define VITS_ITE_MAX_EVENTID_OFFSET	(BIT(16) - 1)

Expand Down Expand Up @@ -536,51 +538,27 @@ static unsigned long vgic_its_cache_key(u32 devid, u32 eventid)

}

/*
 * Scan the distributor-wide LPI translation cache for an entry matching
 * the (doorbell address, device ID, event ID) triple and return the
 * cached IRQ, or NULL on a miss.
 *
 * The list is maintained in LRU order: a hit is moved to the head, and
 * unused entries (irq == NULL) sit at the tail, which is why the scan
 * can stop at the first NULL entry.
 *
 * NOTE(review): no locking is taken here; the caller presumably
 * serializes access to dist->lpi_translation_cache (the visible caller
 * holds dist->lpi_list_lock) -- confirm at all call sites.
 */
static struct vgic_irq *__vgic_its_check_cache(struct vgic_dist *dist,
					       phys_addr_t db,
					       u32 devid, u32 eventid)
{
	struct vgic_translation_cache_entry *cte;

	list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
		/*
		 * If we hit a NULL entry, there is nothing after this
		 * point.
		 */
		if (!cte->irq)
			break;

		/* Not the translation we are looking for. */
		if (cte->db != db || cte->devid != devid ||
		    cte->eventid != eventid)
			continue;

		/*
		 * Move this entry to the head, as it is the most
		 * recently used.
		 */
		if (!list_is_first(&cte->entry, &dist->lpi_translation_cache))
			list_move(&cte->entry, &dist->lpi_translation_cache);

		return cte->irq;
	}

	return NULL;
}

/*
 * Translate (devid, eventid) for the ITS answering at doorbell address
 * @db using that ITS's xarray translation cache.
 *
 * Returns the cached struct vgic_irq with a reference taken on it, or
 * NULL if either ID is out of the supported range, no ITS matches @db,
 * the cache has no entry, or the IRQ is concurrently being freed.
 *
 * This runs under the RCU read lock only: xa_load() is RCU-safe, and
 * vgic_try_get_irq_kref() guards against the descriptor being freed
 * between the lookup and the reference grab, so the old
 * dist->lpi_list_lock serialization is no longer needed.
 *
 * (The rendered diff had fused the old and new implementations: a stale
 * spinlock pair around a duplicate __vgic_its_check_cache() lookup.
 * This is the post-commit body with the stale lines removed.)
 */
static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
					     u32 devid, u32 eventid)
{
	unsigned long cache_key = vgic_its_cache_key(devid, eventid);
	struct vgic_its *its;
	struct vgic_irq *irq;

	/*
	 * The cache key is packed based on the ID ranges the ITS actually
	 * supports, so explicitly reject out-of-range IDs up front.
	 */
	if (devid > VITS_MAX_DEVID || eventid > VITS_MAX_EVENTID)
		return NULL;

	its = __vgic_doorbell_to_its(kvm, db);
	if (IS_ERR(its))
		return NULL;

	rcu_read_lock();

	irq = xa_load(&its->translation_cache, cache_key);
	if (!vgic_try_get_irq_kref(irq))
		irq = NULL;

	rcu_read_unlock();

	return irq;
}
Expand All @@ -605,14 +583,7 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
if (unlikely(list_empty(&dist->lpi_translation_cache)))
goto out;

/*
* We could have raced with another CPU caching the same
* translation behind our back, so let's check it is not in
* already
*/
db = its->vgic_its_base + GITS_TRANSLATER;
if (__vgic_its_check_cache(dist, db, devid, eventid))
goto out;

/* Always reuse the last entry (LRU policy) */
cte = list_last_entry(&dist->lpi_translation_cache,
Expand Down Expand Up @@ -958,7 +929,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,

switch (type) {
case GITS_BASER_TYPE_DEVICE:
if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
if (id > VITS_MAX_DEVID)
return false;
break;
case GITS_BASER_TYPE_COLLECTION:
Expand Down

0 comments on commit e64f291

Please sign in to comment.