Merge branch kvm-arm64/vm-configuration into kvmarm/next
* kvm-arm64/vm-configuration: (29 commits)
  : VM configuration enforcement, courtesy of Marc Zyngier
  :
  : Userspace has gained the ability to control the features visible
  : through the ID registers, yet KVM didn't take this into account as the
  : effective feature set when determining trap / emulation behavior. This
  : series adds:
  :
  :  - Mechanism for testing the presence of a particular CPU feature in the
  :    guest's ID registers
  :
  :  - Infrastructure for computing the effective value of VNCR-backed
  :    registers, taking into account the RES0 / RES1 bits for a particular
  :    VM configuration
  :
  :  - Implementation of 'fine-grained UNDEF' controls that shadow the FGT
  :    register definitions.
  KVM: arm64: Don't initialize idreg debugfs w/ preemption disabled
  KVM: arm64: Fail the idreg iterator if idregs aren't initialized
  KVM: arm64: Make build-time check of RES0/RES1 bits optional
  KVM: arm64: Add debugfs file for guest's ID registers
  KVM: arm64: Snapshot all non-zero RES0/RES1 sysreg fields for later checking
  KVM: arm64: Make FEAT_MOPS UNDEF if not advertised to the guest
  KVM: arm64: Make AMU sysreg UNDEF if FEAT_AMU is not advertised to the guest
  KVM: arm64: Make PIR{,E0}_EL1 UNDEF if S1PIE is not advertised to the guest
  KVM: arm64: Make TLBI OS/Range UNDEF if not advertised to the guest
  KVM: arm64: Streamline save/restore of HFG[RW]TR_EL2
  KVM: arm64: Move existing feature disabling over to FGU infrastructure
  KVM: arm64: Propagate and handle Fine-Grained UNDEF bits
  KVM: arm64: Add Fine-Grained UNDEF tracking information
  KVM: arm64: Rename __check_nv_sr_forward() to triage_sysreg_trap()
  KVM: arm64: Use the xarray as the primary sysreg/sysinsn walker
  KVM: arm64: Register AArch64 system register entries with the sysreg xarray
  KVM: arm64: Always populate the trap configuration xarray
  KVM: arm64: nv: Move system instructions to their own sys_reg_desc array
  KVM: arm64: Drop the requirement for XARRAY_MULTI
  KVM: arm64: nv: Turn encoding ranges into discrete XArray stores
  ...

Signed-off-by: Oliver Upton <[email protected]>
oupton committed Mar 7, 2024
2 parents a040adf + 5c1ebe9 commit 0d87485
Showing 15 changed files with 997 additions and 181 deletions.
4 changes: 1 addition & 3 deletions arch/arm64/include/asm/kvm_arm.h
@@ -102,9 +102,7 @@
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)

-#define HCRX_GUEST_FLAGS \
-	(HCRX_EL2_SMPME | HCRX_EL2_TCR2En | \
-	 (cpus_have_final_cap(ARM64_HAS_MOPS) ? (HCRX_EL2_MSCEn | HCRX_EL2_MCE2) : 0))
+#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En)
#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En)

/* TCR_EL2 Registers bits */
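With the MOPS-conditional bits dropped from HCRX_GUEST_FLAGS above, the remaining HCRX_EL2 bits can instead be computed per vCPU from what the guest's ID registers actually advertise. A minimal sketch of that idea, assuming the kvm_has_feat() helper and the hcrx_el2 field added later in this series; the function name below is illustrative, not the series' actual code:

/* Illustrative sketch, not part of this diff. */
static void vcpu_compute_hcrx(struct kvm_vcpu *vcpu)
{
	/* Unconditional guest bits */
	vcpu->arch.hcrx_el2 = HCRX_GUEST_FLAGS;

	/* Only enable the MOPS-related bits if the guest can see FEAT_MOPS */
	if (kvm_has_feat(vcpu->kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
		vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
}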
99 changes: 98 additions & 1 deletion arch/arm64/include/asm/kvm_host.h
@@ -238,9 +238,32 @@ static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
return index;
}

struct kvm_sysreg_masks;

enum fgt_group_id {
__NO_FGT_GROUP__,
HFGxTR_GROUP,
HDFGRTR_GROUP,
HDFGWTR_GROUP = HDFGRTR_GROUP,
HFGITR_GROUP,
HAFGRTR_GROUP,

/* Must be last */
__NR_FGT_GROUP_IDS__
};

struct kvm_arch {
struct kvm_s2_mmu mmu;

/*
* Fine-Grained UNDEF, mimicking the FGT layout defined by the
* architecture. We track them globally, as we present the
* same feature-set to all vcpus.
*
* Index 0 is currently spare.
*/
u64 fgu[__NR_FGT_GROUP_IDS__];

/* Interrupt controller */
struct vgic_dist vgic;

@@ -274,6 +297,8 @@ struct kvm_arch {
#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 6
/* Initial ID reg values loaded */
#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7
/* Fine-Grained UNDEF initialised */
#define KVM_ARCH_FLAG_FGU_INITIALIZED 8
unsigned long flags;

/* VM-wide vCPU feature set */
@@ -294,6 +319,9 @@ struct kvm_arch {
/* PMCR_EL0.N value for the guest */
u8 pmcr_n;

/* Iterator for idreg debugfs */
u8 idreg_debugfs_iter;

/* Hypercall features firmware registers' descriptor */
struct kvm_smccc_features smccc_feat;
struct maple_tree smccc_filter;
@@ -312,6 +340,9 @@ struct kvm_arch {
#define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
u64 id_regs[KVM_ARM_ID_REG_NUM];

/* Masks for VNCR-baked sysregs */
struct kvm_sysreg_masks *sysreg_masks;

/*
* For an untrusted host VM, 'pkvm.handle' is used to lookup
* the associated pKVM instance in the hypervisor.
@@ -474,6 +505,13 @@ enum vcpu_sysreg {
NR_SYS_REGS /* Nothing after this line! */
};

struct kvm_sysreg_masks {
struct {
u64 res0;
u64 res1;
} mask[NR_SYS_REGS - __VNCR_START__];
};

struct kvm_cpu_context {
struct user_pt_regs regs; /* sp = sp_el0 */

@@ -549,6 +587,7 @@ struct kvm_vcpu_arch {

/* Values of trap registers for the guest. */
u64 hcr_el2;
u64 hcrx_el2;
u64 mdcr_el2;
u64 cptr_el2;

@@ -868,7 +907,15 @@ static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)

#define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r))

-#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))
+u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
+#define __vcpu_sys_reg(v,r)						\
+	(*({								\
+		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
+		u64 *__r = __ctxt_sys_reg(ctxt, (r));			\
+		if (vcpu_has_nv((v)) && (r) >= __VNCR_START__)		\
+			*__r = kvm_vcpu_sanitise_vncr_reg((v), (r));	\
+		__r;							\
+	}))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
@@ -1055,14 +1102,20 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);

void kvm_sys_regs_create_debugfs(struct kvm *kvm);
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

int __init kvm_sys_reg_table_init(void);
struct sys_reg_desc;
int __init populate_sysreg_config(const struct sys_reg_desc *sr,
unsigned int idx);
int __init populate_nv_trap_config(void);

bool lock_all_vcpus(struct kvm *kvm);
void unlock_all_vcpus(struct kvm *kvm);

void kvm_init_sysreg(struct kvm_vcpu *);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
@@ -1233,4 +1286,48 @@ static inline void kvm_hyp_reserve(void) { }
void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);

#define __expand_field_sign_unsigned(id, fld, val) \
((u64)SYS_FIELD_VALUE(id, fld, val))

#define __expand_field_sign_signed(id, fld, val) \
({ \
u64 __val = SYS_FIELD_VALUE(id, fld, val); \
sign_extend64(__val, id##_##fld##_WIDTH - 1); \
})

#define expand_field_sign(id, fld, val) \
(id##_##fld##_SIGNED ? \
__expand_field_sign_signed(id, fld, val) : \
__expand_field_sign_unsigned(id, fld, val))

#define get_idreg_field_unsigned(kvm, id, fld) \
({ \
u64 __val = IDREG((kvm), SYS_##id); \
FIELD_GET(id##_##fld##_MASK, __val); \
})

#define get_idreg_field_signed(kvm, id, fld) \
({ \
u64 __val = get_idreg_field_unsigned(kvm, id, fld); \
sign_extend64(__val, id##_##fld##_WIDTH - 1); \
})

#define get_idreg_field_enum(kvm, id, fld) \
get_idreg_field_unsigned(kvm, id, fld)

#define get_idreg_field(kvm, id, fld) \
(id##_##fld##_SIGNED ? \
get_idreg_field_signed(kvm, id, fld) : \
get_idreg_field_unsigned(kvm, id, fld))

#define kvm_has_feat(kvm, id, fld, limit) \
(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, limit))

#define kvm_has_feat_enum(kvm, id, fld, val) \
(get_idreg_field_unsigned((kvm), id, fld) == __expand_field_sign_unsigned(id, fld, val))

#define kvm_has_feat_range(kvm, id, fld, min, max) \
(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) && \
get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max))

#endif /* __ARM64_KVM_HOST_H__ */
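The kvm_has_feat() family added above reads a field out of the VM's sanitised ID registers and compares it against a named feature level, sign-extending signed fields so the comparison works for both encodings. A short usage sketch, assuming the kernel's generated ID register field definitions; the caller shown is illustrative only:

/* Illustrative sketch, not part of this diff. */
static void example_feature_checks(struct kvm *kvm)
{
	/* Unsigned field: FEAT_MOPS is ID_AA64ISAR2_EL1.MOPS >= IMP */
	if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
		pr_debug("guest sees FEAT_MOPS\n");

	/* Signed field: ID_AA64PFR0_EL1.FP is sign-extended before comparing */
	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, FP, IMP))
		pr_debug("guest has no FP\n");
}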
1 change: 0 additions & 1 deletion arch/arm64/include/asm/kvm_nested.h
@@ -60,7 +60,6 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
return ttbr0 & ~GENMASK_ULL(63, 48);
}

-extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);

int kvm_init_nv_sysregs(struct kvm *kvm);

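kvm_init_nv_sysregs() is where this series computes the per-VM RES0/RES1 masks that back the kvm_sysreg_masks structure added to struct kvm_arch. A sketch of how such masks would then be applied when a VNCR-backed register is read, consistent with the layout shown earlier; the helper name is illustrative and the series' exact implementation may differ:

/* Illustrative sketch, not part of this diff. */
static u64 sanitise_vncr_value(struct kvm *kvm, enum vcpu_sysreg sr, u64 v)
{
	/* Assumes sr is VNCR-backed, i.e. sr >= __VNCR_START__ */
	struct kvm_sysreg_masks *masks = kvm->arch.sysreg_masks;

	if (!masks)
		return v;

	v &= ~masks->mask[sr - __VNCR_START__].res0;	/* force RES0 bits to 0 */
	v |= masks->mask[sr - __VNCR_START__].res1;	/* force RES1 bits to 1 */

	return v;
}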
12 changes: 11 additions & 1 deletion arch/arm64/kvm/Kconfig
@@ -39,7 +39,6 @@ menuconfig KVM
select HAVE_KVM_VCPU_RUN_PID_CHANGE
select SCHED_INFO
select GUEST_PERF_EVENTS if PERF_EVENTS
-select XARRAY_MULTI
help
Support hosting virtualized guest machines.

@@ -68,4 +67,15 @@ config PROTECTED_NVHE_STACKTRACE

If unsure, or not using protected nVHE (pKVM), say N.

config KVM_ARM64_RES_BITS_PARANOIA
bool "Build-time check of RES0/RES1 bits"
depends on KVM
default n
help
Say Y here to validate that KVM's knowledge of most system
registers' RES0/RES1 bits matches what the rest of the kernel
defines. Expect the build to fail badly if you enable this.

Just say N.

endif # VIRTUALIZATION
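Since the option defaults to n and exists purely as a build-time sanity check, it is normally only switched on for a throwaway build. A config fragment for that, using the option name added above:

# Illustrative config fragment; expect the build to fail if any
# RES0/RES1 definition has drifted from KVM's expectations.
CONFIG_KVM_ARM64_RES_BITS_PARANOIA=y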
11 changes: 11 additions & 0 deletions arch/arm64/kvm/arm.c
@@ -190,6 +190,10 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}

void kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
kvm_sys_regs_create_debugfs(kvm);
}

/**
* kvm_arch_destroy_vm - destroy the VM data structure
@@ -206,6 +210,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
pkvm_destroy_hyp_vm(kvm);

kfree(kvm->arch.mpidr_data);
kfree(kvm->arch.sysreg_masks);
kvm_destroy_vcpus(kvm);

kvm_unshare_hyp(kvm, kvm + 1);
@@ -674,6 +679,12 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
return ret;
}

/*
* This needs to happen after NV has imposed its own restrictions on
* the feature set
*/
kvm_init_sysreg(vcpu);

ret = kvm_timer_enable(vcpu);
if (ret)
return ret;
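kvm_init_sysreg(), called above once NV has imposed its restrictions, is what populates the per-VM fgu[] bitmap; the sysreg trap path can then decide to UNDEF an access with a single lookup. A sketch of that consumer side, assuming the fgu[] array and fgt_group_id enum added in this series; the helper name and bit argument are illustrative:

/* Illustrative sketch, not part of this diff. */
static bool access_should_undef(struct kvm_vcpu *vcpu,
				enum fgt_group_id group, u64 bit)
{
	/* A set bit in the FGU mask means the access UNDEFs in the guest */
	return vcpu->kvm->arch.fgu[group] & bit;
}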
125 changes: 125 additions & 0 deletions arch/arm64/kvm/check-res-bits.h
@@ -0,0 +1,125 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2024 - Google LLC
* Author: Marc Zyngier <[email protected]>
*/

#include <asm/sysreg-defs.h>

/*
* WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
*
* If any of these BUILD_BUG_ON() fails, that's because some bits that
* were reserved have gained some other meaning, and KVM needs to know
* about those.
*
* In such a case, do *NOT* blindly change the assertion so that it
* passes, but also teach the rest of the code about the actual
* change.
*
* WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
*/
static inline void check_res_bits(void)
{
#ifdef CONFIG_KVM_ARM64_RES_BITS_PARANOIA

BUILD_BUG_ON(OSDTRRX_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(MDCCINT_EL1_RES0 != (GENMASK_ULL(63, 31) | GENMASK_ULL(28, 0)));
BUILD_BUG_ON(MDSCR_EL1_RES0 != (GENMASK_ULL(63, 36) | GENMASK_ULL(28, 28) | GENMASK_ULL(25, 24) | GENMASK_ULL(20, 20) | GENMASK_ULL(18, 16) | GENMASK_ULL(11, 7) | GENMASK_ULL(5, 1)));
BUILD_BUG_ON(OSDTRTX_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(OSECCR_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(OSLAR_EL1_RES0 != (GENMASK_ULL(63, 1)));
BUILD_BUG_ON(ID_PFR0_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_PFR1_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_DFR0_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_AFR0_EL1_RES0 != (GENMASK_ULL(63, 16)));
BUILD_BUG_ON(ID_MMFR0_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_MMFR1_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_MMFR2_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_MMFR3_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_ISAR0_EL1_RES0 != (GENMASK_ULL(63, 28)));
BUILD_BUG_ON(ID_ISAR1_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_ISAR2_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_ISAR3_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_ISAR4_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_ISAR5_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(23, 20)));
BUILD_BUG_ON(ID_ISAR6_EL1_RES0 != (GENMASK_ULL(63, 28)));
BUILD_BUG_ON(ID_MMFR4_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(MVFR0_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(MVFR1_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(MVFR2_EL1_RES0 != (GENMASK_ULL(63, 8)));
BUILD_BUG_ON(ID_PFR2_EL1_RES0 != (GENMASK_ULL(63, 12)));
BUILD_BUG_ON(ID_DFR1_EL1_RES0 != (GENMASK_ULL(63, 8)));
BUILD_BUG_ON(ID_MMFR5_EL1_RES0 != (GENMASK_ULL(63, 8)));
BUILD_BUG_ON(ID_AA64PFR1_EL1_RES0 != (GENMASK_ULL(23, 20)));
BUILD_BUG_ON(ID_AA64PFR2_EL1_RES0 != (GENMASK_ULL(63, 36) | GENMASK_ULL(31, 12)));
BUILD_BUG_ON(ID_AA64ZFR0_EL1_RES0 != (GENMASK_ULL(63, 60) | GENMASK_ULL(51, 48) | GENMASK_ULL(39, 36) | GENMASK_ULL(31, 28) | GENMASK_ULL(15, 8)));
BUILD_BUG_ON(ID_AA64SMFR0_EL1_RES0 != (GENMASK_ULL(62, 61) | GENMASK_ULL(51, 49) | GENMASK_ULL(31, 31) | GENMASK_ULL(27, 0)));
BUILD_BUG_ON(ID_AA64FPFR0_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 2)));
BUILD_BUG_ON(ID_AA64DFR0_EL1_RES0 != (GENMASK_ULL(27, 24) | GENMASK_ULL(19, 16)));
BUILD_BUG_ON(ID_AA64DFR1_EL1_RES0 != (GENMASK_ULL(63, 0)));
BUILD_BUG_ON(ID_AA64AFR0_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_AA64AFR1_EL1_RES0 != (GENMASK_ULL(63, 0)));
BUILD_BUG_ON(ID_AA64ISAR0_EL1_RES0 != (GENMASK_ULL(3, 0)));
BUILD_BUG_ON(ID_AA64ISAR2_EL1_RES0 != (GENMASK_ULL(47, 44)));
BUILD_BUG_ON(ID_AA64ISAR3_EL1_RES0 != (GENMASK_ULL(63, 16)));
BUILD_BUG_ON(ID_AA64MMFR0_EL1_RES0 != (GENMASK_ULL(55, 48)));
BUILD_BUG_ON(ID_AA64MMFR2_EL1_RES0 != (GENMASK_ULL(47, 44)));
BUILD_BUG_ON(ID_AA64MMFR3_EL1_RES0 != (GENMASK_ULL(51, 48)));
BUILD_BUG_ON(ID_AA64MMFR4_EL1_RES0 != (GENMASK_ULL(63, 40) | GENMASK_ULL(35, 28) | GENMASK_ULL(3, 0)));
BUILD_BUG_ON(SCTLR_EL1_RES0 != (GENMASK_ULL(17, 17)));
BUILD_BUG_ON(CPACR_ELx_RES0 != (GENMASK_ULL(63, 30) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | GENMASK_ULL(19, 18) | GENMASK_ULL(15, 0)));
BUILD_BUG_ON(SMPRI_EL1_RES0 != (GENMASK_ULL(63, 4)));
BUILD_BUG_ON(ZCR_ELx_RES0 != (GENMASK_ULL(63, 9)));
BUILD_BUG_ON(SMCR_ELx_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(29, 9)));
BUILD_BUG_ON(GCSCR_ELx_RES0 != (GENMASK_ULL(63, 10) | GENMASK_ULL(7, 7) | GENMASK_ULL(4, 1)));
BUILD_BUG_ON(GCSPR_ELx_RES0 != (GENMASK_ULL(2, 0)));
BUILD_BUG_ON(GCSCRE0_EL1_RES0 != (GENMASK_ULL(63, 11) | GENMASK_ULL(7, 6) | GENMASK_ULL(4, 1)));
BUILD_BUG_ON(ALLINT_RES0 != (GENMASK_ULL(63, 14) | GENMASK_ULL(12, 0)));
BUILD_BUG_ON(PMSCR_EL1_RES0 != (GENMASK_ULL(63, 8) | GENMASK_ULL(2, 2)));
BUILD_BUG_ON(PMSICR_EL1_RES0 != (GENMASK_ULL(55, 32)));
BUILD_BUG_ON(PMSIRR_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(7, 1)));
BUILD_BUG_ON(PMSFCR_EL1_RES0 != (GENMASK_ULL(63, 19) | GENMASK_ULL(15, 4)));
BUILD_BUG_ON(PMSLATFR_EL1_RES0 != (GENMASK_ULL(63, 16)));
BUILD_BUG_ON(PMSIDR_EL1_RES0 != (GENMASK_ULL(63, 25) | GENMASK_ULL(7, 7)));
BUILD_BUG_ON(PMBLIMITR_EL1_RES0 != (GENMASK_ULL(11, 6) | GENMASK_ULL(4, 3)));
BUILD_BUG_ON(PMBSR_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(25, 20)));
BUILD_BUG_ON(PMBIDR_EL1_RES0 != (GENMASK_ULL(63, 12) | GENMASK_ULL(7, 6)));
BUILD_BUG_ON(CONTEXTIDR_ELx_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(CCSIDR_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(CLIDR_EL1_RES0 != (GENMASK_ULL(63, 47)));
BUILD_BUG_ON(CCSIDR2_EL1_RES0 != (GENMASK_ULL(63, 24)));
BUILD_BUG_ON(GMID_EL1_RES0 != (GENMASK_ULL(63, 4)));
BUILD_BUG_ON(SMIDR_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(14, 12)));
BUILD_BUG_ON(CSSELR_EL1_RES0 != (GENMASK_ULL(63, 5)));
BUILD_BUG_ON(CTR_EL0_RES0 != (GENMASK_ULL(63, 38) | GENMASK_ULL(30, 30) | GENMASK_ULL(13, 4)));
BUILD_BUG_ON(CTR_EL0_RES1 != (GENMASK_ULL(31, 31)));
BUILD_BUG_ON(DCZID_EL0_RES0 != (GENMASK_ULL(63, 5)));
BUILD_BUG_ON(SVCR_RES0 != (GENMASK_ULL(63, 2)));
BUILD_BUG_ON(FPMR_RES0 != (GENMASK_ULL(63, 38) | GENMASK_ULL(23, 23) | GENMASK_ULL(13, 9)));
BUILD_BUG_ON(HFGxTR_EL2_RES0 != (GENMASK_ULL(51, 51)));
BUILD_BUG_ON(HFGITR_EL2_RES0 != (GENMASK_ULL(63, 63) | GENMASK_ULL(61, 61)));
BUILD_BUG_ON(HDFGRTR_EL2_RES0 != (GENMASK_ULL(49, 49) | GENMASK_ULL(42, 42) | GENMASK_ULL(39, 38) | GENMASK_ULL(21, 20) | GENMASK_ULL(8, 8)));
BUILD_BUG_ON(HDFGWTR_EL2_RES0 != (GENMASK_ULL(63, 63) | GENMASK_ULL(59, 58) | GENMASK_ULL(51, 51) | GENMASK_ULL(47, 47) | GENMASK_ULL(43, 43) | GENMASK_ULL(40, 38) | GENMASK_ULL(34, 34) | GENMASK_ULL(30, 30) | GENMASK_ULL(22, 22) | GENMASK_ULL(9, 9) | GENMASK_ULL(6, 6)));
BUILD_BUG_ON(HAFGRTR_EL2_RES0 != (GENMASK_ULL(63, 50) | GENMASK_ULL(16, 5)));
BUILD_BUG_ON(HCRX_EL2_RES0 != (GENMASK_ULL(63, 25) | GENMASK_ULL(13, 12)));
BUILD_BUG_ON(DACR32_EL2_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(PMSCR_EL2_RES0 != (GENMASK_ULL(63, 8) | GENMASK_ULL(2, 2)));
BUILD_BUG_ON(TCR2_EL1x_RES0 != (GENMASK_ULL(63, 16) | GENMASK_ULL(13, 12) | GENMASK_ULL(9, 6)));
BUILD_BUG_ON(TCR2_EL2_RES0 != (GENMASK_ULL(63, 16)));
BUILD_BUG_ON(LORSA_EL1_RES0 != (GENMASK_ULL(63, 52) | GENMASK_ULL(15, 1)));
BUILD_BUG_ON(LOREA_EL1_RES0 != (GENMASK_ULL(63, 52) | GENMASK_ULL(15, 0)));
BUILD_BUG_ON(LORN_EL1_RES0 != (GENMASK_ULL(63, 8)));
BUILD_BUG_ON(LORC_EL1_RES0 != (GENMASK_ULL(63, 10) | GENMASK_ULL(1, 1)));
BUILD_BUG_ON(LORID_EL1_RES0 != (GENMASK_ULL(63, 24) | GENMASK_ULL(15, 8)));
BUILD_BUG_ON(ISR_EL1_RES0 != (GENMASK_ULL(63, 11) | GENMASK_ULL(5, 0)));
BUILD_BUG_ON(ICC_NMIAR1_EL1_RES0 != (GENMASK_ULL(63, 24)));
BUILD_BUG_ON(TRBLIMITR_EL1_RES0 != (GENMASK_ULL(11, 7)));
BUILD_BUG_ON(TRBBASER_EL1_RES0 != (GENMASK_ULL(11, 0)));
BUILD_BUG_ON(TRBSR_EL1_RES0 != (GENMASK_ULL(63, 56) | GENMASK_ULL(25, 24) | GENMASK_ULL(19, 19) | GENMASK_ULL(16, 16)));
BUILD_BUG_ON(TRBMAR_EL1_RES0 != (GENMASK_ULL(63, 12)));
BUILD_BUG_ON(TRBTRG_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(TRBIDR_EL1_RES0 != (GENMASK_ULL(63, 12) | GENMASK_ULL(7, 6)));

#endif
}
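check_res_bits() compiles to an empty function unless CONFIG_KVM_ARM64_RES_BITS_PARANOIA is set, so it can be called unconditionally from an init path and the BUILD_BUG_ON()s only ever fire at compile time. A sketch of such a call site; the function name below is illustrative, not the actual caller in this series:

/* Illustrative sketch, not part of this diff. */
static int __init example_sysreg_init(void)
{
	check_res_bits();	/* no-op at runtime; asserts are compile-time only */
	return 0;
}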
