Diffstat (limited to 'include/kvm')
-rw-r--r--  include/kvm/arm_arch_timer.h |  92
-rw-r--r--  include/kvm/arm_hypercalls.h |  14
-rw-r--r--  include/kvm/arm_pmu.h        | 122
-rw-r--r--  include/kvm/arm_psci.h       |  20
-rw-r--r--  include/kvm/arm_vgic.h       | 104
-rw-r--r--  include/kvm/iodev.h          |   6
6 files changed, 275 insertions, 83 deletions
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 51c19381108c..681cf0c8b9df 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -13,6 +13,9 @@ enum kvm_arch_timers {
 	TIMER_PTIMER,
 	TIMER_VTIMER,
+	NR_KVM_EL0_TIMERS,
+	TIMER_HVTIMER = NR_KVM_EL0_TIMERS,
+	TIMER_HPTIMER,
 	NR_KVM_TIMERS
 };
 
@@ -21,17 +24,41 @@ enum kvm_arch_timer_regs {
 	TIMER_REG_CVAL,
 	TIMER_REG_TVAL,
 	TIMER_REG_CTL,
+	TIMER_REG_VOFF,
+};
+
+struct arch_timer_offset {
+	/*
+	 * If set, pointer to one of the offsets in the kvm's offset
+	 * structure. If NULL, assume a zero offset.
+	 */
+	u64	*vm_offset;
+	/*
+	 * If set, pointer to one of the offsets in the vcpu's sysreg
+	 * array. If NULL, assume a zero offset.
+	 */
+	u64	*vcpu_offset;
+};
+
+struct arch_timer_vm_data {
+	/* Offset applied to the virtual timer/counter */
+	u64	voffset;
+	/* Offset applied to the physical timer/counter */
+	u64	poffset;
+
+	/* The PPI for each timer, global to the VM */
+	u8	ppi[NR_KVM_TIMERS];
 };
 
 struct arch_timer_context {
 	struct kvm_vcpu			*vcpu;
 
-	/* Timer IRQ */
-	struct kvm_irq_level		irq;
-
 	/* Emulated Timer (may be unused) */
 	struct hrtimer			hrtimer;
+	u64				ns_frac;
+
+	/* Offset for this counter/timer */
+	struct arch_timer_offset	offset;
 	/*
 	 * We have multiple paths which can save/restore the timer state onto
 	 * the hardware, so we need some way of keeping track of where the
@@ -39,17 +66,24 @@ struct arch_timer_context {
 	 */
 	bool				loaded;
 
+	/* Output level of the timer IRQ */
+	struct {
+		bool			level;
+	} irq;
+
 	/* Duplicated state from arch_timer.c for convenience */
 	u32				host_timer_irq;
-	u32				host_timer_irq_flags;
 };
 
 struct timer_map {
 	struct arch_timer_context *direct_vtimer;
 	struct arch_timer_context *direct_ptimer;
+	struct arch_timer_context *emul_vtimer;
 	struct arch_timer_context *emul_ptimer;
 };
 
+void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map);
+
 struct arch_timer_cpu {
 	struct arch_timer_context timers[NR_KVM_TIMERS];
 
@@ -60,15 +94,18 @@ struct arch_timer_cpu {
 	bool			enabled;
 };
 
-int kvm_timer_hyp_init(bool);
+int __init kvm_timer_hyp_init(bool has_gic);
 int kvm_timer_enable(struct kvm_vcpu *vcpu);
-int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_timer_sync_nested(struct kvm_vcpu *vcpu);
 void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
 bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
 void kvm_timer_update_run(struct kvm_vcpu *vcpu);
 void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
 
+void kvm_timer_init_vm(struct kvm *kvm);
+
 u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
 int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
 
@@ -76,8 +113,6 @@ int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
 int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
 int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
 
-bool kvm_timer_is_pending(struct kvm_vcpu *vcpu);
-
 u64 kvm_phys_timer_read(void);
 
 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);
@@ -85,15 +120,18 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
 
 void kvm_timer_init_vhe(void);
 
-bool kvm_arch_timer_get_input_level(int vintid);
-
 #define vcpu_timer(v)	(&(v)->arch.timer_cpu)
 #define vcpu_get_timer(v,t)	(&vcpu_timer(v)->timers[(t)])
 #define vcpu_vtimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_VTIMER])
 #define vcpu_ptimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_PTIMER])
+#define vcpu_hvtimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_HVTIMER])
+#define vcpu_hptimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_HPTIMER])
 
 #define arch_timer_ctx_index(ctx)	((ctx) - vcpu_timer((ctx)->vcpu)->timers)
 
+#define timer_vm_data(ctx)	(&(ctx)->vcpu->kvm->arch.timer_data)
+#define timer_irq(ctx)		(timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])
+
 u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
 			      enum kvm_arch_timers tmr,
 			      enum kvm_arch_timer_regs treg);
@@ -106,4 +144,38 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
 u32 timer_get_ctl(struct arch_timer_context *ctxt);
 u64 timer_get_cval(struct arch_timer_context *ctxt);
 
+/* CPU HP callbacks */
+void kvm_timer_cpu_up(void);
+void kvm_timer_cpu_down(void);
+
+/* CNTKCTL_EL1 valid bits as of DDI0487J.a */
+#define CNTKCTL_VALID_BITS	(BIT(17) | GENMASK_ULL(9, 0))
+
+DECLARE_STATIC_KEY_FALSE(broken_cntvoff_key);
+
+static inline bool has_broken_cntvoff(void)
+{
+	return static_branch_unlikely(&broken_cntvoff_key);
+}
+
+static inline bool has_cntpoff(void)
+{
+	return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
+}
+
+static inline u64 timer_get_offset(struct arch_timer_context *ctxt)
+{
+	u64 offset = 0;
+
+	if (!ctxt)
+		return 0;
+
+	if (ctxt->offset.vm_offset)
+		offset += *ctxt->offset.vm_offset;
+	if (ctxt->offset.vcpu_offset)
+		offset += *ctxt->offset.vcpu_offset;
+
+	return offset;
+}
+
 #endif
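The new offset scheme composes a VM-wide offset (struct arch_timer_vm_data) with an optional per-vCPU offset, and timer_get_offset() sums whichever pointers are populated. A minimal sketch of how a caller might apply it when presenting a counter value to the guest; read_emulated_counter() is an invented name for illustration, not part of this diff:

static u64 read_emulated_counter(struct arch_timer_context *ctxt)
{
	/* Host physical counter, adjusted by the combined VM/vCPU offset */
	return kvm_phys_timer_read() - timer_get_offset(ctxt);
}

Only the wrapper is hypothetical; kvm_phys_timer_read() and timer_get_offset() are declared in the header above.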
diff --git a/include/kvm/arm_hypercalls.h b/include/kvm/arm_hypercalls.h
index 0e2509d27910..2df152207ccd 100644
--- a/include/kvm/arm_hypercalls.h
+++ b/include/kvm/arm_hypercalls.h
@@ -6,7 +6,7 @@
 
 #include <asm/kvm_emulate.h>
 
-int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+int kvm_smccc_call_handler(struct kvm_vcpu *vcpu);
 
 static inline u32 smccc_get_function(struct kvm_vcpu *vcpu)
 {
@@ -40,4 +40,16 @@ static inline void smccc_set_retval(struct kvm_vcpu *vcpu,
 	vcpu_set_reg(vcpu, 3, a3);
 }
 
+struct kvm_one_reg;
+
+void kvm_arm_init_hypercalls(struct kvm *kvm);
+void kvm_arm_teardown_hypercalls(struct kvm *kvm);
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+
+int kvm_vm_smccc_has_attr(struct kvm *kvm, struct kvm_device_attr *attr);
+int kvm_vm_smccc_set_attr(struct kvm *kvm, struct kvm_device_attr *attr);
+
 #endif
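kvm_hvc_call_handler() becomes kvm_smccc_call_handler(), reflecting that it dispatches SMCCC calls generally rather than only HVCs. A sketch of a handler built on this header's accessors; handle_example_call() and the dispatch choices are illustrative only, not KVM's actual handler:

#include <linux/arm-smccc.h>
#include <kvm/arm_hypercalls.h>

static int handle_example_call(struct kvm_vcpu *vcpu)
{
	u32 func_id = smccc_get_function(vcpu);

	switch (func_id) {
	case ARM_SMCCC_VERSION_FUNC_ID:
		/* Return values travel back to the guest in x0-x3 */
		smccc_set_retval(vcpu, ARM_SMCCC_VERSION_1_1, 0, 0, 0);
		break;
	default:
		smccc_set_retval(vcpu, SMCCC_RET_NOT_SUPPORTED, 0, 0, 0);
		break;
	}

	return 1;	/* 1 == resume the guest */
}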
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 1d94acd0bc85..96754b51b411 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -8,39 +8,46 @@
 #define __ASM_ARM_KVM_PMU_H
 
 #include <linux/perf_event.h>
-#include <asm/perf_event.h>
+#include <linux/perf/arm_pmuv3.h>
 
-#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
-#define ARMV8_PMU_MAX_COUNTER_PAIRS	((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
-
-#ifdef CONFIG_KVM_ARM_PMU
+#define KVM_ARMV8_PMU_MAX_COUNTERS	32
 
+#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
 struct kvm_pmc {
 	u8 idx;	/* index into the pmu->pmc array */
 	struct perf_event *perf_event;
 };
 
+struct kvm_pmu_events {
+	u64 events_host;
+	u64 events_guest;
+};
+
 struct kvm_pmu {
+	struct irq_work overflow_work;
+	struct kvm_pmu_events events;
+	struct kvm_pmc pmc[KVM_ARMV8_PMU_MAX_COUNTERS];
 	int irq_num;
-	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
-	DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
-	bool ready;
 	bool created;
 	bool irq_level;
-	struct irq_work overflow_work;
 };
 
-#define kvm_arm_pmu_v3_ready(v)		((v)->arch.pmu.ready)
+struct arm_pmu_entry {
+	struct list_head entry;
+	struct arm_pmu *arm_pmu;
+};
+
+bool kvm_supports_guest_pmuv3(void);
 #define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
-u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
+void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
+u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
+u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu);
 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
 void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
@@ -49,7 +56,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 				    u64 select_idx);
-bool kvm_arm_support_pmu_v3(void);
+void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
 int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
 			    struct kvm_device_attr *attr);
 int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
@@ -57,11 +64,43 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
 			    struct kvm_device_attr *attr);
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
+
+struct kvm_pmu_events *kvm_get_pmu_events(void);
+void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_resync_el0(void);
+
+#define kvm_vcpu_has_pmu(vcpu)					\
+	(vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))
+
+/*
+ * Updates the vcpu's view of the pmu events for this cpu.
+ * Must be called before every vcpu run after disabling interrupts, to ensure
+ * that an interrupt cannot fire and update the structure.
+ */
+#define kvm_pmu_update_vcpu_events(vcpu)				\
+	do {								\
+		if (!has_vhe() && system_supports_pmuv3())		\
+			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
+	} while (0)
+
+u8 kvm_arm_pmu_get_pmuver_limit(void);
+u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
+int kvm_arm_set_default_pmu(struct kvm *kvm);
+u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);
+
+u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
+bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
+void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
 #else
 struct kvm_pmu {
 };
 
-#define kvm_arm_pmu_v3_ready(v)		(false)
+static inline bool kvm_supports_guest_pmuv3(void)
+{
+	return false;
+}
+
 #define kvm_arm_pmu_irq_initialized(v)	(false)
 static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
 					    u64 select_idx)
@@ -70,15 +109,19 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
 }
 static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
 					     u64 select_idx, u64 val) {}
-static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+static inline void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu,
+						  u64 select_idx, u64 val) {}
+static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
 {
 	return 0;
 }
 static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
-static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
-static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
-static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
 static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
@@ -90,7 +133,6 @@ static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
 						  u64 data, u64 select_idx) {}
-static inline bool kvm_arm_support_pmu_v3(void) { return false; }
 static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
 					  struct kvm_device_attr *attr)
 {
@@ -114,6 +156,44 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 {
 	return 0;
 }
+
+#define kvm_vcpu_has_pmu(vcpu)		({ false; })
+static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
+static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
+{
+	return 0;
+}
+static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
+{
+	return 0;
+}
+static inline void kvm_vcpu_pmu_resync_el0(void) {}
+
+static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
+{
+	return -ENODEV;
+}
+
+static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
+{
+	return 0;
+}
+
+static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
+{
+	return false;
+}
+
+static inline void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) {}
+
 #endif
 
 #endif
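The comment on kvm_pmu_update_vcpu_events() requires the call site to run with interrupts disabled. A simplified sketch of the expected usage pattern on the vcpu_run path (KVM's real entry sequence does considerably more around this point):

	local_irq_disable();

	/*
	 * Snapshot the per-CPU host/guest PMU event masks; with IRQs off,
	 * a perf interrupt cannot rewrite the structure underneath us.
	 */
	kvm_pmu_update_vcpu_events(vcpu);

	/* ... world switch into the guest ... */

	local_irq_enable();

Note also the #else branch: every accessor gains a static inline stub, so call sites stay free of #ifdef guards regardless of CONFIG_HW_PERF_EVENTS/CONFIG_KVM.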
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
index 5b58bd2fe088..cbaec804eb83 100644
--- a/include/kvm/arm_psci.h
+++ b/include/kvm/arm_psci.h
@@ -13,14 +13,13 @@
 #define KVM_ARM_PSCI_0_1	PSCI_VERSION(0, 1)
 #define KVM_ARM_PSCI_0_2	PSCI_VERSION(0, 2)
 #define KVM_ARM_PSCI_1_0	PSCI_VERSION(1, 0)
+#define KVM_ARM_PSCI_1_1	PSCI_VERSION(1, 1)
+#define KVM_ARM_PSCI_1_2	PSCI_VERSION(1, 2)
+#define KVM_ARM_PSCI_1_3	PSCI_VERSION(1, 3)
 
-#define KVM_ARM_PSCI_LATEST	KVM_ARM_PSCI_1_0
+#define KVM_ARM_PSCI_LATEST	KVM_ARM_PSCI_1_3
 
-/*
- * We need the KVM pointer independently from the vcpu as we can call
- * this from HYP, and need to apply kern_hyp_va on it...
- */
-static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
+static inline int kvm_psci_version(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * Our PSCI implementation stays the same across versions from
@@ -29,7 +28,7 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
 	 * revisions. It is thus safe to return the latest, unless
 	 * userspace has instructed us otherwise.
 	 */
-	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
+	if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2)) {
 		if (vcpu->kvm->arch.psci_version)
 			return vcpu->kvm->arch.psci_version;
 
@@ -42,11 +41,4 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
 
 int kvm_psci_call(struct kvm_vcpu *vcpu);
 
-struct kvm_one_reg;
-
-int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-
 #endif /* __KVM_ARM_PSCI_H__ */
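With per-version macros now covering PSCI 1.1 through 1.3, version checks reduce to plain integer comparisons, because PSCI_VERSION() packs major and minor into a single ordered value. A hypothetical gate for a 1.1+ feature; the helper name is invented for illustration:

static inline bool vcpu_has_psci_1_1(struct kvm_vcpu *vcpu)
{
	/* PSCI_VERSION(1, 1) compares numerically above PSCI_VERSION(1, 0) */
	return kvm_psci_version(vcpu) >= KVM_ARM_PSCI_1_1;
}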
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index a8d8fdcd3723..4a34f7f0a864 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -5,12 +5,15 @@
 #ifndef __KVM_ARM_VGIC_H
 #define __KVM_ARM_VGIC_H
 
-#include <linux/kernel.h>
+#include <linux/bits.h>
 #include <linux/kvm.h>
 #include <linux/irqreturn.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/static_key.h>
 #include <linux/types.h>
+#include <linux/xarray.h>
 #include <kvm/iodev.h>
 #include <linux/list.h>
 #include <linux/jump_label.h>
@@ -23,7 +26,6 @@
 #define VGIC_NR_SGIS		16
 #define VGIC_NR_PPIS		16
 #define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)
-#define VGIC_MAX_PRIVATE	(VGIC_NR_PRIVATE_IRQS - 1)
 #define VGIC_MAX_SPI		1019
 #define VGIC_MAX_RESERVED	1023
 #define VGIC_MIN_LPI		8192
@@ -72,6 +74,9 @@ struct vgic_global {
 	bool			has_gicv4;
 	bool			has_gicv4_1;
 
+	/* Pseudo GICv3 from outer space */
+	bool			no_hw_deactivation;
+
 	/* GIC system register CPU interface */
 	struct static_key_false gicv3_cpuif;
 
@@ -89,9 +94,29 @@ enum vgic_irq_config {
 	VGIC_CONFIG_LEVEL
 };
 
+/*
+ * Per-irq ops overriding some common behaviours.
+ *
+ * Always called in non-preemptible section and the functions can use
+ * kvm_arm_get_running_vcpu() to get the vcpu pointer for private IRQs.
+ */
+struct irq_ops {
+	/* Per interrupt flags for special-cased interrupts */
+	unsigned long flags;
+
+#define VGIC_IRQ_SW_RESAMPLE	BIT(0)	/* Clear the active state for resampling */
+
+	/*
+	 * Callback function pointer to in-kernel devices that can tell us the
+	 * state of the input level of mapped level-triggered IRQ faster than
+	 * peaking into the physical GIC.
+	 */
+	bool (*get_input_level)(int vintid);
+};
+
 struct vgic_irq {
 	raw_spinlock_t irq_lock;	/* Protects the content of the struct */
-	struct list_head lpi_list;	/* Used to link all LPIs together */
+	struct rcu_head rcu;
 	struct list_head ap_list;
 
 	struct kvm_vcpu *vcpu;		/* SGIs and PPIs: The VCPU
@@ -126,21 +151,17 @@ struct vgic_irq {
 	u8 group;			/* 0 == group 0, 1 == group 1 */
 	enum vgic_irq_config config;	/* Level or edge */
 
-	/*
-	 * Callback function pointer to in-kernel devices that can tell us the
-	 * state of the input level of mapped level-triggered IRQ faster than
-	 * peaking into the physical GIC.
-	 *
-	 * Always called in non-preemptible section and the functions can use
-	 * kvm_arm_get_running_vcpu() to get the vcpu pointer for private
-	 * IRQs.
-	 */
-	bool (*get_input_level)(int vintid);
+	struct irq_ops *ops;
 
 	void *owner;			/* Opaque pointer to reserve an interrupt
 					   for in-kernel devices. */
 };
 
+static inline bool vgic_irq_needs_resampling(struct vgic_irq *irq)
+{
+	return irq->ops && (irq->ops->flags & VGIC_IRQ_SW_RESAMPLE);
+}
+
 struct vgic_register_region;
 struct vgic_its;
 
@@ -188,6 +209,12 @@ struct vgic_its {
 	struct mutex		its_lock;
 	struct list_head	device_list;
 	struct list_head	collection_list;
+
+	/*
+	 * Caches the (device_id, event_id) -> vgic_irq translation for
+	 * LPIs that are mapped and enabled.
+	 */
+	struct xarray		translation_cache;
 };
 
 struct vgic_state_iter;
@@ -210,6 +237,9 @@ struct vgic_dist {
 
 	/* Implementation revision as reported in the GICD_IIDR */
 	u32			implementation_rev;
+#define KVM_VGIC_IMP_REV_2	2 /* GICv2 restorable groups */
+#define KVM_VGIC_IMP_REV_3	3 /* GICv3 GICR_CTLR.{IW,CES,RWP} */
+#define KVM_VGIC_IMP_REV_LATEST	KVM_VGIC_IMP_REV_3
 
 	/* Userspace can write to GICv2 IGROUPR */
 	bool			v2_groups_user_writable;
@@ -219,6 +249,9 @@ struct vgic_dist {
 
 	int			nr_spis;
 
+	/* The GIC maintenance IRQ for nested hypervisors. */
+	u32			mi_intid;
+
 	/* base addresses in guest physical address space: */
 	gpa_t			vgic_dist_base;		/* distributor */
 	union {
@@ -239,6 +272,7 @@ struct vgic_dist {
 	struct vgic_io_device	dist_iodev;
 
 	bool			has_its;
+	bool			table_write_in_progress;
 
 	/*
 	 * Contains the attributes and gpa of the LPI configuration table.
@@ -248,13 +282,8 @@ struct vgic_dist {
 	 */
 	u64			propbaser;
 
-	/* Protects the lpi_list and the count value below. */
-	raw_spinlock_t		lpi_list_lock;
-	struct list_head	lpi_list_head;
-	int			lpi_list_count;
-
-	/* LPI translation cache */
-	struct list_head	lpi_translation_cache;
+#define LPI_XA_MARK_DEBUG_ITER	XA_MARK_0
+	struct xarray		lpi_xa;
 
 	/* used by vgic-debug */
 	struct vgic_state_iter *iter;
@@ -304,7 +333,7 @@ struct vgic_cpu {
 		struct vgic_v3_cpu_if	vgic_v3;
 	};
 
-	struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
+	struct vgic_irq *private_irqs;
 
 	raw_spinlock_t ap_list_lock;	/* Protects the ap_list */
 
@@ -322,11 +351,13 @@ struct vgic_cpu {
 	 */
 	struct vgic_io_device	rd_iodev;
 	struct vgic_redist_region *rdreg;
+	u32 rdreg_index;
+	atomic_t syncr_busy;
 
 	/* Contains the attributes and gpa of the LPI pending tables. */
 	u64 pendbaser;
-
-	bool lpis_enabled;
+	/* GICR_CTLR.{ENABLE_LPIS,RWP} */
+	atomic_t ctlr;
 
 	/* Cache guest priority bits */
 	u32 num_pri_bits;
@@ -338,9 +369,10 @@ struct vgic_cpu {
 extern struct static_key_false vgic_v2_cpuif_trap;
 extern struct static_key_false vgic_v3_cpuif_trap;
 
-int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
+int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr);
 void kvm_vgic_early_init(struct kvm *kvm);
 int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
+int kvm_vgic_vcpu_nv_init(struct kvm_vcpu *vcpu);
 int kvm_vgic_create(struct kvm *kvm, u32 type);
 void kvm_vgic_destroy(struct kvm *kvm);
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
@@ -348,18 +380,22 @@ int kvm_vgic_map_resources(struct kvm *kvm);
 int kvm_vgic_hyp_init(void);
 void kvm_vgic_init_cpu_hardware(void);
 
-int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
-			bool level, void *owner);
+int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
+			unsigned int intid, bool level, void *owner);
 int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
-			  u32 vintid, bool (*get_input_level)(int vindid));
+			  u32 vintid, struct irq_ops *ops);
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
+int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid);
 bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
 
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
 
 void kvm_vgic_load(struct kvm_vcpu *vcpu);
 void kvm_vgic_put(struct kvm_vcpu *vcpu);
-void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
+
+u16 vgic_v3_get_eisr(struct kvm_vcpu *vcpu);
+u16 vgic_v3_get_elrsr(struct kvm_vcpu *vcpu);
+u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu);
 
 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)	((k)->arch.vgic.initialized)
@@ -398,10 +434,16 @@ struct kvm_kernel_irq_routing_entry;
 
 int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
			       struct kvm_kernel_irq_routing_entry *irq_entry);
-int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
-				 struct kvm_kernel_irq_routing_entry *irq_entry);
+int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq);
 
 int vgic_v4_load(struct kvm_vcpu *vcpu);
-int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);
+void vgic_v4_commit(struct kvm_vcpu *vcpu);
+int vgic_v4_put(struct kvm_vcpu *vcpu);
+
+bool vgic_state_is_nested(struct kvm_vcpu *vcpu);
+
+/* CPU HP callbacks */
+void kvm_vgic_cpu_up(void);
+void kvm_vgic_cpu_down(void);
 
 #endif /* __KVM_ARM_VGIC_H */
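kvm_vgic_map_phys_irq() now takes a struct irq_ops instead of a bare callback, so a caller bundles its level probe with flags such as VGIC_IRQ_SW_RESAMPLE. A sketch of porting a caller to the new signature; the "example" names are invented for illustration:

static bool example_get_input_level(int vintid)
{
	return false;	/* poll the in-kernel device's line state here */
}

static struct irq_ops example_irq_ops = {
	.flags		 = VGIC_IRQ_SW_RESAMPLE,
	.get_input_level = example_get_input_level,
};

static int example_map(struct kvm_vcpu *vcpu, unsigned int host_irq, u32 vintid)
{
	/* Was: kvm_vgic_map_phys_irq(vcpu, host_irq, vintid, example_get_input_level); */
	return kvm_vgic_map_phys_irq(vcpu, host_irq, vintid, &example_irq_ops);
}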
diff --git a/include/kvm/iodev.h b/include/kvm/iodev.h
index d75fc4365746..56619e33251e 100644
--- a/include/kvm/iodev.h
+++ b/include/kvm/iodev.h
@@ -55,10 +55,4 @@ static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu,
 				     : -EOPNOTSUPP;
 }
 
-static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
-{
-	if (dev->ops->destructor)
-		dev->ops->destructor(dev);
-}
-
 #endif /* __KVM_IODEV_H__ */