Diffstat (limited to 'arch/arm64/include/asm/kvm_mmu.h')
-rw-r--r--	arch/arm64/include/asm/kvm_mmu.h	95
1 file changed, 87 insertions, 8 deletions
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 672c8684d5c2..9679067a1574 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -173,6 +173,18 @@ static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
 	return pmd;
 }
 
+static inline pte_t kvm_s2pte_mkexec(pte_t pte)
+{
+	pte_val(pte) &= ~PTE_S2_XN;
+	return pte;
+}
+
+static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
+{
+	pmd_val(pmd) &= ~PMD_S2_XN;
+	return pmd;
+}
+
 static inline void kvm_set_s2pte_readonly(pte_t *pte)
 {
 	pteval_t old_pteval, pteval;
@@ -191,6 +203,11 @@ static inline bool kvm_s2pte_readonly(pte_t *pte)
 	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
 }
 
+static inline bool kvm_s2pte_exec(pte_t *pte)
+{
+	return !(pte_val(*pte) & PTE_S2_XN);
+}
+
 static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
 {
 	kvm_set_s2pte_readonly((pte_t *)pmd);
@@ -201,6 +218,11 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 	return kvm_s2pte_readonly((pte_t *)pmd);
 }
 
+static inline bool kvm_s2pmd_exec(pmd_t *pmd)
+{
+	return !(pmd_val(*pmd) & PMD_S2_XN);
+}
+
 static inline bool kvm_page_empty(void *ptr)
 {
 	struct page *ptr_page = virt_to_page(ptr);
@@ -230,21 +252,25 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
 }
 
-static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
-					       kvm_pfn_t pfn,
-					       unsigned long size)
+static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
 {
 	void *va = page_address(pfn_to_page(pfn));
 
 	kvm_flush_dcache_to_poc(va, size);
+}
 
+static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
+						  unsigned long size)
+{
 	if (icache_is_aliasing()) {
 		/* any kind of VIPT cache */
 		__flush_icache_all();
 	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
 		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
-		flush_icache_range((unsigned long)va,
-				   (unsigned long)va + size);
+		void *va = page_address(pfn_to_page(pfn));
+
+		invalidate_icache_range((unsigned long)va,
+					(unsigned long)va + size);
 	}
 }
 
@@ -273,15 +299,26 @@ void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
 static inline bool __kvm_cpu_uses_extended_idmap(void)
 {
-	return __cpu_uses_extended_idmap();
+	return __cpu_uses_extended_idmap_level();
 }
 
+static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
+{
+	return idmap_ptrs_per_pgd;
+}
+
+/*
+ * Can't use pgd_populate here, because the extended idmap adds an extra level
+ * above CONFIG_PGTABLE_LEVELS (which is 2 or 3 if we're using the extended
+ * idmap), and pgd_populate is only available if CONFIG_PGTABLE_LEVELS = 4.
+ */
 static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
 				       pgd_t *hyp_pgd,
 				       pgd_t *merged_hyp_pgd,
 				       unsigned long hyp_idmap_start)
 {
 	int idmap_idx;
+	u64 pgd_addr;
 
 	/*
 	 * Use the first entry to access the HYP mappings. It is
@@ -289,7 +326,8 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
 	 * extended idmap.
 	 */
 	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
-	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);
+	pgd_addr = __phys_to_pgd_val(__pa(hyp_pgd));
+	merged_hyp_pgd[0] = __pgd(pgd_addr | PMD_TYPE_TABLE);
 
 	/*
 	 * Create another extended level entry that points to the boot HYP map,
@@ -299,7 +337,8 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
 	 */
 	idmap_idx = hyp_idmap_start >> VA_BITS;
 	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
-	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
+	pgd_addr = __phys_to_pgd_val(__pa(boot_hyp_pgd));
+	merged_hyp_pgd[idmap_idx] = __pgd(pgd_addr | PMD_TYPE_TABLE);
 }
 
 static inline unsigned int kvm_get_vmid_bits(void)
@@ -309,5 +348,45 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu.h>
+
+static inline void *kvm_get_hyp_vector(void)
+{
+	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
+	void *vect = kvm_ksym_ref(__kvm_hyp_vector);
+
+	if (data->fn) {
+		vect = __bp_harden_hyp_vecs_start +
+		       data->hyp_vectors_slot * SZ_2K;
+
+		if (!has_vhe())
+			vect = lm_alias(vect);
+	}
+
+	return vect;
+}
+
+static inline int kvm_map_vectors(void)
+{
+	return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
+				   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
+				   PAGE_HYP_EXEC);
+}
+
+#else
+static inline void *kvm_get_hyp_vector(void)
+{
+	return kvm_ksym_ref(__kvm_hyp_vector);
+}
+
+static inline int kvm_map_vectors(void)
+{
+	return 0;
+}
+#endif
+
+#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
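For context, here is a minimal sketch (not part of this patch) of how a stage-2 fault path might combine the new helpers, along the lines of user_mem_abort() in virt/kvm/arm/mmu.c: the D-cache is cleaned whenever the guest gains access to a page, while I-cache invalidation can now be deferred until the mapping is actually made executable. The name sketch_prepare_guest_pte is illustrative; pfn_pte, PAGE_S2 and kvm_s2pte_mkwrite are existing arm64 KVM definitions.

static pte_t sketch_prepare_guest_pte(kvm_pfn_t pfn, unsigned long size,
				      bool writable, bool exec_fault)
{
	pte_t new_pte = pfn_pte(pfn, PAGE_S2);

	if (writable)
		new_pte = kvm_s2pte_mkwrite(new_pte);

	/* the guest must not see stale data through its new mapping */
	__clean_dcache_guest_page(pfn, size);

	if (exec_fault) {
		/* pay for I-cache maintenance only on executable mappings */
		new_pte = kvm_s2pte_mkexec(new_pte);
		__invalidate_icache_guest_page(pfn, size);
	}

	return new_pte;
}

Splitting __coherent_cache_guest_page() into the two helpers above is what makes this separation possible: a data-only page no longer pays for I-cache invalidation at all.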
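In the same hedged spirit, a sketch of how the branch-predictor-hardening additions might be consumed at initialisation time, modelled on virt/kvm/arm/arm.c (sketch_map_and_pick_vectors is an illustrative name):

static int sketch_map_and_pick_vectors(unsigned long *vector_ptr)
{
	int err;

	/* map the hardened vector slots into HYP; no-op without the config */
	err = kvm_map_vectors();
	if (err)
		return err;

	/*
	 * Resolve this CPU's vectors: a per-CPU hardening slot when a
	 * BP-hardening callback is installed, the stock __kvm_hyp_vector
	 * otherwise.
	 */
	*vector_ptr = (unsigned long)kvm_get_hyp_vector();
	return 0;
}

Because arm64_get_bp_hardening_data() reads per-CPU state, kvm_get_hyp_vector() is meant to run on the CPU whose vectors are being installed.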
