Diffstat (limited to 'arch/x86/xen/mmu_pv.c')
 arch/x86/xen/mmu_pv.c | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 3273c985d3dd..cf2ade864c30 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -285,13 +285,6 @@ static void xen_set_pte(pte_t *ptep, pte_t pteval)
 	__xen_set_pte(ptep, pteval);
 }
 
-static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
-			   pte_t *ptep, pte_t pteval)
-{
-	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
-	__xen_set_pte(ptep, pteval);
-}
-
 pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
 				 unsigned long addr, pte_t *ptep)
 {
@@ -1149,7 +1142,7 @@ static void __init xen_pagetable_p2m_free(void)
 	 * We could be in __ka space.
 	 * We roundup to the PMD, which means that if anybody at this stage is
 	 * using the __ka address of xen_start_info or
-	 * xen_start_info->shared_info they are in going to crash. Fortunatly
+	 * xen_start_info->shared_info they are in going to crash. Fortunately
 	 * we have already revectored in xen_setup_kernel_pagetable.
 	 */
 	size = roundup(size, PMD_SIZE);
@@ -2105,7 +2098,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 	.release_pmd = xen_release_pmd_init,
 
 	.set_pte = xen_set_pte_init,
-	.set_pte_at = xen_set_pte_at,
 	.set_pmd = xen_set_pmd_hyper,
 
 	.ptep_modify_prot_start = __ptep_modify_prot_start,