Diffstat (limited to 'arch/s390/include/asm/pgtable.h')
 arch/s390/include/asm/pgtable.h | 636 ++++++---------------------
 1 file changed, 121 insertions(+), 515 deletions(-)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 64ead8091248..2f66645587a2 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -298,15 +298,15 @@ static inline int is_module_addr(void *addr)
/*
* Segment table entry encoding (R = read-only, I = invalid, y = young bit):
- * dy..R...I...wr
+ * dy..R...I...rw
* prot-none, clean, old 00..1...1...00
* prot-none, clean, young 01..1...1...00
* prot-none, dirty, old 10..1...1...00
* prot-none, dirty, young 11..1...1...00
- * read-only, clean, old 00..1...1...01
- * read-only, clean, young 01..1...0...01
- * read-only, dirty, old 10..1...1...01
- * read-only, dirty, young 11..1...0...01
+ * read-only, clean, old 00..1...1...10
+ * read-only, clean, young 01..1...0...10
+ * read-only, dirty, old 10..1...1...10
+ * read-only, dirty, young 11..1...0...10
* read-write, clean, old 00..1...1...11
* read-write, clean, young 01..1...0...11
* read-write, dirty, old 10..0...1...11
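For orientation, a minimal sketch of how the software bits in the encoding table above can be tested. The mask names and values below are hypothetical stand-ins for the _SEGMENT_ENTRY_* constants defined earlier in this header; only the column layout dy..R...I...rw from the table is assumed.

/* Hypothetical masks, ordered like the dy..R...I...rw columns above. */
#define SEG_SOFT_DIRTY  0x8000UL        /* d: software dirty */
#define SEG_SOFT_YOUNG  0x4000UL        /* y: software young */
#define SEG_PROTECT     0x0200UL        /* R: hardware protection (read-only) */
#define SEG_INVALID     0x0020UL        /* I: hardware invalid */
#define SEG_READ        0x0002UL        /* r: software read */
#define SEG_WRITE       0x0001UL        /* w: software write */

static inline int seg_prot_none(unsigned long entry)
{
        return (entry & (SEG_READ | SEG_WRITE)) == 0;   /* rw == 00 */
}

static inline int seg_write(unsigned long entry)
{
        return (entry & SEG_WRITE) != 0;                /* rw == 11 */
}

static inline int seg_young(unsigned long entry)
{
        return (entry & SEG_SOFT_YOUNG) != 0;           /* y bit */
}

Worked example from the table: "read-only, clean, young" is 01..1...0...10, i.e. y and r set, R (protect) set, I clear. Such an entry is hardware-valid but write-protected, so reads succeed while a write takes a protection fault, which is how the software dirty bit gets set.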
@@ -520,15 +520,6 @@ static inline int pmd_bad(pmd_t pmd)
return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}
-#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
-extern int pmdp_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp,
- pmd_t entry, int dirty);
-
-#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
-extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp);
-
#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
@@ -631,208 +622,6 @@ static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
return pmd;
}
-static inline pgste_t pgste_get_lock(pte_t *ptep)
-{
- unsigned long new = 0;
-#ifdef CONFIG_PGSTE
- unsigned long old;
-
- preempt_disable();
- asm(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " nihh %0,0xff7f\n" /* clear PCL bit in old */
- " oihh %1,0x0080\n" /* set PCL bit in new */
- " csg %0,%1,%2\n"
- " jl 0b\n"
- : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
- : "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
-#endif
- return __pgste(new);
-}
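The asm above is a bit spinlock taken with compare-and-swap: it spins until the PCL bit (0x0080 in the high halfword, i.e. bit 2^55) is clear in the PGSTE and atomically sets it. A rough userspace equivalent using C11 atomics, purely illustrative:

#include <stdatomic.h>
#include <stdint.h>

#define PGSTE_PCL (1ULL << 55)  /* the 0x0080 high-halfword bit from the asm */

static inline uint64_t pgste_lock_sketch(_Atomic uint64_t *pgste)
{
        uint64_t old, new;

        old = atomic_load_explicit(pgste, memory_order_relaxed);
        do {
                old &= ~PGSTE_PCL;      /* expected value: lock bit clear */
                new = old | PGSTE_PCL;  /* desired value: lock bit set */
        } while (!atomic_compare_exchange_weak_explicit(pgste, &old, new,
                                                        memory_order_acquire,
                                                        memory_order_relaxed));
        return new;     /* unlock later by storing the value with PCL cleared */
}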
-
-static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
-{
-#ifdef CONFIG_PGSTE
- asm(
- " nihh %1,0xff7f\n" /* clear PCL bit */
- " stg %1,%0\n"
- : "=Q" (ptep[PTRS_PER_PTE])
- : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
- : "cc", "memory");
- preempt_enable();
-#endif
-}
-
-static inline pgste_t pgste_get(pte_t *ptep)
-{
- unsigned long pgste = 0;
-#ifdef CONFIG_PGSTE
- pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
-#endif
- return __pgste(pgste);
-}
-
-static inline void pgste_set(pte_t *ptep, pgste_t pgste)
-{
-#ifdef CONFIG_PGSTE
- *(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
-#endif
-}
-
-static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
- struct mm_struct *mm)
-{
-#ifdef CONFIG_PGSTE
- unsigned long address, bits, skey;
-
- if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
- return pgste;
- address = pte_val(*ptep) & PAGE_MASK;
- skey = (unsigned long) page_get_storage_key(address);
- bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
- /* Transfer page changed & referenced bit to guest bits in pgste */
- pgste_val(pgste) |= bits << 48; /* GR bit & GC bit */
- /* Copy page access key and fetch protection bit to pgste */
- pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
- pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
-#endif
- return pgste;
-
-}
-
-static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
- struct mm_struct *mm)
-{
-#ifdef CONFIG_PGSTE
- unsigned long address;
- unsigned long nkey;
-
- if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
- return;
- VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
- address = pte_val(entry) & PAGE_MASK;
- /*
- * Set page access key and fetch protection bit from pgste.
- * The guest C/R information is still in the PGSTE, set real
- * key C/R to 0.
- */
- nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
- nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
- page_set_storage_key(address, nkey, 0);
-#endif
-}
-
-static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
-{
- if ((pte_val(entry) & _PAGE_PRESENT) &&
- (pte_val(entry) & _PAGE_WRITE) &&
- !(pte_val(entry) & _PAGE_INVALID)) {
- if (!MACHINE_HAS_ESOP) {
- /*
- * Without enhanced suppression-on-protection force
- * the dirty bit on for all writable ptes.
- */
- pte_val(entry) |= _PAGE_DIRTY;
- pte_val(entry) &= ~_PAGE_PROTECT;
- }
- if (!(pte_val(entry) & _PAGE_PROTECT))
- /* This pte allows write access, set user-dirty */
- pgste_val(pgste) |= PGSTE_UC_BIT;
- }
- *ptep = entry;
- return pgste;
-}
-
-/**
- * struct gmap_struct - guest address space
- * @crst_list: list of all crst tables used in the guest address space
- * @mm: pointer to the parent mm_struct
- * @guest_to_host: radix tree with guest to host address translation
- * @host_to_guest: radix tree with pointer to segment table entries
- * @guest_table_lock: spinlock to protect all entries in the guest page table
- * @table: pointer to the page directory
- * @asce: address space control element for gmap page table
- * @pfault_enabled: defines if pfaults are applicable for the guest
- */
-struct gmap {
- struct list_head list;
- struct list_head crst_list;
- struct mm_struct *mm;
- struct radix_tree_root guest_to_host;
- struct radix_tree_root host_to_guest;
- spinlock_t guest_table_lock;
- unsigned long *table;
- unsigned long asce;
- unsigned long asce_end;
- void *private;
- bool pfault_enabled;
-};
-
-/**
- * struct gmap_notifier - notify function block for page invalidation
- * @notifier_call: address of callback function
- */
-struct gmap_notifier {
- struct list_head list;
- void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
-};
-
-struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
-void gmap_free(struct gmap *gmap);
-void gmap_enable(struct gmap *gmap);
-void gmap_disable(struct gmap *gmap);
-int gmap_map_segment(struct gmap *gmap, unsigned long from,
- unsigned long to, unsigned long len);
-int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
-unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
-unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
-int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
-int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
-void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
-void __gmap_zap(struct gmap *, unsigned long gaddr);
-bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);
-
-
-void gmap_register_ipte_notifier(struct gmap_notifier *);
-void gmap_unregister_ipte_notifier(struct gmap_notifier *);
-int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
-void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
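The gmap API declared above (removed from this header by this patch) is what KVM on s390 uses to build the guest address space. A rough usage sketch; the limit, addresses and error codes are made up, the from/to argument roles are assumed from how KVM calls gmap_map_segment, and this is not the actual KVM code:

static int setup_guest_sketch(unsigned long host_addr, unsigned long guest_addr,
                              unsigned long size)
{
        struct gmap *g;

        g = gmap_alloc(current->mm, (1UL << 44) - 1);   /* hypothetical 16 TB limit */
        if (!g)
                return -ENOMEM;
        /* map the host userspace range at host_addr to guest address guest_addr
         * (assumed: from = host address, to = guest address) */
        if (gmap_map_segment(g, host_addr, guest_addr, size)) {
                gmap_free(g);
                return -EINVAL;
        }
        gmap_enable(g);         /* make this the active guest gmap (simplified) */
        return 0;
}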
-
-static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
- unsigned long addr,
- pte_t *ptep, pgste_t pgste)
-{
-#ifdef CONFIG_PGSTE
- if (pgste_val(pgste) & PGSTE_IN_BIT) {
- pgste_val(pgste) &= ~PGSTE_IN_BIT;
- gmap_do_ipte_notify(mm, addr, ptep);
- }
-#endif
- return pgste;
-}
-
-/*
- * Certain architectures need to do special things when PTEs
- * within a page table are directly modified. Thus, the following
- * hook is made available.
- */
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t entry)
-{
- pgste_t pgste;
-
- if (mm_has_pgste(mm)) {
- pgste = pgste_get_lock(ptep);
- pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
- pgste_set_key(ptep, pgste, entry, mm);
- pgste = pgste_set_pte(ptep, pgste, entry);
- pgste_set_unlock(ptep, pgste);
- } else {
- *ptep = entry;
- }
-}
-
/*
* query functions pte_write/pte_dirty/pte_young only work if
* pte_present() is true. Undefined behaviour if not..
@@ -998,96 +787,30 @@ static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
} while (nr != 255);
}
-static inline void ptep_flush_direct(struct mm_struct *mm,
- unsigned long address, pte_t *ptep)
-{
- int active, count;
-
- if (pte_val(*ptep) & _PAGE_INVALID)
- return;
- active = (mm == current->active_mm) ? 1 : 0;
- count = atomic_add_return(0x10000, &mm->context.attach_count);
- if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
- cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
- __ptep_ipte_local(address, ptep);
- else
- __ptep_ipte(address, ptep);
- atomic_sub(0x10000, &mm->context.attach_count);
-}
-
-static inline void ptep_flush_lazy(struct mm_struct *mm,
- unsigned long address, pte_t *ptep)
-{
- int active, count;
-
- if (pte_val(*ptep) & _PAGE_INVALID)
- return;
- active = (mm == current->active_mm) ? 1 : 0;
- count = atomic_add_return(0x10000, &mm->context.attach_count);
- if ((count & 0xffff) <= active) {
- pte_val(*ptep) |= _PAGE_INVALID;
- mm->context.flush_mm = 1;
- } else
- __ptep_ipte(address, ptep);
- atomic_sub(0x10000, &mm->context.attach_count);
-}
-
/*
- * Get (and clear) the user dirty bit for a pte.
+ * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
+ * both clear the TLB for the unmapped pte. The reason is that
+ * ptep_get_and_clear is used in common code (e.g. change_pte_range)
+ * to modify an active pte. The sequence is
+ * 1) ptep_get_and_clear
+ * 2) set_pte_at
+ * 3) flush_tlb_range
+ * On s390 the tlb needs to get flushed with the modification of the pte
+ * if the pte is active. The only way how this can be implemented is to
+ * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
+ * is a nop.
*/
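As a reminder of what that common-code sequence looks like, here is a simplified sketch in the spirit of change_pte_range(); it is not the actual mm/mprotect.c code:

static void change_one_pte_sketch(struct vm_area_struct *vma, unsigned long addr,
                                  pte_t *ptep, pgprot_t newprot)
{
        pte_t pte;

        /* 1) ptep_get_and_clear: on s390 this already flushes the TLB */
        pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);
        pte = pte_modify(pte, newprot);
        /* 2) set_pte_at: install the modified pte */
        set_pte_at(vma->vm_mm, addr, ptep, pte);
        /* 3) flush_tlb_range() follows in the caller; a nop on s390 */
}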
-static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
- unsigned long addr,
- pte_t *ptep)
-{
- pgste_t pgste;
- pte_t pte;
- int dirty;
-
- if (!mm_has_pgste(mm))
- return 0;
- pgste = pgste_get_lock(ptep);
- dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
- pgste_val(pgste) &= ~PGSTE_UC_BIT;
- pte = *ptep;
- if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
- pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
- __ptep_ipte(addr, ptep);
- if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
- pte_val(pte) |= _PAGE_PROTECT;
- else
- pte_val(pte) |= _PAGE_INVALID;
- *ptep = pte;
- }
- pgste_set_unlock(ptep, pgste);
- return dirty;
-}
+pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
+pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
- pgste_t pgste;
- pte_t pte, oldpte;
- int young;
-
- if (mm_has_pgste(vma->vm_mm)) {
- pgste = pgste_get_lock(ptep);
- pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
- }
-
- oldpte = pte = *ptep;
- ptep_flush_direct(vma->vm_mm, addr, ptep);
- young = pte_young(pte);
- pte = pte_mkold(pte);
-
- if (mm_has_pgste(vma->vm_mm)) {
- pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm);
- pgste = pgste_set_pte(ptep, pgste, pte);
- pgste_set_unlock(ptep, pgste);
- } else
- *ptep = pte;
+ pte_t pte = *ptep;
- return young;
+ pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
+ return pte_young(pte);
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
@@ -1097,104 +820,22 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
return ptep_test_and_clear_young(vma, address, ptep);
}
-/*
- * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
- * both clear the TLB for the unmapped pte. The reason is that
- * ptep_get_and_clear is used in common code (e.g. change_pte_range)
- * to modify an active pte. The sequence is
- * 1) ptep_get_and_clear
- * 2) set_pte_at
- * 3) flush_tlb_range
- * On s390 the tlb needs to get flushed with the modification of the pte
- * if the pte is active. The only way how this can be implemented is to
- * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
- * is a nop.
- */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
- unsigned long address, pte_t *ptep)
+ unsigned long addr, pte_t *ptep)
{
- pgste_t pgste;
- pte_t pte;
-
- if (mm_has_pgste(mm)) {
- pgste = pgste_get_lock(ptep);
- pgste = pgste_ipte_notify(mm, address, ptep, pgste);
- }
-
- pte = *ptep;
- ptep_flush_lazy(mm, address, ptep);
- pte_val(*ptep) = _PAGE_INVALID;
-
- if (mm_has_pgste(mm)) {
- pgste = pgste_update_all(&pte, pgste, mm);
- pgste_set_unlock(ptep, pgste);
- }
- return pte;
+ return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
-static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
- unsigned long address,
- pte_t *ptep)
-{
- pgste_t pgste;
- pte_t pte;
-
- if (mm_has_pgste(mm)) {
- pgste = pgste_get_lock(ptep);
- pgste_ipte_notify(mm, address, ptep, pgste);
- }
-
- pte = *ptep;
- ptep_flush_lazy(mm, address, ptep);
-
- if (mm_has_pgste(mm)) {
- pgste = pgste_update_all(&pte, pgste, mm);
- pgste_set(ptep, pgste);
- }
- return pte;
-}
-
-static inline void ptep_modify_prot_commit(struct mm_struct *mm,
- unsigned long address,
- pte_t *ptep, pte_t pte)
-{
- pgste_t pgste;
-
- if (mm_has_pgste(mm)) {
- pgste = pgste_get(ptep);
- pgste_set_key(ptep, pgste, pte, mm);
- pgste = pgste_set_pte(ptep, pgste, pte);
- pgste_set_unlock(ptep, pgste);
- } else
- *ptep = pte;
-}
+pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
+void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);
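The start/commit pair is a transaction: start fetches the old pte and leaves it temporarily invalid, commit installs the new value. A simplified sketch of the generic usage pattern, not the actual caller in mm/mprotect.c:

static void modify_prot_sketch(struct mm_struct *mm, unsigned long addr,
                               pte_t *ptep, pgprot_t newprot)
{
        pte_t old_pte, new_pte;

        old_pte = ptep_modify_prot_start(mm, addr, ptep);       /* pte now invalid */
        new_pte = pte_modify(old_pte, newprot);
        ptep_modify_prot_commit(mm, addr, ptep, new_pte);       /* pte valid again */
}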
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep)
+ unsigned long addr, pte_t *ptep)
{
- pgste_t pgste;
- pte_t pte;
-
- if (mm_has_pgste(vma->vm_mm)) {
- pgste = pgste_get_lock(ptep);
- pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
- }
-
- pte = *ptep;
- ptep_flush_direct(vma->vm_mm, address, ptep);
- pte_val(*ptep) = _PAGE_INVALID;
-
- if (mm_has_pgste(vma->vm_mm)) {
- if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
- _PGSTE_GPS_USAGE_UNUSED)
- pte_val(pte) |= _PAGE_UNUSED;
- pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
- pgste_set_unlock(ptep, pgste);
- }
- return pte;
+ return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}
/*
@@ -1206,80 +847,66 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
*/
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
- unsigned long address,
+ unsigned long addr,
pte_t *ptep, int full)
{
- pgste_t pgste;
- pte_t pte;
-
- if (!full && mm_has_pgste(mm)) {
- pgste = pgste_get_lock(ptep);
- pgste = pgste_ipte_notify(mm, address, ptep, pgste);
- }
-
- pte = *ptep;
- if (!full)
- ptep_flush_lazy(mm, address, ptep);
- pte_val(*ptep) = _PAGE_INVALID;
-
- if (!full && mm_has_pgste(mm)) {
- pgste = pgste_update_all(&pte, pgste, mm);
- pgste_set_unlock(ptep, pgste);
+ if (full) {
+ pte_t pte = *ptep;
+ *ptep = __pte(_PAGE_INVALID);
+ return pte;
}
- return pte;
+ return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
- unsigned long address, pte_t *ptep)
+static inline void ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
{
- pgste_t pgste;
pte_t pte = *ptep;
- if (pte_write(pte)) {
- if (mm_has_pgste(mm)) {
- pgste = pgste_get_lock(ptep);
- pgste = pgste_ipte_notify(mm, address, ptep, pgste);
- }
-
- ptep_flush_lazy(mm, address, ptep);
- pte = pte_wrprotect(pte);
-
- if (mm_has_pgste(mm)) {
- pgste = pgste_set_pte(ptep, pgste, pte);
- pgste_set_unlock(ptep, pgste);
- } else
- *ptep = pte;
- }
- return pte;
+ if (pte_write(pte))
+ ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep,
+ unsigned long addr, pte_t *ptep,
pte_t entry, int dirty)
{
- pgste_t pgste;
- pte_t oldpte;
-
- oldpte = *ptep;
- if (pte_same(oldpte, entry))
+ if (pte_same(*ptep, entry))
return 0;
- if (mm_has_pgste(vma->vm_mm)) {
- pgste = pgste_get_lock(ptep);
- pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
- }
+ ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
+ return 1;
+}
- ptep_flush_direct(vma->vm_mm, address, ptep);
+/*
+ * Additional functions to handle KVM guest page tables
+ */
+void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t entry);
+void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+void ptep_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, int reset);
+void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+
+bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
+int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+ unsigned char key, bool nq);
+unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
- if (mm_has_pgste(vma->vm_mm)) {
- if (pte_val(oldpte) & _PAGE_INVALID)
- pgste_set_key(ptep, pgste, entry, vma->vm_mm);
- pgste = pgste_set_pte(ptep, pgste, entry);
- pgste_set_unlock(ptep, pgste);
- } else
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t entry)
+{
+ if (mm_has_pgste(mm))
+ ptep_set_pte_at(mm, addr, ptep, entry);
+ else
*ptep = entry;
- return 1;
}
/*
@@ -1476,54 +1103,51 @@ static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
: "cc" );
}
-static inline void pmdp_flush_direct(struct mm_struct *mm,
- unsigned long address, pmd_t *pmdp)
-{
- int active, count;
+pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
+pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
- if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
- return;
- if (!MACHINE_HAS_IDTE) {
- __pmdp_csp(pmdp);
- return;
- }
- active = (mm == current->active_mm) ? 1 : 0;
- count = atomic_add_return(0x10000, &mm->context.attach_count);
- if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
- cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
- __pmdp_idte_local(address, pmdp);
- else
- __pmdp_idte(address, pmdp);
- atomic_sub(0x10000, &mm->context.attach_count);
-}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline void pmdp_flush_lazy(struct mm_struct *mm,
- unsigned long address, pmd_t *pmdp)
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable);
+
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp,
+ pmd_t entry, int dirty)
{
- int active, count;
+ VM_BUG_ON(addr & ~HPAGE_MASK);
- if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
- return;
- active = (mm == current->active_mm) ? 1 : 0;
- count = atomic_add_return(0x10000, &mm->context.attach_count);
- if ((count & 0xffff) <= active) {
- pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
- mm->context.flush_mm = 1;
- } else if (MACHINE_HAS_IDTE)
- __pmdp_idte(address, pmdp);
- else
- __pmdp_csp(pmdp);
- atomic_sub(0x10000, &mm->context.attach_count);
+ entry = pmd_mkyoung(entry);
+ if (dirty)
+ entry = pmd_mkdirty(entry);
+ if (pmd_val(*pmdp) == pmd_val(entry))
+ return 0;
+ pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
+ return 1;
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
-#define __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
- pgtable_t pgtable);
+ pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
+ return pmd_young(pmd);
+}
-#define __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+{
+ VM_BUG_ON(addr & ~HPAGE_MASK);
+ return pmdp_test_and_clear_young(vma, addr, pmdp);
+}
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t entry)
@@ -1539,66 +1163,48 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
return pmd;
}
-#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
-static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp)
-{
- pmd_t pmd;
-
- pmd = *pmdp;
- pmdp_flush_direct(vma->vm_mm, address, pmdp);
- *pmdp = pmd_mkold(pmd);
- return pmd_young(pmd);
-}
-
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
- unsigned long address, pmd_t *pmdp)
+ unsigned long addr, pmd_t *pmdp)
{
- pmd_t pmd = *pmdp;
-
- pmdp_flush_direct(mm, address, pmdp);
- pmd_clear(pmdp);
- return pmd;
+ return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
- unsigned long address,
+ unsigned long addr,
pmd_t *pmdp, int full)
{
- pmd_t pmd = *pmdp;
-
- if (!full)
- pmdp_flush_lazy(mm, address, pmdp);
- pmd_clear(pmdp);
- return pmd;
+ if (full) {
+ pmd_t pmd = *pmdp;
+ *pmdp = __pmd(_SEGMENT_ENTRY_INVALID);
+ return pmd;
+ }
+ return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
}
#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp)
+ unsigned long addr, pmd_t *pmdp)
{
- return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
+ return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}
#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp)
+ unsigned long addr, pmd_t *pmdp)
{
- pmdp_flush_direct(vma->vm_mm, address, pmdp);
+ pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
- unsigned long address, pmd_t *pmdp)
+ unsigned long addr, pmd_t *pmdp)
{
pmd_t pmd = *pmdp;
- if (pmd_write(pmd)) {
- pmdp_flush_direct(mm, address, pmdp);
- set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
- }
+ if (pmd_write(pmd))
+ pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,