aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sparc/mm/init_64.c
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2017-08-10 09:49:15 -0700
committerDavid S. Miller <davem@davemloft.net>2017-08-10 09:49:15 -0700
commit4d9fbf539b52810cd2903719b181ed3d3ccd861f (patch)
tree981775e1ac718a5f4ff7fb2296c92f4fc545441f /arch/sparc/mm/init_64.c
parentMerge branch 'sparc64-Use-low-latency-path-to-resume-idle-cpu' (diff)
downloadlinux-dev-4d9fbf539b52810cd2903719b181ed3d3ccd861f.tar.xz
linux-dev-4d9fbf539b52810cd2903719b181ed3d3ccd861f.zip
sparc64: Revert 16GB huge page support.
It overflows the amount of space available in the initial .text section of trap handler assembler in some configurations, resulting in build failures.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to '')
-rw-r--r--arch/sparc/mm/init_64.c54
1 file changed, 7 insertions, 47 deletions
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index cab1510a82a0..3c40ebd50f92 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -325,18 +325,6 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
}
#ifdef CONFIG_HUGETLB_PAGE
-static void __init pud_huge_patch(void)
-{
- struct pud_huge_patch_entry *p;
- unsigned long addr;
-
- p = &__pud_huge_patch;
- addr = p->addr;
- *(unsigned int *)addr = p->insn;
-
- __asm__ __volatile__("flush %0" : : "r" (addr));
-}
-
static int __init setup_hugepagesz(char *string)
{
unsigned long long hugepage_size;
@@ -349,11 +337,6 @@ static int __init setup_hugepagesz(char *string)
hugepage_shift = ilog2(hugepage_size);
switch (hugepage_shift) {
- case HPAGE_16GB_SHIFT:
- hv_pgsz_mask = HV_PGSZ_MASK_16GB;
- hv_pgsz_idx = HV_PGSZ_IDX_16GB;
- pud_huge_patch();
- break;
case HPAGE_2GB_SHIFT:
hv_pgsz_mask = HV_PGSZ_MASK_2GB;
hv_pgsz_idx = HV_PGSZ_IDX_2GB;
@@ -394,7 +377,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
{
struct mm_struct *mm;
unsigned long flags;
- bool is_huge_tsb;
pte_t pte = *ptep;
if (tlb_type != hypervisor) {
@@ -412,37 +394,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
spin_lock_irqsave(&mm->context.lock, flags);
- is_huge_tsb = false;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) {
- unsigned long hugepage_size = PAGE_SIZE;
-
- if (is_vm_hugetlb_page(vma))
- hugepage_size = huge_page_size(hstate_vma(vma));
-
- if (hugepage_size >= PUD_SIZE) {
- unsigned long mask = 0x1ffc00000UL;
-
- /* Transfer bits [32:22] from address to resolve
- * at 4M granularity.
- */
- pte_val(pte) &= ~mask;
- pte_val(pte) |= (address & mask);
- } else if (hugepage_size >= PMD_SIZE) {
- /* We are fabricating 8MB pages using 4MB
- * real hw pages.
- */
- pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
- }
-
- if (hugepage_size >= PMD_SIZE) {
- __update_mmu_tsb_insert(mm, MM_TSB_HUGE,
- REAL_HPAGE_SHIFT, address, pte_val(pte));
- is_huge_tsb = true;
- }
- }
+ if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
+ is_hugetlb_pmd(__pmd(pte_val(pte)))) {
+ /* We are fabricating 8MB pages using 4MB real hw pages. */
+ pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
+ __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
+ address, pte_val(pte));
+ } else
#endif
- if (!is_huge_tsb)
__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
address, pte_val(pte));