Diffstat (limited to 'arch/tile/mm')
-rw-r--r--   arch/tile/mm/fault.c       |  2
-rw-r--r--   arch/tile/mm/homecache.c   | 16
-rw-r--r--   arch/tile/mm/hugetlbpage.c | 28
-rw-r--r--   arch/tile/mm/init.c        | 18
4 files changed, 14 insertions, 50 deletions
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 565e25a98334..0f61a73534e6 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -442,6 +442,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
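
For readers unfamiliar with the fault path, the error-dispatch block in tile's page-fault handler reads roughly as follows after this hunk (a sketch with the surrounding labels elided; the comments are added here and are not part of the patch):

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;	/* hand off to the OOM path */
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;		/* new case: deliver SIGSEGV as for a bad VMA */
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;		/* deliver SIGBUS */
		BUG();				/* any other VM_FAULT_ERROR bit is unexpected */
	}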
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index cd3387370ebb..40ca30a9fee3 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -115,7 +115,6 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
struct cpumask *cache_cpumask, *tlb_cpumask;
HV_PhysAddr cache_pa;
- char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5];
mb(); /* provided just to simplify "magic hypervisor" mode */
@@ -149,13 +148,12 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
asids, asidcount);
if (rc == 0)
return;
- cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
- cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);
- pr_err("hv_flush_remote(%#llx, %#lx, %p [%s], %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
- cache_pa, cache_control, cache_cpumask, cache_buf,
- (unsigned long)tlb_va, tlb_length, tlb_pgsize,
- tlb_cpumask, tlb_buf, asids, asidcount, rc);
+ pr_err("hv_flush_remote(%#llx, %#lx, %p [%*pb], %#lx, %#lx, %#lx, %p [%*pb], %p, %d) = %d\n",
+ cache_pa, cache_control, cache_cpumask,
+ cpumask_pr_args(&cache_cpumask_copy),
+ (unsigned long)tlb_va, tlb_length, tlb_pgsize, tlb_cpumask,
+ cpumask_pr_args(&tlb_cpumask_copy), asids, asidcount, rc);
panic("Unsafe to continue.");
}
@@ -263,10 +261,6 @@ static int pte_to_home(pte_t pte)
/* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
pte_t pte_set_home(pte_t pte, int home)
{
- /* Check for non-linear file mapping "PTEs" and pass them through. */
- if (pte_file(pte))
- return pte;
-
#if CHIP_HAS_MMIO()
/* Check for MMIO mappings and pass them through. */
if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
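
The flush_remote() hunk above drops the cpumask_scnprintf() calls and their NR_CPUS*5 on-stack buffers in favour of the %*pb printk extension: cpumask_pr_args() expands to a bit count (nr_cpu_ids) plus a pointer to the mask's bits, which %*pb formats as a hex bitmap. A minimal sketch of the pattern, with an illustrative helper name that is not part of the patch:

	#include <linux/cpumask.h>
	#include <linux/printk.h>

	static void report_mask(const struct cpumask *mask)
	{
		/*
		 * %*pb consumes two arguments, a width and a bitmap pointer;
		 * cpumask_pr_args(mask) supplies both, so no temporary buffer
		 * (and no NR_CPUS-sized stack usage) is required.
		 */
		pr_info("mask as hex bitmap: %*pb\n", cpumask_pr_args(mask));
	}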
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 3270e0019266..8416240c322c 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -150,12 +150,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
return NULL;
}
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
- int write)
-{
- return ERR_PTR(-EINVAL);
-}
-
int pmd_huge(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
@@ -166,28 +160,6 @@ int pud_huge(pud_t pud)
return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
-{
- struct page *page;
-
- page = pte_page(*(pte_t *)pmd);
- if (page)
- page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
- return page;
-}
-
-struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int write)
-{
- struct page *page;
-
- page = pte_page(*(pte_t *)pud);
- if (page)
- page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
- return page;
-}
-
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
return 0;
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index be240cc4978d..ace32d7d3864 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -353,15 +353,13 @@ static int __init setup_ktext(char *str)
/* Neighborhood ktext pages on specified mask */
else if (cpulist_parse(str, &ktext_mask) == 0) {
- char buf[NR_CPUS * 5];
- cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
if (cpumask_weight(&ktext_mask) > 1) {
ktext_small = 1;
- pr_info("ktext: using caching neighborhood %s with small pages\n",
- buf);
+ pr_info("ktext: using caching neighborhood %*pbl with small pages\n",
+ cpumask_pr_args(&ktext_mask));
} else {
- pr_info("ktext: caching on cpu %s with one huge page\n",
- buf);
+ pr_info("ktext: caching on cpu %*pbl with one huge page\n",
+ cpumask_pr_args(&ktext_mask));
}
}
@@ -492,11 +490,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
struct cpumask bad;
cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
- if (!cpumask_empty(&bad)) {
- char buf[NR_CPUS * 5];
- cpulist_scnprintf(buf, sizeof(buf), &bad);
- pr_info("ktext: not using unavailable cpus %s\n", buf);
- }
+ if (!cpumask_empty(&bad))
+ pr_info("ktext: not using unavailable cpus %*pbl\n",
+ cpumask_pr_args(&bad));
if (cpumask_empty(&ktext_mask)) {
pr_warn("ktext: no valid cpus; caching on %d\n",
smp_processor_id());
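
Similarly, the init.c hunks replace cpulist_scnprintf() into NR_CPUS*5 stack buffers with %*pbl, which prints the mask in CPU-list notation (e.g. "0-3,8"). A minimal sketch of parsing a CPU list and reporting it this way; the function name is illustrative, not from the patch:

	#include <linux/cpumask.h>
	#include <linux/printk.h>

	/* Parse a CPU list such as "1-3,7" and report it back in list form. */
	static int example_parse_and_report(const char *str, struct cpumask *mask)
	{
		int rc = cpulist_parse(str, mask);

		if (rc)
			return rc;

		/* %*pbl prints the mask in the same "1-3,7" list notation. */
		pr_info("example: using cpus %*pbl\n", cpumask_pr_args(mask));
		return 0;
	}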