Diffstat (limited to 'arch/tile')
-rw-r--r--  arch/tile/include/asm/Kbuild  |  2
-rw-r--r--  arch/tile/include/asm/div64.h | 16
-rw-r--r--  arch/tile/kernel/ptrace.c     |  2
-rw-r--r--  arch/tile/mm/pgtable.c        | 45
4 files changed, 17 insertions, 48 deletions
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 2d1f5638974c..aa48b6eaff2d 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -4,8 +4,6 @@ header-y += ../arch/
generic-y += bug.h
generic-y += bugs.h
generic-y += clkdev.h
-generic-y += cputime.h
-generic-y += div64.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
diff --git a/arch/tile/include/asm/div64.h b/arch/tile/include/asm/div64.h
new file mode 100644
index 000000000000..9f765cdf09a5
--- /dev/null
+++ b/arch/tile/include/asm/div64.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_TILE_DIV64_H
+#define _ASM_TILE_DIV64_H
+
+#include <linux/types.h>
+
+#ifdef __tilegx__
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+ return __insn_mul_lu_lu(a, b);
+}
+#define mul_u32_u32 mul_u32_u32
+#endif
+
+#include <asm-generic/div64.h>
+
+#endif /* _ASM_TILE_DIV64_H */
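
The new header overrides mul_u32_u32() for tilegx only: the mul_lu_lu instruction multiplies the low 32 bits of its operands and returns the full 64-bit product in a single instruction, where the generic helper relies on the compiler recognizing a widening multiply. As a point of reference, here is a minimal userspace sketch of the generic fallback the override has to match, assuming the usual (u64)a * b definition that include/linux/math64.h provides when no architecture override exists (illustrative code, not part of this patch):

/*
 * Sketch of the generic mul_u32_u32() fallback, as a plain userspace
 * function.  The tilegx override above must produce the same value;
 * it simply lets the compiler emit one mul_lu_lu instruction.
 */
#include <stdint.h>
#include <assert.h>

static inline uint64_t generic_mul_u32_u32(uint32_t a, uint32_t b)
{
	/* Widen before multiplying so the product keeps all 64 bits. */
	return (uint64_t)a * b;
}

int main(void)
{
	/* 0xffffffff * 0xffffffff overflows 32 bits but fits in 64. */
	assert(generic_mul_u32_u32(0xffffffffu, 0xffffffffu) ==
	       0xfffffffe00000001ull);
	return 0;
}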
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index d89b7011667c..e279572824b1 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
int ret;
- struct pt_regs regs;
+ struct pt_regs regs = *task_pt_regs(target);

ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
sizeof(regs));
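
The ptrace change seeds the on-stack pt_regs copy with the target's current registers before user_regset_copyin() runs. A PTRACE_SETREGSET caller may supply fewer bytes than sizeof(struct pt_regs); previously the uninitialized tail of regs would then be written back into the traced thread. The following userspace sketch (hypothetical struct and helper names, not kernel code) illustrates why the destination must be pre-filled with the live state:

/*
 * Illustrative sketch: if the writer supplies fewer bytes than the
 * full structure, the untouched tail must keep its previous contents
 * rather than stack garbage.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_regs {
	uint64_t gpr[4];
	uint64_t pc;
};

/* Copy at most 'count' bytes of new state over 'regs'. */
static void partial_set(struct fake_regs *regs, const void *buf, size_t count)
{
	if (count > sizeof(*regs))
		count = sizeof(*regs);
	memcpy(regs, buf, count);	/* tail of *regs is left untouched */
}

int main(void)
{
	struct fake_regs live = { .gpr = { 1, 2, 3, 4 }, .pc = 0x1000 };
	uint64_t new_gpr0 = 42;

	/* Seed the scratch copy from the live state first... */
	struct fake_regs scratch = live;
	/* ...so a short write only replaces what the caller provided. */
	partial_set(&scratch, &new_gpr0, sizeof(new_gpr0));

	printf("gpr0=%llu pc=%#llx\n",
	       (unsigned long long)scratch.gpr[0],
	       (unsigned long long)scratch.pc);	/* pc stays 0x1000 */
	return 0;
}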
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 7cc6ee7f1a58..492a7361e58e 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -36,51 +36,6 @@
#define K(x) ((x) << (PAGE_SHIFT-10))
-/*
- * The normal show_free_areas() is too verbose on Tile, with dozens
- * of processors and often four NUMA zones each with high and lowmem.
- */
-void show_mem(unsigned int filter)
-{
- struct zone *zone;
-
- pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu pagecache:%lu swap:%lu\n",
- (global_node_page_state(NR_ACTIVE_ANON) +
- global_node_page_state(NR_ACTIVE_FILE)),
- (global_node_page_state(NR_INACTIVE_ANON) +
- global_node_page_state(NR_INACTIVE_FILE)),
- global_node_page_state(NR_FILE_DIRTY),
- global_node_page_state(NR_WRITEBACK),
- global_node_page_state(NR_UNSTABLE_NFS),
- global_page_state(NR_FREE_PAGES),
- (global_page_state(NR_SLAB_RECLAIMABLE) +
- global_page_state(NR_SLAB_UNRECLAIMABLE)),
- global_node_page_state(NR_FILE_MAPPED),
- global_page_state(NR_PAGETABLE),
- global_page_state(NR_BOUNCE),
- global_node_page_state(NR_FILE_PAGES),
- get_nr_swap_pages());
-
- for_each_zone(zone) {
- unsigned long flags, order, total = 0, largest_order = -1;
-
- if (!populated_zone(zone))
- continue;
-
- spin_lock_irqsave(&zone->lock, flags);
- for (order = 0; order < MAX_ORDER; order++) {
- int nr = zone->free_area[order].nr_free;
- total += nr << order;
- if (nr)
- largest_order = order;
- }
- spin_unlock_irqrestore(&zone->lock, flags);
- pr_err("Node %d %7s: %lukB (largest %luKb)\n",
- zone_to_nid(zone), zone->name,
- K(total), largest_order ? K(1UL) << largest_order : 0);
- }
-}
-
/**
* shatter_huge_page() - ensure a given address is mapped by a small page.
*