Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/assembler.h                8
-rw-r--r--  arch/arm/include/asm/barrier.h                  7
-rw-r--r--  arch/arm/include/asm/dma-mapping.h              8
-rw-r--r--  arch/arm/include/asm/gpio.h                     2
-rw-r--r--  arch/arm/include/asm/hardware/cache-tauros2.h   5
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx.h         12
-rw-r--r--  arch/arm/include/asm/io.h                       8
-rw-r--r--  arch/arm/include/asm/leds.h                    50
-rw-r--r--  arch/arm/include/asm/mach/arch.h                7
-rw-r--r--  arch/arm/include/asm/mach/map.h                 8
-rw-r--r--  arch/arm/include/asm/mach/pci.h                13
-rw-r--r--  arch/arm/include/asm/memory.h                  11
-rw-r--r--  arch/arm/include/asm/page.h                     2
-rw-r--r--  arch/arm/include/asm/perf_event.h               9
-rw-r--r--  arch/arm/include/asm/pgtable.h                 42
-rw-r--r--  arch/arm/include/asm/pmu.h                     77
-rw-r--r--  arch/arm/include/asm/sched_clock.h              2
-rw-r--r--  arch/arm/include/asm/smp.h                     48
-rw-r--r--  arch/arm/include/asm/timex.h                    4
-rw-r--r--  arch/arm/include/asm/tlb.h                      4
-rw-r--r--  arch/arm/include/asm/uaccess.h                 58
-rw-r--r--  arch/arm/include/asm/unistd.h                   2
-rw-r--r--  arch/arm/include/asm/vfpmacros.h                2
23 files changed, 210 insertions(+), 179 deletions(-)
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 03fb93621d0d..5c8b3bf4d825 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -320,4 +320,12 @@
.size \name , . - \name
.endm
+ .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+ adds \tmp, \addr, #\size - 1
+ sbcccs \tmp, \tmp, \limit
+ bcs \bad
+#endif
+ .endm
+
#endif /* __ASM_ASSEMBLER_H__ */
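
The check_uaccess macro rejects an access when addr + size - 1 either wraps around or lands above the supplied limit: the adds sets carry on wrap-around, and the conditional sbcccs/bcs pair performs the comparison against the limit. A rough C counterpart, purely for illustration (the function name is made up):

    #include <linux/types.h>

    /*
     * Illustrative C equivalent of check_uaccess: allow the access only
     * when [addr, addr + size) neither wraps nor extends past 'limit',
     * where 'limit' is the highest permitted address (addr_limit - 1).
     */
    static inline int example_range_ok(unsigned long addr, unsigned long size,
                                       unsigned long limit)
    {
            unsigned long end = addr + size - 1;

            return end >= addr && end <= limit;
    }
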
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 05112380dc53..8dcd9c702d90 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -44,10 +44,9 @@
#define rmb() dsb()
#define wmb() mb()
#else
-#include <asm/memory.h>
-#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
-#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
-#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
#endif
#ifndef CONFIG_SMP
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 2ae842df4551..23004847bb05 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -13,6 +13,7 @@
#define DMA_ERROR_CODE (~0)
extern struct dma_map_ops arm_dma_ops;
+extern struct dma_map_ops arm_coherent_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
@@ -203,6 +204,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
}
/*
+ * This can be called during early boot to increase the size of the atomic
+ * coherent DMA pool above the default value of 256KiB. It must be called
+ * before postcore_initcall.
+ */
+extern void __init init_dma_coherent_pool_size(unsigned long size);
+
+/*
* This can be called during boot to increase the size of the consistent
* DMA region above its default value of 2MB. It must be called before the
* memory allocator is initialised, i.e. before any core_initcall.
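
Boards that need a larger atomic coherent pool are expected to call the new hook from early machine code, before postcore_initcall runs. A minimal sketch, assuming a hypothetical board that wants a 1 MiB pool:

    #include <linux/init.h>
    #include <linux/dma-mapping.h>

    /*
     * Hypothetical early machine hook: grow the atomic coherent DMA pool
     * from the default 256 KiB to 1 MiB.  Must run before postcore_initcall,
     * e.g. from the machine's ->init_early() callback.
     */
    static void __init example_board_init_early(void)
    {
            init_dma_coherent_pool_size(1024 * 1024);	/* 1 MiB */
    }
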
diff --git a/arch/arm/include/asm/gpio.h b/arch/arm/include/asm/gpio.h
index c402e9b31f4c..477e0206e016 100644
--- a/arch/arm/include/asm/gpio.h
+++ b/arch/arm/include/asm/gpio.h
@@ -6,7 +6,9 @@
#endif
/* not all ARM platforms necessarily support this API ... */
+#ifdef CONFIG_NEED_MACH_GPIO_H
#include <mach/gpio.h>
+#endif
#ifndef __ARM_GPIOLIB_COMPLEX
/* Note: this may rely upon the value of ARCH_NR_GPIOS set in mach/gpio.h */
diff --git a/arch/arm/include/asm/hardware/cache-tauros2.h b/arch/arm/include/asm/hardware/cache-tauros2.h
index 538f17ca905b..295e2e40151b 100644
--- a/arch/arm/include/asm/hardware/cache-tauros2.h
+++ b/arch/arm/include/asm/hardware/cache-tauros2.h
@@ -8,4 +8,7 @@
* warranty of any kind, whether express or implied.
*/
-extern void __init tauros2_init(void);
+#define CACHE_TAUROS2_PREFETCH_ON (1 << 0)
+#define CACHE_TAUROS2_LINEFILL_BURST8 (1 << 1)
+
+extern void __init tauros2_init(unsigned int features);
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 2ff2c75a4639..02fe2fbe2477 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -217,18 +217,8 @@ extern int iop3xx_get_init_atu(void);
#define IOP3XX_PCI_LOWER_MEM_PA 0x80000000
#define IOP3XX_PCI_MEM_WINDOW_SIZE 0x08000000
-#define IOP3XX_PCI_IO_WINDOW_SIZE 0x00010000
#define IOP3XX_PCI_LOWER_IO_PA 0x90000000
-#define IOP3XX_PCI_LOWER_IO_VA 0xfe000000
-#define IOP3XX_PCI_LOWER_IO_BA 0x90000000
-#define IOP3XX_PCI_UPPER_IO_PA (IOP3XX_PCI_LOWER_IO_PA +\
- IOP3XX_PCI_IO_WINDOW_SIZE - 1)
-#define IOP3XX_PCI_UPPER_IO_VA (IOP3XX_PCI_LOWER_IO_VA +\
- IOP3XX_PCI_IO_WINDOW_SIZE - 1)
-#define IOP3XX_PCI_IO_PHYS_TO_VIRT(addr) (((u32) (addr) -\
- IOP3XX_PCI_LOWER_IO_PA) +\
- IOP3XX_PCI_LOWER_IO_VA)
-
+#define IOP3XX_PCI_LOWER_IO_BA 0x00000000
#ifndef __ASSEMBLY__
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 815c669fec0a..8f4db67533e5 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -113,11 +113,19 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
#define __iowmb() do { } while (0)
#endif
+/* PCI fixed i/o mapping */
+#define PCI_IO_VIRT_BASE 0xfee00000
+
+extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
+
/*
* Now, pick up the machine-defined IO definitions
*/
#ifdef CONFIG_NEED_MACH_IO_H
#include <mach/io.h>
+#elif defined(CONFIG_PCI)
+#define IO_SPACE_LIMIT ((resource_size_t)0xfffff)
+#define __io(a) __typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
#else
#define __io(a) __typesafe_io((a) & IO_SPACE_LIMIT)
#endif
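
With CONFIG_PCI and no mach/io.h, port I/O now resolves through a fixed virtual window at PCI_IO_VIRT_BASE, which a host bridge driver populates with pci_ioremap_io(). A hedged sketch (the physical base address and function name are illustrative):

    #include <linux/init.h>
    #include <linux/types.h>
    #include <asm/io.h>

    /*
     * Hypothetical host bridge setup: map a 64K I/O window whose CPU
     * physical base is 0x90000000 at offset 0 of the fixed PCI I/O
     * virtual area.  Port numbers then translate via __io() relative
     * to PCI_IO_VIRT_BASE.
     */
    static int __init example_pci_setup_io(void)
    {
            return pci_ioremap_io(0, 0x90000000);
    }
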
diff --git a/arch/arm/include/asm/leds.h b/arch/arm/include/asm/leds.h
deleted file mode 100644
index c545739f39b7..000000000000
--- a/arch/arm/include/asm/leds.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * arch/arm/include/asm/leds.h
- *
- * Copyright (C) 1998 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Event-driven interface for LEDs on machines
- * Added led_start and led_stop- Alex Holden, 28th Dec 1998.
- */
-#ifndef ASM_ARM_LEDS_H
-#define ASM_ARM_LEDS_H
-
-
-typedef enum {
- led_idle_start,
- led_idle_end,
- led_timer,
- led_start,
- led_stop,
- led_claim, /* override idle & timer leds */
- led_release, /* restore idle & timer leds */
- led_start_timer_mode,
- led_stop_timer_mode,
- led_green_on,
- led_green_off,
- led_amber_on,
- led_amber_off,
- led_red_on,
- led_red_off,
- led_blue_on,
- led_blue_off,
- /*
- * I want this between led_timer and led_start, but
- * someone has decided to export this to user space
- */
- led_halted
-} led_event_t;
-
-/* Use this routine to handle LEDs */
-
-#ifdef CONFIG_LEDS
-extern void (*leds_event)(led_event_t);
-#else
-#define leds_event(e)
-#endif
-
-#endif
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 0b1c94b8c652..917d4fcfd9b4 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -14,6 +14,12 @@ struct tag;
struct meminfo;
struct sys_timer;
struct pt_regs;
+struct smp_operations;
+#ifdef CONFIG_SMP
+#define smp_ops(ops) (&(ops))
+#else
+#define smp_ops(ops) (struct smp_operations *)NULL
+#endif
struct machine_desc {
unsigned int nr; /* architecture number */
@@ -35,6 +41,7 @@ struct machine_desc {
unsigned char reserve_lp1 :1; /* never has lp1 */
unsigned char reserve_lp2 :1; /* never has lp2 */
char restart_mode; /* default restart mode */
+ struct smp_operations *smp; /* SMP operations */
void (*fixup)(struct tag *, char **,
struct meminfo *);
void (*reserve)(void);/* reserve mem blocks */
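
A board file records its SMP operations through the new .smp field; on !SMP kernels the smp_ops() wrapper evaluates to a plain NULL, so the reference compiles away. A sketch with made-up names:

    #include <asm/mach/arch.h>
    #include <asm/smp.h>

    extern struct smp_operations example_smp_ops;	/* hypothetical, defined in platform SMP code */

    MACHINE_START(EXAMPLE, "Example board")
    	/* ... other machine_desc fields ... */
    	.smp		= smp_ops(example_smp_ops),
    MACHINE_END
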
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index a6efcdd6fd25..195ac2f9d3d3 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -9,6 +9,9 @@
*
* Page table mapping constructs and function prototypes
*/
+#ifndef __ASM_MACH_MAP_H
+#define __ASM_MACH_MAP_H
+
#include <asm/io.h>
struct map_desc {
@@ -34,6 +37,8 @@ struct map_desc {
#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
+extern void vm_reserve_area_early(unsigned long addr, unsigned long size,
+ void *caller);
struct mem_type;
extern const struct mem_type *get_mem_type(unsigned int type);
@@ -44,4 +49,7 @@ extern int ioremap_page(unsigned long virt, unsigned long phys,
const struct mem_type *mtype);
#else
#define iotable_init(map,num) do { } while (0)
+#define vm_reserve_area_early(a,s,c) do { } while (0)
+#endif
+
#endif
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 26c511fddf8f..db9fedb57f2c 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -11,6 +11,8 @@
#ifndef __ASM_MACH_PCI_H
#define __ASM_MACH_PCI_H
+#include <linux/ioport.h>
+
struct pci_sys_data;
struct pci_ops;
struct pci_bus;
@@ -42,6 +44,8 @@ struct pci_sys_data {
unsigned long io_offset; /* bus->cpu IO mapping offset */
struct pci_bus *bus; /* PCI bus */
struct list_head resources; /* root bus resources (apertures) */
+ struct resource io_res;
+ char io_res_name[12];
/* Bridge swizzling */
u8 (*swizzle)(struct pci_dev *, u8 *);
/* IRQ mapping */
@@ -55,6 +59,15 @@ struct pci_sys_data {
void pci_common_init(struct hw_pci *);
/*
+ * Setup early fixed I/O mapping.
+ */
+#if defined(CONFIG_PCI)
+extern void pci_map_io_early(unsigned long pfn);
+#else
+static inline void pci_map_io_early(unsigned long pfn) {}
+#endif
+
+/*
* PCI controllers
*/
extern struct pci_ops iop3xx_ops;
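
pci_map_io_early() covers platforms that need the fixed I/O window in place before the normal ioremap path is usable, typically from the machine's ->map_io() hook; pci_ioremap_io() in <asm/io.h> serves the common case later in boot. An illustrative sketch (address and names are made up):

    #include <linux/init.h>
    #include <asm/mach/pci.h>
    #include <asm/memory.h>

    /*
     * Hypothetical ->map_io() hook: statically map the PCI I/O window
     * whose CPU physical base is 0x90000000 into the fixed virtual area.
     */
    static void __init example_board_map_io(void)
    {
            pci_map_io_early(__phys_to_pfn(0x90000000));
    }
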
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e965f1b560f1..73cf03aa981e 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
#endif
#endif
+#endif /* __ASSEMBLY__ */
#ifndef PHYS_OFFSET
#ifdef PLAT_PHYS_OFFSET
@@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x)
#endif
#endif
+#ifndef __ASSEMBLY__
+
/*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
@@ -272,14 +275,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
-/*
- * Optional coherency support. Currently used only by selected
- * Intel XSC3-based systems.
- */
-#ifndef arch_is_coherent
-#define arch_is_coherent() 0
-#endif
-
#endif
#include <asm-generic/memory_model.h>
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index ecf901902e44..812a4944e783 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -19,7 +19,7 @@
#ifndef CONFIG_MMU
-#include "page-nommu.h"
+#include <asm/page-nommu.h>
#else
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index e074948d8143..625cd621a436 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,6 +12,13 @@
#ifndef __ARM_PERF_EVENT_H__
#define __ARM_PERF_EVENT_H__
-/* Nothing to see here... */
+/*
+ * The ARMv7 CPU PMU supports up to 32 event counters.
+ */
+#define ARMPMU_MAX_HWEVENTS 32
+
+#define HW_OP_UNSUPPORTED 0xFFFF
+#define C(_x) PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED 0xFFFF
#endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index f66626d71e7d..08c12312a1f9 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -16,7 +16,7 @@
#ifndef CONFIG_MMU
#include <asm-generic/4level-fixup.h>
-#include "pgtable-nommu.h"
+#include <asm/pgtable-nommu.h>
#else
@@ -195,6 +195,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
+#define pte_none(pte) (!pte_val(pte))
+#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
+#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
+#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
+#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
+#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
+#define pte_special(pte) (0)
+
+#define pte_present_user(pte) \
+ ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
+ (L_PTE_PRESENT | L_PTE_USER))
+
#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
@@ -206,25 +218,15 @@ extern void __sync_icache_dcache(pte_t pteval);
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
- if (addr >= TASK_SIZE)
- set_pte_ext(ptep, pteval, 0);
- else {
+ unsigned long ext = 0;
+
+ if (addr < TASK_SIZE && pte_present_user(pteval)) {
__sync_icache_dcache(pteval);
- set_pte_ext(ptep, pteval, PTE_EXT_NG);
+ ext |= PTE_EXT_NG;
}
-}
-#define pte_none(pte) (!pte_val(pte))
-#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
-#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
-#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
-#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
-#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
-#define pte_special(pte) (0)
-
-#define pte_present_user(pte) \
- ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
- (L_PTE_PRESENT | L_PTE_USER))
+ set_pte_ext(ptep, pteval, ext);
+}
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
@@ -251,13 +253,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*
* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * <--------------- offset --------------------> <- type --> 0 0 0
+ * <--------------- offset ----------------------> < type -> 0 0 0
*
- * This gives us up to 63 swap files and 32GB per swap file. Note that
+ * This gives us up to 31 swap files and 64GB per swap file. Note that
* the offset field is always non-zero.
*/
#define __SWP_TYPE_SHIFT 3
-#define __SWP_TYPE_BITS 6
+#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 4432305f4a2a..a26170dce02e 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -16,69 +16,30 @@
#include <linux/perf_event.h>
/*
- * Types of PMUs that can be accessed directly and require mutual
- * exclusion between profiling tools.
- */
-enum arm_pmu_type {
- ARM_PMU_DEVICE_CPU = 0,
- ARM_NUM_PMU_DEVICES,
-};
-
-/*
* struct arm_pmu_platdata - ARM PMU platform data
*
* @handle_irq: an optional handler which will be called from the
* interrupt and passed the address of the low level handler,
* and can be used to implement any platform specific handling
* before or after calling it.
- * @enable_irq: an optional handler which will be called after
- * request_irq and be used to handle some platform specific
- * irq enablement
- * @disable_irq: an optional handler which will be called before
- * free_irq and be used to handle some platform specific
- * irq disablement
+ * @runtime_resume: an optional handler which will be called by the
+ * runtime PM framework following a call to pm_runtime_get().
+ * Note that if pm_runtime_get() is called more than once in
+ * succession this handler will only be called once.
+ * @runtime_suspend: an optional handler which will be called by the
+ * runtime PM framework following a call to pm_runtime_put().
+ * Note that if pm_runtime_get() is called more than once in
+ * succession this handler will only be called following the
+ * final call to pm_runtime_put() that actually disables the
+ * hardware.
*/
struct arm_pmu_platdata {
irqreturn_t (*handle_irq)(int irq, void *dev,
irq_handler_t pmu_handler);
- void (*enable_irq)(int irq);
- void (*disable_irq)(int irq);
+ int (*runtime_resume)(struct device *dev);
+ int (*runtime_suspend)(struct device *dev);
};
-#ifdef CONFIG_CPU_HAS_PMU
-
-/**
- * reserve_pmu() - reserve the hardware performance counters
- *
- * Reserve the hardware performance counters in the system for exclusive use.
- * Returns 0 on success or -EBUSY if the lock is already held.
- */
-extern int
-reserve_pmu(enum arm_pmu_type type);
-
-/**
- * release_pmu() - Relinquish control of the performance counters
- *
- * Release the performance counters and allow someone else to use them.
- */
-extern void
-release_pmu(enum arm_pmu_type type);
-
-#else /* CONFIG_CPU_HAS_PMU */
-
-#include <linux/err.h>
-
-static inline int
-reserve_pmu(enum arm_pmu_type type)
-{
- return -ENODEV;
-}
-
-static inline void
-release_pmu(enum arm_pmu_type type) { }
-
-#endif /* CONFIG_CPU_HAS_PMU */
-
#ifdef CONFIG_HW_PERF_EVENTS
/* The events for a given PMU register set. */
@@ -103,7 +64,6 @@ struct pmu_hw_events {
struct arm_pmu {
struct pmu pmu;
- enum arm_pmu_type type;
cpumask_t active_irqs;
char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
@@ -118,6 +78,8 @@ struct arm_pmu {
void (*start)(void);
void (*stop)(void);
void (*reset)(void *);
+ int (*request_irq)(irq_handler_t handler);
+ void (*free_irq)(void);
int (*map_event)(struct perf_event *event);
int num_events;
atomic_t active_events;
@@ -129,7 +91,9 @@ struct arm_pmu {
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
-int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+extern const struct dev_pm_ops armpmu_dev_pm_ops;
+
+int armpmu_register(struct arm_pmu *armpmu, char *name, int type);
u64 armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
@@ -139,6 +103,13 @@ int armpmu_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc,
int idx);
+int armpmu_map_event(struct perf_event *event,
+ const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+ const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX],
+ u32 raw_event_mask);
+
#endif /* CONFIG_HW_PERF_EVENTS */
#endif /* __ARM_PMU_H__ */
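
Platform data that used to toggle the PMU interrupt through enable_irq/disable_irq now hooks into runtime PM instead; per the kernel-doc above, the callbacks are invoked by the runtime PM framework around pm_runtime_get()/pm_runtime_put(). A sketch of the new shape of the platform data (names and bodies are placeholders):

    #include <linux/device.h>
    #include <asm/pmu.h>

    /* Hypothetical glue: power the PMU up and down from runtime PM. */
    static int example_pmu_runtime_resume(struct device *dev)
    {
            /* enable PMU clocks / power domain here */
            return 0;
    }

    static int example_pmu_runtime_suspend(struct device *dev)
    {
            /* disable PMU clocks / power domain here */
            return 0;
    }

    static struct arm_pmu_platdata example_pmu_platdata = {
            .runtime_resume  = example_pmu_runtime_resume,
            .runtime_suspend = example_pmu_runtime_suspend,
    };
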
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
index e3f757263438..05b8e82ec9f5 100644
--- a/arch/arm/include/asm/sched_clock.h
+++ b/arch/arm/include/asm/sched_clock.h
@@ -10,5 +10,7 @@
extern void sched_clock_postinit(void);
extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
+extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+ unsigned long rate);
#endif
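
The new *_needs_suspend variant takes the same arguments as setup_sched_clock() and flags the clock source as needing suspend/resume handling by the sched_clock core. An illustrative registration against a made-up 32-bit, 24 MHz free-running counter:

    #include <linux/compiler.h>
    #include <linux/init.h>
    #include <linux/io.h>
    #include <asm/sched_clock.h>

    static void __iomem *example_counter_base;	/* hypothetical counter MMIO base */

    /* Hypothetical read-out of a 32-bit free-running counter. */
    static u32 notrace example_read_sched_clock(void)
    {
            return readl_relaxed(example_counter_base);
    }

    static void __init example_timer_init(void)
    {
            /* A source that needs special handling across suspend would
             * call setup_sched_clock_needs_suspend() with the same
             * arguments instead. */
            setup_sched_clock(example_read_sched_clock, 32, 24000000);
    }
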
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index ae29293270a3..2e3be16c6766 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -60,15 +60,6 @@ extern int boot_secondary(unsigned int cpu, struct task_struct *);
*/
asmlinkage void secondary_start_kernel(void);
-/*
- * Perform platform specific initialisation of the specified CPU.
- */
-extern void platform_secondary_init(unsigned int cpu);
-
-/*
- * Initialize cpu_possible map, and enable coherency
- */
-extern void platform_smp_prepare_cpus(unsigned int);
/*
* Initial data for bringing up a secondary CPU.
@@ -79,18 +70,47 @@ struct secondary_data {
void *stack;
};
extern struct secondary_data secondary_data;
+extern volatile int pen_release;
extern int __cpu_disable(void);
-extern int platform_cpu_disable(unsigned int cpu);
extern void __cpu_die(unsigned int cpu);
extern void cpu_die(void);
-extern void platform_cpu_die(unsigned int cpu);
-extern int platform_cpu_kill(unsigned int cpu);
-extern void platform_cpu_enable(unsigned int cpu);
-
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+struct smp_operations {
+#ifdef CONFIG_SMP
+ /*
+ * Setup the set of possible CPUs (via set_cpu_possible)
+ */
+ void (*smp_init_cpus)(void);
+ /*
+ * Initialize cpu_possible map, and enable coherency
+ */
+ void (*smp_prepare_cpus)(unsigned int max_cpus);
+
+ /*
+ * Perform platform specific initialisation of the specified CPU.
+ */
+ void (*smp_secondary_init)(unsigned int cpu);
+ /*
+ * Boot a secondary CPU, and assign it the specified idle task.
+ * This also gives us the initial stack to use for this CPU.
+ */
+ int (*smp_boot_secondary)(unsigned int cpu, struct task_struct *idle);
+#ifdef CONFIG_HOTPLUG_CPU
+ int (*cpu_kill)(unsigned int cpu);
+ void (*cpu_die)(unsigned int cpu);
+ int (*cpu_disable)(unsigned int cpu);
+#endif
+#endif
+};
+
+/*
+ * set platform specific SMP operations
+ */
+extern void smp_set_ops(struct smp_operations *);
+
#endif /* ifndef __ASM_ARM_SMP_H */
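
Everything the removed platform_* hooks used to provide now lives in one per-machine structure, registered either via smp_set_ops() or through the machine descriptor's .smp field. A skeleton with hypothetical names (bodies reduced to comments):

    #include <linux/init.h>
    #include <linux/sched.h>
    #include <asm/smp.h>

    static void __init example_smp_init_cpus(void)
    {
            /* set_cpu_possible() for each core present in the hardware */
    }

    static void __init example_smp_prepare_cpus(unsigned int max_cpus)
    {
            /* enable coherency, program the secondary boot address, ... */
    }

    static void example_smp_secondary_init(unsigned int cpu)
    {
            /* per-CPU setup run on the secondary itself */
    }

    static int example_smp_boot_secondary(unsigned int cpu,
                                          struct task_struct *idle)
    {
            /* kick the secondary core; return 0 on success */
            return 0;
    }

    struct smp_operations example_smp_ops __initdata = {
            .smp_init_cpus		= example_smp_init_cpus,
            .smp_prepare_cpus	= example_smp_prepare_cpus,
            .smp_secondary_init	= example_smp_secondary_init,
            .smp_boot_secondary	= example_smp_boot_secondary,
    };
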
diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
index ce119442277c..963342acebb7 100644
--- a/arch/arm/include/asm/timex.h
+++ b/arch/arm/include/asm/timex.h
@@ -13,7 +13,11 @@
#define _ASMARM_TIMEX_H
#include <asm/arch_timer.h>
+#ifdef CONFIG_ARCH_MULTIPLATFORM
+#define CLOCK_TICK_RATE 1000000
+#else
#include <mach/timex.h>
+#endif
typedef unsigned long cycles_t;
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 314d4664eae7..99a19512ee26 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
{
pgtable_page_dtor(pte);
+#ifdef CONFIG_ARM_LPAE
+ tlb_add_flush(tlb, addr);
+#else
/*
* With the classic ARM MMU, a pte page has two corresponding pmd
* entries, each covering 1MB.
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
addr &= PMD_MASK;
tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
tlb_add_flush(tlb, addr + SZ_1M);
+#endif
tlb_remove_page(tlb, pte);
}
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 479a6352e0b5..77bd79f2ffdb 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -101,28 +101,39 @@ extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
-#define __get_user_x(__r2,__p,__e,__s,__i...) \
+#define __GUP_CLOBBER_1 "lr", "cc"
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define __GUP_CLOBBER_2 "ip", "lr", "cc"
+#else
+#define __GUP_CLOBBER_2 "lr", "cc"
+#endif
+#define __GUP_CLOBBER_4 "lr", "cc"
+
+#define __get_user_x(__r2,__p,__e,__l,__s) \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%1", "r2") \
+ __asmeq("%3", "r1") \
"bl __get_user_" #__s \
: "=&r" (__e), "=r" (__r2) \
- : "0" (__p) \
- : __i, "cc")
+ : "0" (__p), "r" (__l) \
+ : __GUP_CLOBBER_##__s)
-#define get_user(x,p) \
+#define __get_user_check(x,p) \
({ \
+ unsigned long __limit = current_thread_info()->addr_limit - 1; \
register const typeof(*(p)) __user *__p asm("r0") = (p);\
register unsigned long __r2 asm("r2"); \
+ register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
case 1: \
- __get_user_x(__r2, __p, __e, 1, "lr"); \
- break; \
+ __get_user_x(__r2, __p, __e, __l, 1); \
+ break; \
case 2: \
- __get_user_x(__r2, __p, __e, 2, "r3", "lr"); \
+ __get_user_x(__r2, __p, __e, __l, 2); \
break; \
case 4: \
- __get_user_x(__r2, __p, __e, 4, "lr"); \
+ __get_user_x(__r2, __p, __e, __l, 4); \
break; \
default: __e = __get_user_bad(); break; \
} \
@@ -130,42 +141,57 @@ extern int __get_user_4(void *);
__e; \
})
+#define get_user(x,p) \
+ ({ \
+ might_fault(); \
+ __get_user_check(x,p); \
+ })
+
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);
-#define __put_user_x(__r2,__p,__e,__s) \
+#define __put_user_x(__r2,__p,__e,__l,__s) \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%2", "r2") \
+ __asmeq("%3", "r1") \
"bl __put_user_" #__s \
: "=&r" (__e) \
- : "0" (__p), "r" (__r2) \
+ : "0" (__p), "r" (__r2), "r" (__l) \
: "ip", "lr", "cc")
-#define put_user(x,p) \
+#define __put_user_check(x,p) \
({ \
+ unsigned long __limit = current_thread_info()->addr_limit - 1; \
register const typeof(*(p)) __r2 asm("r2") = (x); \
register const typeof(*(p)) __user *__p asm("r0") = (p);\
+ register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
case 1: \
- __put_user_x(__r2, __p, __e, 1); \
+ __put_user_x(__r2, __p, __e, __l, 1); \
break; \
case 2: \
- __put_user_x(__r2, __p, __e, 2); \
+ __put_user_x(__r2, __p, __e, __l, 2); \
break; \
case 4: \
- __put_user_x(__r2, __p, __e, 4); \
+ __put_user_x(__r2, __p, __e, __l, 4); \
break; \
case 8: \
- __put_user_x(__r2, __p, __e, 8); \
+ __put_user_x(__r2, __p, __e, __l, 8); \
break; \
default: __e = __put_user_bad(); break; \
} \
__e; \
})
+#define put_user(x,p) \
+ ({ \
+ might_fault(); \
+ __put_user_check(x,p); \
+ })
+
#else /* CONFIG_MMU */
/*
@@ -219,6 +245,7 @@ do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
__chk_user_ptr(ptr); \
+ might_fault(); \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
@@ -300,6 +327,7 @@ do { \
unsigned long __pu_addr = (unsigned long)(ptr); \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
+ might_fault(); \
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 0cab47d4a83f..2fde5fd1acce 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -404,6 +404,7 @@
#define __NR_setns (__NR_SYSCALL_BASE+375)
#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376)
#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
+ /* 378 for kcmp */
/*
* The following SWIs are ARM private.
@@ -483,6 +484,7 @@
*/
#define __IGNORE_fadvise64_64
#define __IGNORE_migrate_pages
+#define __IGNORE_kcmp
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_UNISTD_H */
diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
index 3d5fc41ae8d3..a7aadbd9a6dd 100644
--- a/arch/arm/include/asm/vfpmacros.h
+++ b/arch/arm/include/asm/vfpmacros.h
@@ -5,7 +5,7 @@
*/
#include <asm/hwcap.h>
-#include "vfp.h"
+#include <asm/vfp.h>
@ Macros to allow building with old toolkits (with no VFP support)
.macro VFPFMRX, rd, sysreg, cond