path: root/include/asm-x86_64
Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/bitops.h        2
-rw-r--r--  include/asm-x86_64/dma-mapping.h   3
-rw-r--r--  include/asm-x86_64/e820.h          2
-rw-r--r--  include/asm-x86_64/hw_irq.h        2
-rw-r--r--  include/asm-x86_64/io.h            2
-rw-r--r--  include/asm-x86_64/io_apic.h      14
-rw-r--r--  include/asm-x86_64/mce.h           2
-rw-r--r--  include/asm-x86_64/mmzone.h       18
-rw-r--r--  include/asm-x86_64/mutex.h         6
-rw-r--r--  include/asm-x86_64/pgalloc.h       5
-rw-r--r--  include/asm-x86_64/pgtable.h       9
-rw-r--r--  include/asm-x86_64/uaccess.h      14
-rw-r--r--  include/asm-x86_64/vsyscall.h      5
13 files changed, 41 insertions(+), 43 deletions(-)
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
index 8da9609070f4..d4dbbe5f7bd9 100644
--- a/include/asm-x86_64/bitops.h
+++ b/include/asm-x86_64/bitops.h
@@ -7,7 +7,7 @@
#include <asm/alternative.h>
-#if __GNUC__ < 4 || __GNUC_MINOR__ < 1
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
versions. */
#define ADDR "=m" (*(volatile long *) addr)
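The corrected test above takes the compatibility fallback only for compilers actually older than gcc 4.1; the old form also matched any later major release whose minor number happened to be 0. A minimal sketch (not part of the patch) of the fixed predicate and how the two forms differ:

    /*
     * Sketch only: "is this compiler older than gcc 4.1?"
     * For a hypothetical gcc 5.0 (__GNUC__ == 5, __GNUC_MINOR__ == 0):
     *   old test:  5 < 4 || 0 < 1              -> true  (wrongly takes the fallback)
     *   new test:  5 < 4 || (5 == 4 && 0 < 1)  -> false (uses the gcc >= 4.1 path)
     */
    #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
    # define OLDER_THAN_GCC_4_1 1
    #else
    # define OLDER_THAN_GCC_4_1 0
    #endif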
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index 49dbab09ef2b..d2af227f06d0 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -66,6 +66,9 @@ static inline int dma_mapping_error(dma_addr_t dma_addr)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
extern void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
diff --git a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h
index fa2086774105..6216fa3f2802 100644
--- a/include/asm-x86_64/e820.h
+++ b/include/asm-x86_64/e820.h
@@ -46,6 +46,7 @@ extern void e820_mark_nosave_regions(void);
extern void e820_print_map(char *who);
extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
+extern unsigned long e820_hole_size(unsigned long start, unsigned long end);
extern void e820_setup_gap(void);
extern void e820_register_active_regions(int nid,
@@ -56,6 +57,7 @@ extern void finish_e820_parsing(void);
extern struct e820map e820;
extern unsigned ebda_addr, ebda_size;
+extern unsigned long nodemap_addr, nodemap_size;
#endif/*!__ASSEMBLY__*/
#endif/*__E820_HEADER*/
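The new e820_hole_size() declaration, alongside nodemap_addr/nodemap_size, conceptually reports how much of a physical address range is not covered by usable RAM in the e820 map. A self-contained sketch of that computation over a sorted, non-overlapping range list (illustrative names and types, not the kernel implementation):

    struct ram_range { unsigned long start, end; };    /* half-open [start, end) */

    /* Bytes of [start, end) not covered by any RAM range. */
    static unsigned long hole_size(const struct ram_range *r, int n,
                                   unsigned long start, unsigned long end)
    {
        unsigned long covered = 0;
        int i;

        for (i = 0; i < n; i++) {
            unsigned long s = r[i].start > start ? r[i].start : start;
            unsigned long e = r[i].end < end ? r[i].end : end;

            if (s < e)
                covered += e - s;
        }
        return (end - start) - covered;
    }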
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index 179cce755aa7..552df5f10a6d 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -91,7 +91,7 @@ extern void enable_8259A_irq(unsigned int irq);
extern int i8259A_irq_pending(unsigned int irq);
extern void make_8259A_irq(unsigned int irq);
extern void init_8259A(int aeoi);
-extern void FASTCALL(send_IPI_self(int vector));
+extern void send_IPI_self(int vector);
extern void init_VISWS_APIC_irqs(void);
extern void setup_IO_APIC(void);
extern void disable_IO_APIC(void);
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
index f5d84bb7c948..de2cd9a2303a 100644
--- a/include/asm-x86_64/io.h
+++ b/include/asm-x86_64/io.h
@@ -100,7 +100,7 @@ __OUTS(l)
#define IO_SPACE_LIMIT 0xffff
-#if defined(__KERNEL__) && __x86_64__
+#if defined(__KERNEL__) && defined(__x86_64__)
#include <linux/vmalloc.h>
diff --git a/include/asm-x86_64/io_apic.h b/include/asm-x86_64/io_apic.h
index 561ecbfd4cb5..f4fb238c89f1 100644
--- a/include/asm-x86_64/io_apic.h
+++ b/include/asm-x86_64/io_apic.h
@@ -85,18 +85,8 @@ struct IO_APIC_route_entry {
mask : 1, /* 0: enabled, 1: disabled */
__reserved_2 : 15;
- union { struct { __u32
- __reserved_1 : 24,
- physical_dest : 4,
- __reserved_2 : 4;
- } physical;
-
- struct { __u32
- __reserved_1 : 24,
- logical_dest : 8;
- } logical;
- } dest;
-
+ __u32 __reserved_3 : 24,
+ dest : 8;
} __attribute__ ((packed));
/*
diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h
index 5a11146d6d9c..177e92b4019b 100644
--- a/include/asm-x86_64/mce.h
+++ b/include/asm-x86_64/mce.h
@@ -103,6 +103,8 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status);
extern atomic_t mce_entry;
+extern void do_machine_check(struct pt_regs *, long);
+
#endif
#endif
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index c38ebdf6f426..fb558fb1d211 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -11,24 +11,25 @@
#include <asm/smp.h>
-/* Should really switch to dynamic allocation at some point */
-#define NODEMAPSIZE 0x4fff
-
/* Simple perfect hash to map physical addresses to node numbers */
struct memnode {
int shift;
- u8 map[NODEMAPSIZE];
-} ____cacheline_aligned;
+ unsigned int mapsize;
+ u8 *map;
+ u8 embedded_map[64-16];
+} ____cacheline_aligned; /* total size = 64 bytes */
extern struct memnode memnode;
#define memnode_shift memnode.shift
#define memnodemap memnode.map
+#define memnodemapsize memnode.mapsize
extern struct pglist_data *node_data[];
static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
{
unsigned nid;
- VIRTUAL_BUG_ON((addr >> memnode_shift) >= NODEMAPSIZE);
+ VIRTUAL_BUG_ON(!memnodemap);
+ VIRTUAL_BUG_ON((addr >> memnode_shift) >= memnodemapsize);
nid = memnodemap[addr >> memnode_shift];
VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
return nid;
@@ -46,5 +47,10 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
extern int pfn_valid(unsigned long pfn);
#endif
+#ifdef CONFIG_NUMA_EMU
+#define FAKE_NODE_MIN_SIZE (64*1024*1024)
+#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1ul))
+#endif
+
#endif
#endif
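With the fixed-size NODEMAPSIZE array gone, the physical-address-to-node map is sized at boot (the small embedded_map presumably covers the common case without a separate allocation), but the lookup itself is unchanged: shift the address down by memnode_shift and index a byte map. A userspace sketch of that lookup scheme (illustrative names, not the kernel code):

    #include <assert.h>

    /*
     * The physical address space is cut into aligned chunks of
     * (1UL << shift) bytes, each small enough to lie within one node;
     * map[addr >> shift] stores that chunk's node id.
     */
    struct nodemap {
        int shift;                 /* log2 of the chunk size */
        unsigned int mapsize;      /* number of entries in map[] */
        unsigned char *map;        /* map[addr >> shift] == node id */
    };

    static int lookup_nid(const struct nodemap *m, unsigned long addr)
    {
        assert(m->map != 0);
        assert((addr >> m->shift) < m->mapsize);
        return m->map[addr >> m->shift];
    }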
diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h
index 16396b1de3e4..6c2949a3c677 100644
--- a/include/asm-x86_64/mutex.h
+++ b/include/asm-x86_64/mutex.h
@@ -21,7 +21,7 @@ do { \
unsigned long dummy; \
\
typecheck(atomic_t *, v); \
- typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ typecheck_fn(void (*)(atomic_t *), fail_fn); \
\
__asm__ __volatile__( \
LOCK_PREFIX " decl (%%rdi) \n" \
@@ -47,7 +47,7 @@ do { \
*/
static inline int
__mutex_fastpath_lock_retval(atomic_t *count,
- int fastcall (*fail_fn)(atomic_t *))
+ int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
return fail_fn(count);
@@ -67,7 +67,7 @@ do { \
unsigned long dummy; \
\
typecheck(atomic_t *, v); \
- typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ typecheck_fn(void (*)(atomic_t *), fail_fn); \
\
__asm__ __volatile__( \
LOCK_PREFIX " incl (%%rdi) \n" \
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
index 43d4c333a8b1..4e28b6060a5e 100644
--- a/include/asm-x86_64/pgalloc.h
+++ b/include/asm-x86_64/pgalloc.h
@@ -18,11 +18,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
}
-static inline pmd_t *get_pmd(void)
-{
- return (pmd_t *)get_zeroed_page(GFP_KERNEL);
-}
-
static inline void pmd_free(pmd_t *pmd)
{
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 59901c690a0d..730bd6028416 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -359,15 +359,6 @@ static inline int pmd_large(pmd_t pte) {
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
-/* physical address -> PTE */
-static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
-{
- pte_t pte;
- pte_val(pte) = physpage | pgprot_val(pgprot);
- pte_val(pte) &= __supported_pte_mask;
- return pte;
-}
-
/* Change flags of a PTE */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
index 8079e29c14fd..1981f70fcad1 100644
--- a/include/asm-x86_64/uaccess.h
+++ b/include/asm-x86_64/uaccess.h
@@ -367,4 +367,18 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
return copy_user_generic((__force void *)dst, src, size);
}
+#define ARCH_HAS_NOCACHE_UACCESS 1
+extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest);
+
+static inline int __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
+{
+ might_sleep();
+ return __copy_user_nocache(dst, (__force void *)src, size, 1);
+}
+
+static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size)
+{
+ return __copy_user_nocache(dst, (__force void *)src, size, 0);
+}
+
#endif /* __X86_64_UACCESS_H */
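The two new wrappers differ only in the zerorest argument handed to __copy_user_nocache(): the sleeping variant asks for the uncopied tail of the destination to be zeroed when a fault cuts the copy short, while the _inatomic variant skips that. A hypothetical in-kernel usage sketch (the function and buffer names are illustrative):

    /*
     * Illustrative only: copy 'len' bytes from user space into a kernel
     * buffer with non-temporal stores, so a large write-once buffer does
     * not displace useful cache lines.  Process context required, since
     * __copy_from_user_nocache() may sleep on a page fault.
     */
    static int fill_buffer_nocache(void *dst, const void __user *ubuf, size_t len)
    {
        if (__copy_from_user_nocache(dst, ubuf, len))
            return -EFAULT;    /* some bytes could not be copied */
        return 0;
    }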
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
index 05cb8dd200de..0c7847165eae 100644
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -56,11 +56,6 @@ extern struct vxtime_data vxtime;
extern int vgetcpu_mode;
extern struct timezone sys_tz;
extern int sysctl_vsyscall;
-extern seqlock_t xtime_lock;
-
-extern int sysctl_vsyscall;
-
-#define ARCH_HAVE_XTIME_LOCK 1
#endif /* __KERNEL__ */