Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/apic.h             17
-rw-r--r--  include/asm-x86_64/apicdef.h           1
-rw-r--r--  include/asm-x86_64/atomic.h            3
-rw-r--r--  include/asm-x86_64/bitops.h           70
-rw-r--r--  include/asm-x86_64/cache.h            13
-rw-r--r--  include/asm-x86_64/cacheflush.h        4
-rw-r--r--  include/asm-x86_64/compat.h            7
-rw-r--r--  include/asm-x86_64/cpufeature.h        3
-rw-r--r--  include/asm-x86_64/desc.h             18
-rw-r--r--  include/asm-x86_64/dma-mapping.h     221
-rw-r--r--  include/asm-x86_64/dwarf2.h            4
-rw-r--r--  include/asm-x86_64/e820.h              1
-rw-r--r--  include/asm-x86_64/edac.h             18
-rw-r--r--  include/asm-x86_64/fixmap.h            2
-rw-r--r--  include/asm-x86_64/gart-mapping.h     16
-rw-r--r--  include/asm-x86_64/hw_irq.h           10
-rw-r--r--  include/asm-x86_64/i387.h             68
-rw-r--r--  include/asm-x86_64/ia32.h              2
-rw-r--r--  include/asm-x86_64/ia32_unistd.h      16
-rw-r--r--  include/asm-x86_64/idle.h             14
-rw-r--r--  include/asm-x86_64/io.h                5
-rw-r--r--  include/asm-x86_64/ioctl.h            76
-rw-r--r--  include/asm-x86_64/ipi.h               4
-rw-r--r--  include/asm-x86_64/irq.h               2
-rw-r--r--  include/asm-x86_64/kdebug.h           13
-rw-r--r--  include/asm-x86_64/kexec.h            37
-rw-r--r--  include/asm-x86_64/kprobes.h           4
-rw-r--r--  include/asm-x86_64/mman.h              1
-rw-r--r--  include/asm-x86_64/mmu_context.h       9
-rw-r--r--  include/asm-x86_64/mmzone.h           16
-rw-r--r--  include/asm-x86_64/mpspec.h            2
-rw-r--r--  include/asm-x86_64/mutex.h           113
-rw-r--r--  include/asm-x86_64/numa.h              5
-rw-r--r--  include/asm-x86_64/page.h             22
-rw-r--r--  include/asm-x86_64/pci.h              11
-rw-r--r--  include/asm-x86_64/pda.h              11
-rw-r--r--  include/asm-x86_64/percpu.h            2
-rw-r--r--  include/asm-x86_64/pgtable.h          34
-rw-r--r--  include/asm-x86_64/processor.h        21
-rw-r--r--  include/asm-x86_64/proto.h            15
-rw-r--r--  include/asm-x86_64/segment.h           4
-rw-r--r--  include/asm-x86_64/smp.h               1
-rw-r--r--  include/asm-x86_64/swiotlb.h          16
-rw-r--r--  include/asm-x86_64/system.h           59
-rw-r--r--  include/asm-x86_64/thread_info.h       3
-rw-r--r--  include/asm-x86_64/timex.h            16
-rw-r--r--  include/asm-x86_64/topology.h          1
-rw-r--r--  include/asm-x86_64/uaccess.h           7
-rw-r--r--  include/asm-x86_64/unistd.h           31
-rw-r--r--  include/asm-x86_64/vsyscall.h          4
50 files changed, 717 insertions, 336 deletions
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index 5647b7de1749..4f6a4dc455bb 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -42,11 +42,6 @@ static __inline void apic_write(unsigned long reg, unsigned int v)
*((volatile unsigned int *)(APIC_BASE+reg)) = v;
}
-static __inline void apic_write_atomic(unsigned long reg, unsigned int v)
-{
- xchg((volatile unsigned int *)(APIC_BASE+reg), v);
-}
-
static __inline unsigned int apic_read(unsigned long reg)
{
return *((volatile unsigned int *)(APIC_BASE+reg));
@@ -57,10 +52,6 @@ static __inline__ void apic_wait_icr_idle(void)
while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY );
}
-#define FORCE_READ_AROUND_WRITE 0
-#define apic_read_around(x)
-#define apic_write_around(x,y) apic_write((x),(y))
-
static inline void ack_APIC_irq(void)
{
/*
@@ -71,7 +62,7 @@ static inline void ack_APIC_irq(void)
*/
/* Docs say use 0 for future compatibility */
- apic_write_around(APIC_EOI, 0);
+ apic_write(APIC_EOI, 0);
}
extern int get_maxlvt (void);
@@ -113,6 +104,12 @@ extern int disable_timer_pin_1;
extern void setup_threshold_lvt(unsigned long lvt_off);
+void smp_send_timer_broadcast_ipi(void);
+void switch_APIC_timer_to_ipi(void *cpumask);
+void switch_ipi_to_APIC_timer(void *cpumask);
+
+#define ARCH_APICTIMER_STOPS_ON_C3 1
+
#endif /* CONFIG_X86_LOCAL_APIC */
extern unsigned boot_cpu_id;
diff --git a/include/asm-x86_64/apicdef.h b/include/asm-x86_64/apicdef.h
index fb1c99ac669f..decaa2d540e8 100644
--- a/include/asm-x86_64/apicdef.h
+++ b/include/asm-x86_64/apicdef.h
@@ -13,6 +13,7 @@
#define APIC_ID 0x20
#define APIC_ID_MASK (0xFFu<<24)
#define GET_APIC_ID(x) (((x)>>24)&0xFFu)
+#define SET_APIC_ID(x) (((x)<<24))
#define APIC_LVR 0x30
#define APIC_LVR_MASK 0xFF00FF
#define GET_APIC_VERSION(x) ((x)&0xFFu)
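For illustration, the new SET_APIC_ID pairs with the existing GET_APIC_ID to compose and decode the APIC_ID register image. A minimal sketch (not part of the patch):

static inline unsigned int apic_id_roundtrip(void)
{
	unsigned int reg = SET_APIC_ID(3) & APIC_ID_MASK;	/* 0x03000000 */
	return GET_APIC_ID(reg);				/* 3 again */
}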
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 50db9f39274f..4b5cd553e772 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -2,6 +2,7 @@
#define __ARCH_X86_64_ATOMIC__
#include <linux/config.h>
+#include <asm/types.h>
/* atomic_t should be 32 bit signed type */
@@ -389,6 +390,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
#define atomic64_dec_return(v) (atomic64_sub_return(1,v))
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
@@ -424,4 +426,5 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+#include <asm-generic/atomic.h>
#endif
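As a usage sketch of the new atomic_xchg() (hypothetical caller, not from this patch): it unconditionally stores the new value and returns the old one, which makes a one-shot "claim" idiom trivial:

static atomic_t claimed = ATOMIC_INIT(0);

static int try_claim(void)
{
	/* atomic_xchg() stores 1 and returns the previous value,
	   so only the first caller sees 0 */
	return atomic_xchg(&claimed, 1) == 0;
}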
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
index 05a0d374404b..eb4df23e1e41 100644
--- a/include/asm-x86_64/bitops.h
+++ b/include/asm-x86_64/bitops.h
@@ -29,7 +29,7 @@ static __inline__ void set_bit(int nr, volatile void * addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btsl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"dIr" (nr) : "memory");
}
@@ -46,7 +46,7 @@ static __inline__ void __set_bit(int nr, volatile void * addr)
{
__asm__ volatile(
"btsl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"dIr" (nr) : "memory");
}
@@ -64,7 +64,7 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btrl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"dIr" (nr));
}
@@ -72,7 +72,7 @@ static __inline__ void __clear_bit(int nr, volatile void * addr)
{
__asm__ __volatile__(
"btrl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"dIr" (nr));
}
@@ -92,7 +92,7 @@ static __inline__ void __change_bit(int nr, volatile void * addr)
{
__asm__ __volatile__(
"btcl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"dIr" (nr));
}
@@ -109,7 +109,7 @@ static __inline__ void change_bit(int nr, volatile void * addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btcl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"dIr" (nr));
}
@@ -127,7 +127,7 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
__asm__ __volatile__( LOCK_PREFIX
"btsl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr) : "memory");
return oldbit;
}
@@ -147,7 +147,7 @@ static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
__asm__(
"btsl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr));
return oldbit;
}
@@ -166,7 +166,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
__asm__ __volatile__( LOCK_PREFIX
"btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr) : "memory");
return oldbit;
}
@@ -186,7 +186,7 @@ static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
__asm__(
"btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr));
return oldbit;
}
@@ -198,7 +198,7 @@ static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
__asm__ __volatile__(
"btcl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr) : "memory");
return oldbit;
}
@@ -217,7 +217,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
__asm__ __volatile__( LOCK_PREFIX
"btcl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr) : "memory");
return oldbit;
}
@@ -340,6 +340,20 @@ static __inline__ unsigned long __ffs(unsigned long word)
return word;
}
+/*
+ * __fls: find last bit set.
+ * @word: The word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static __inline__ unsigned long __fls(unsigned long word)
+{
+ __asm__("bsrq %1,%0"
+ :"=r" (word)
+ :"rm" (word));
+ return word;
+}
+
#ifdef __KERNEL__
static inline int sched_find_first_bit(const unsigned long *b)
@@ -370,6 +384,35 @@ static __inline__ int ffs(int x)
}
/**
+ * fls64 - find last bit set in 64 bit word
+ * @x: the word to search
+ *
+ * This is defined the same way as fls.
+ */
+static __inline__ int fls64(__u64 x)
+{
+ if (x == 0)
+ return 0;
+ return __fls(x) + 1;
+}
+
+/**
+ * fls - find last bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ */
+static __inline__ int fls(int x)
+{
+ int r;
+
+ __asm__("bsrl %1,%0\n\t"
+ "cmovzl %2,%0"
+ : "=&r" (r) : "rm" (x), "rm" (-1));
+ return r+1;
+}
+
+/**
* hweightN - returns the hamming weight of a N-bit word
* @x: the word to weigh
*
@@ -407,9 +450,6 @@ static __inline__ int ffs(int x)
#define minix_find_first_zero_bit(addr,size) \
find_first_zero_bit((void*)addr,size)
-/* find last set bit */
-#define fls(x) generic_fls(x)
-
#endif /* __KERNEL__ */
#endif /* _X86_64_BITOPS_H */
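To illustrate the semantics of the new fls()/fls64(): fls(0) == 0 and fls(1 << n) == n + 1, so a ceiling-log2 helper falls out directly. A sketch (the helper below is hypothetical, not part of the patch):

static inline unsigned int log2_ceil(u64 size)
{
	/* fls64(x) is the 1-based index of the highest set bit, fls64(0) == 0.
	   Returns the smallest n with (1ULL << n) >= size, for size >= 1. */
	return size > 1 ? fls64(size - 1) : 0;
}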
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
index 33e53424128b..263f0a211ed7 100644
--- a/include/asm-x86_64/cache.h
+++ b/include/asm-x86_64/cache.h
@@ -9,6 +9,17 @@
/* L1 cache line size */
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
+
+#ifdef CONFIG_X86_VSMP
+
+/* vSMP Internode cacheline shift */
+#define INTERNODE_CACHE_SHIFT (12)
+#ifdef CONFIG_SMP
+#define __cacheline_aligned_in_smp \
+ __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) \
+ __attribute__((__section__(".data.page_aligned")))
+#endif
+
+#endif
#endif
diff --git a/include/asm-x86_64/cacheflush.h b/include/asm-x86_64/cacheflush.h
index b3189fb229d1..d32f7f58752a 100644
--- a/include/asm-x86_64/cacheflush.h
+++ b/include/asm-x86_64/cacheflush.h
@@ -27,4 +27,8 @@ void global_flush_tlb(void);
int change_page_attr(struct page *page, int numpages, pgprot_t prot);
int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot);
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
#endif /* _X8664_CACHEFLUSH_H */
diff --git a/include/asm-x86_64/compat.h b/include/asm-x86_64/compat.h
index f0155c38f639..b37ab8218ef0 100644
--- a/include/asm-x86_64/compat.h
+++ b/include/asm-x86_64/compat.h
@@ -198,8 +198,13 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
static __inline__ void __user *compat_alloc_user_space(long len)
{
- struct pt_regs *regs = (void *)current->thread.rsp0 - sizeof(struct pt_regs);
+ struct pt_regs *regs = task_pt_regs(current);
return (void __user *)regs->rsp - len;
}
+static inline int is_compat_task(void)
+{
+ return current_thread_info()->status & TS_COMPAT;
+}
+
#endif /* _ASM_X86_64_COMPAT_H */
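A sketch of how the new is_compat_task() helper might be used by address-space layout code (hypothetical caller; IA32_PAGE_OFFSET from processor.h is used here only as an illustrative bound):

static unsigned long sample_task_limit(void)
{
	/* 32-bit (TS_COMPAT) tasks must stay within the 32-bit range */
	if (is_compat_task())
		return IA32_PAGE_OFFSET;
	return TASK_SIZE;	/* full 64-bit address space otherwise */
}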
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
index aea308c65709..41c0ac8559be 100644
--- a/include/asm-x86_64/cpufeature.h
+++ b/include/asm-x86_64/cpufeature.h
@@ -61,8 +61,9 @@
#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
-#define X86_FEATURE_K8_C (3*32+ 4) /* C stepping K8 */
+/* 4 free */
#define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */
+#define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
index 33764869387b..eb7723a46790 100644
--- a/include/asm-x86_64/desc.h
+++ b/include/asm-x86_64/desc.h
@@ -25,7 +25,7 @@ struct n_desc_struct {
unsigned int a,b;
};
-extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
+extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
enum {
GATE_INTERRUPT = 0xE,
@@ -79,6 +79,9 @@ extern struct desc_struct default_ldt[];
extern struct gate_struct idt_table[];
extern struct desc_ptr cpu_gdt_descr[];
+/* the cpu gdt accessor */
+#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
+
static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
{
struct gate_struct s;
@@ -114,6 +117,11 @@ static inline void set_system_gate(int nr, void *func)
_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
}
+static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
+{
+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
+}
+
static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
unsigned size)
{
@@ -139,20 +147,20 @@ static inline void set_tss_desc(unsigned cpu, void *addr)
* -1? seg base+limit should be pointing to the address of the
* last valid byte
*/
- set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_TSS],
+ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS],
(unsigned long)addr, DESC_TSS,
IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
}
static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
{
- set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_LDT], (unsigned long)addr,
+ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr,
DESC_LDT, size * 8 - 1);
}
static inline void set_seg_base(unsigned cpu, int entry, void *base)
{
- struct desc_struct *d = &cpu_gdt_table[cpu][entry];
+ struct desc_struct *d = &cpu_gdt(cpu)[entry];
u32 addr = (u32)(u64)base;
BUG_ON((u64)base >> 32);
d->base0 = addr & 0xffff;
@@ -194,7 +202,7 @@ static inline void set_seg_base(unsigned cpu, int entry, void *base)
static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
{
- u64 *gdt = (u64 *)(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN);
+ u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
gdt[0] = t->tls_array[0];
gdt[1] = t->tls_array[1];
gdt[2] = t->tls_array[2];
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index 36d16dfbac88..49a81a66516e 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -12,155 +12,176 @@
#include <asm/io.h>
#include <asm/swiotlb.h>
-extern dma_addr_t bad_dma_address;
-#define dma_mapping_error(x) \
- (swiotlb ? swiotlb_dma_mapping_error(x) : ((x) == bad_dma_address))
-
-void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp);
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle);
+struct dma_mapping_ops {
+ int (*mapping_error)(dma_addr_t dma_addr);
+ void* (*alloc_coherent)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+ void (*free_coherent)(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+ dma_addr_t (*map_single)(struct device *hwdev, void *ptr,
+ size_t size, int direction);
+ /* like map_single, but doesn't check the device mask */
+ dma_addr_t (*map_simple)(struct device *hwdev, char *ptr,
+ size_t size, int direction);
+ void (*unmap_single)(struct device *dev, dma_addr_t addr,
+ size_t size, int direction);
+ void (*sync_single_for_cpu)(struct device *hwdev,
+ dma_addr_t dma_handle, size_t size,
+ int direction);
+ void (*sync_single_for_device)(struct device *hwdev,
+ dma_addr_t dma_handle, size_t size,
+ int direction);
+ void (*sync_single_range_for_cpu)(struct device *hwdev,
+ dma_addr_t dma_handle, unsigned long offset,
+ size_t size, int direction);
+ void (*sync_single_range_for_device)(struct device *hwdev,
+ dma_addr_t dma_handle, unsigned long offset,
+ size_t size, int direction);
+ void (*sync_sg_for_cpu)(struct device *hwdev,
+ struct scatterlist *sg, int nelems,
+ int direction);
+ void (*sync_sg_for_device)(struct device *hwdev,
+ struct scatterlist *sg, int nelems,
+ int direction);
+ int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction);
+ void (*unmap_sg)(struct device *hwdev,
+ struct scatterlist *sg, int nents,
+ int direction);
+ int (*dma_supported)(struct device *hwdev, u64 mask);
+ int is_phys;
+};
-#ifdef CONFIG_GART_IOMMU
+extern dma_addr_t bad_dma_address;
+extern struct dma_mapping_ops* dma_ops;
+extern int iommu_merge;
-extern dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size,
- int direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
- int direction);
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+ if (dma_ops->mapping_error)
+ return dma_ops->mapping_error(dma_addr);
-#else
+ return (dma_addr == bad_dma_address);
+}
-/* No IOMMU */
+extern void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle);
-static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr,
- size_t size, int direction)
+static inline dma_addr_t
+dma_map_single(struct device *hwdev, void *ptr, size_t size,
+ int direction)
{
- dma_addr_t addr;
-
- if (direction == DMA_NONE)
- out_of_line_bug();
- addr = virt_to_bus(ptr);
-
- if ((addr+size) & ~*hwdev->dma_mask)
- out_of_line_bug();
- return addr;
+ return dma_ops->map_single(hwdev, ptr, size, direction);
}
-static inline void dma_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
- size_t size, int direction)
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
+ int direction)
{
- if (direction == DMA_NONE)
- out_of_line_bug();
- /* Nothing to do */
+ dma_ops->unmap_single(dev, addr, size, direction);
}
-#endif
-
#define dma_map_page(dev,page,offset,size,dir) \
dma_map_single((dev), page_address(page)+(offset), (size), (dir))
-static inline void dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t dma_handle,
- size_t size, int direction)
-{
- if (direction == DMA_NONE)
- out_of_line_bug();
-
- if (swiotlb)
- return swiotlb_sync_single_for_cpu(hwdev,dma_handle,size,direction);
+#define dma_unmap_page dma_unmap_single
+static inline void
+dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ if (dma_ops->sync_single_for_cpu)
+ dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
+ direction);
flush_write_buffers();
}
-static inline void dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t dma_handle,
- size_t size, int direction)
+static inline void
+dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
+ size_t size, int direction)
{
- if (direction == DMA_NONE)
- out_of_line_bug();
-
- if (swiotlb)
- return swiotlb_sync_single_for_device(hwdev,dma_handle,size,direction);
-
+ if (dma_ops->sync_single_for_device)
+ dma_ops->sync_single_for_device(hwdev, dma_handle, size,
+ direction);
flush_write_buffers();
}
-static inline void dma_sync_single_range_for_cpu(struct device *hwdev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size, int direction)
+static inline void
+dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size, int direction)
{
- if (direction == DMA_NONE)
- out_of_line_bug();
-
- if (swiotlb)
- return swiotlb_sync_single_range_for_cpu(hwdev,dma_handle,offset,size,direction);
+ if (dma_ops->sync_single_range_for_cpu) {
+ dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
+ }
flush_write_buffers();
}
-static inline void dma_sync_single_range_for_device(struct device *hwdev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size, int direction)
+static inline void
+dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size, int direction)
{
- if (direction == DMA_NONE)
- out_of_line_bug();
-
- if (swiotlb)
- return swiotlb_sync_single_range_for_device(hwdev,dma_handle,offset,size,direction);
+ if (dma_ops->sync_single_range_for_device)
+ dma_ops->sync_single_range_for_device(hwdev, dma_handle,
+ offset, size, direction);
flush_write_buffers();
}
-static inline void dma_sync_sg_for_cpu(struct device *hwdev,
- struct scatterlist *sg,
- int nelems, int direction)
+static inline void
+dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+ int nelems, int direction)
{
- if (direction == DMA_NONE)
- out_of_line_bug();
-
- if (swiotlb)
- return swiotlb_sync_sg_for_cpu(hwdev,sg,nelems,direction);
-
+ if (dma_ops->sync_sg_for_cpu)
+ dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
flush_write_buffers();
}
-static inline void dma_sync_sg_for_device(struct device *hwdev,
- struct scatterlist *sg,
- int nelems, int direction)
+static inline void
+dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+ int nelems, int direction)
{
- if (direction == DMA_NONE)
- out_of_line_bug();
-
- if (swiotlb)
- return swiotlb_sync_sg_for_device(hwdev,sg,nelems,direction);
+ if (dma_ops->sync_sg_for_device) {
+ dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+ }
flush_write_buffers();
}
-extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, int direction);
-extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, int direction);
+static inline int
+dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
+{
+ return dma_ops->map_sg(hwdev, sg, nents, direction);
+}
-#define dma_unmap_page dma_unmap_single
+static inline void
+dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+ int direction)
+{
+ dma_ops->unmap_sg(hwdev, sg, nents, direction);
+}
extern int dma_supported(struct device *hwdev, u64 mask);
-extern int dma_get_cache_alignment(void);
-#define dma_is_consistent(h) 1
-static inline int dma_set_mask(struct device *dev, u64 mask)
+/* same for gart, swiotlb, and nommu */
+static inline int dma_get_cache_alignment(void)
{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
- *dev->dma_mask = mask;
- return 0;
+ return boot_cpu_data.x86_clflush_size;
}
-static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
+#define dma_is_consistent(h) 1
+
+extern int dma_set_mask(struct device *dev, u64 mask);
+
+static inline void
+dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
{
flush_write_buffers();
}
-#endif
+extern struct device fallback_dev;
+extern int panic_on_overflow;
+
+#endif /* _X8664_DMA_MAPPING_H */
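The restructuring above replaces compile-time IOMMU selection with a runtime ops table. A minimal, hypothetical nommu-style backend illustrates how an implementation plugs into dma_ops (names and bodies are a sketch, not the kernel's actual nommu code; a real backend must also supply map_sg/unmap_sg, which the inline wrappers call unconditionally):

static dma_addr_t example_map_single(struct device *hwdev, void *ptr,
				     size_t size, int direction)
{
	return virt_to_bus(ptr);	/* 1:1 mapping, no IOMMU */
}

static void example_unmap_single(struct device *dev, dma_addr_t addr,
				 size_t size, int direction)
{
	/* nothing to tear down for a 1:1 mapping */
}

static struct dma_mapping_ops example_nommu_ops = {
	.map_single	= example_map_single,
	.unmap_single	= example_unmap_single,
	.is_phys	= 1,		/* bus address == physical address */
};

/* e.g. during early init: dma_ops = &example_nommu_ops; */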
diff --git a/include/asm-x86_64/dwarf2.h b/include/asm-x86_64/dwarf2.h
index 582757fc0365..07654bd155bf 100644
--- a/include/asm-x86_64/dwarf2.h
+++ b/include/asm-x86_64/dwarf2.h
@@ -14,7 +14,7 @@
away for older version.
*/
-#ifdef CONFIG_DEBUG_INFO
+#ifdef CONFIG_UNWIND_INFO
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
@@ -28,6 +28,7 @@
#define CFI_RESTORE .cfi_restore
#define CFI_REMEMBER_STATE .cfi_remember_state
#define CFI_RESTORE_STATE .cfi_restore_state
+#define CFI_UNDEFINED .cfi_undefined
#else
@@ -44,6 +45,7 @@
#define CFI_RESTORE #
#define CFI_REMEMBER_STATE #
#define CFI_RESTORE_STATE #
+#define CFI_UNDEFINED #
#endif
diff --git a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h
index e682edc24a68..8dcc32665240 100644
--- a/include/asm-x86_64/e820.h
+++ b/include/asm-x86_64/e820.h
@@ -55,6 +55,7 @@ extern unsigned long e820_hole_size(unsigned long start_pfn,
unsigned long end_pfn);
extern void __init parse_memopt(char *p, char **end);
+extern void __init parse_memmapopt(char *p, char **end);
extern struct e820map e820;
#endif/*!__ASSEMBLY__*/
diff --git a/include/asm-x86_64/edac.h b/include/asm-x86_64/edac.h
new file mode 100644
index 000000000000..cad1cd42b4ee
--- /dev/null
+++ b/include/asm-x86_64/edac.h
@@ -0,0 +1,18 @@
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(void *va, u32 size)
+{
+ unsigned int *virt_addr = va;
+ u32 i;
+
+ for (i = 0; i < size / 4; i++, virt_addr++)
+ /* Very carefully read and write to memory atomically
+ * so we are interrupt, DMA and SMP safe.
+ */
+ __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
+#endif
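A usage sketch for atomic_scrub() (hypothetical caller): an EDAC driver rewrites a page that reported a corrected error so the memory controller regenerates its ECC:

static void scrub_one_page(struct page *page)
{
	/* the lock-prefixed "addl $0" on every dword forces an atomic
	   read-modify-write, which recomputes ECC for the line */
	atomic_scrub(page_address(page), PAGE_SIZE);
}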
diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h
index a582cfcf2231..7b286bd21d1d 100644
--- a/include/asm-x86_64/fixmap.h
+++ b/include/asm-x86_64/fixmap.h
@@ -76,7 +76,7 @@ extern void __this_fixmap_does_not_exist(void);
* directly without translation, we catch the bug with a NULL-deference
* kernel oops. Illegal ranges of incoming indices are caught too.
*/
-static inline unsigned long fix_to_virt(const unsigned int idx)
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
/*
* this branch gets completely eliminated after inlining,
diff --git a/include/asm-x86_64/gart-mapping.h b/include/asm-x86_64/gart-mapping.h
new file mode 100644
index 000000000000..ada497b0b55b
--- /dev/null
+++ b/include/asm-x86_64/gart-mapping.h
@@ -0,0 +1,16 @@
+#ifndef _X8664_GART_MAPPING_H
+#define _X8664_GART_MAPPING_H 1
+
+#include <linux/types.h>
+#include <asm/types.h>
+
+struct device;
+
+extern void*
+gart_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+
+extern int
+gart_dma_supported(struct device *hwdev, u64 mask);
+
+#endif /* _X8664_GART_MAPPING_H */
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index c14a8c7267a6..0df1715dee71 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -46,18 +46,18 @@ struct hw_interrupt_type;
* some of the following vectors are 'rare', they are merged
* into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
* TLB, reschedule and local APIC vectors are performance-critical.
- *
- * Vectors 0xf0-0xf9 are free (reserved for future Linux use).
*/
#define SPURIOUS_APIC_VECTOR 0xff
#define ERROR_APIC_VECTOR 0xfe
#define RESCHEDULE_VECTOR 0xfd
#define CALL_FUNCTION_VECTOR 0xfc
-#define KDB_VECTOR 0xfb /* reserved for KDB */
+/* fb free - please don't re-add KDB here because it's useless
+   (hint: think about what an NMI bit does to a vector) */
#define THERMAL_APIC_VECTOR 0xfa
#define THRESHOLD_APIC_VECTOR 0xf9
-#define INVALIDATE_TLB_VECTOR_END 0xf8
-#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f8 used for TLB flush */
+/* f8 free */
+#define INVALIDATE_TLB_VECTOR_END 0xf7
+#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
#define NUM_INVALIDATE_TLB_VECTORS 8
diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h
index aa39cfd0e001..876eb9a2fe78 100644
--- a/include/asm-x86_64/i387.h
+++ b/include/asm-x86_64/i387.h
@@ -30,7 +30,7 @@ extern int save_i387(struct _fpstate __user *buf);
*/
#define unlazy_fpu(tsk) do { \
- if ((tsk)->thread_info->status & TS_USEDFPU) \
+ if (task_thread_info(tsk)->status & TS_USEDFPU) \
save_init_fpu(tsk); \
} while (0)
@@ -46,9 +46,9 @@ static inline void tolerant_fwait(void)
}
#define clear_fpu(tsk) do { \
- if ((tsk)->thread_info->status & TS_USEDFPU) { \
+ if (task_thread_info(tsk)->status & TS_USEDFPU) { \
tolerant_fwait(); \
- (tsk)->thread_info->status &= ~TS_USEDFPU; \
+ task_thread_info(tsk)->status &= ~TS_USEDFPU; \
stts(); \
} \
} while (0)
@@ -75,7 +75,8 @@ extern int set_fpregs(struct task_struct *tsk,
static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
{
int err;
- asm volatile("1: rex64 ; fxrstor (%[fx])\n\t"
+
+ asm volatile("1: rex64/fxrstor (%[fx])\n\t"
"2:\n"
".section .fixup,\"ax\"\n"
"3: movl $-1,%[err]\n"
@@ -86,7 +87,11 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
" .quad 1b,3b\n"
".previous"
: [err] "=r" (err)
- : [fx] "r" (fx), "0" (0));
+#if 0 /* See comment in __fxsave_clear() below. */
+ : [fx] "r" (fx), "m" (*fx), "0" (0));
+#else
+ : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
+#endif
if (unlikely(err))
init_fpu(current);
return err;
@@ -95,7 +100,8 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
{
int err;
- asm volatile("1: rex64 ; fxsave (%[fx])\n\t"
+
+ asm volatile("1: rex64/fxsave (%[fx])\n\t"
"2:\n"
".section .fixup,\"ax\"\n"
"3: movl $-1,%[err]\n"
@@ -105,20 +111,53 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
" .align 8\n"
" .quad 1b,3b\n"
".previous"
- : [err] "=r" (err)
- : [fx] "r" (fx), "0" (0));
+ : [err] "=r" (err), "=m" (*fx)
+#if 0 /* See comment in __fxsave_clear() below. */
+ : [fx] "r" (fx), "0" (0));
+#else
+ : [fx] "cdaSDb" (fx), "0" (0));
+#endif
if (unlikely(err))
__clear_user(fx, sizeof(struct i387_fxsave_struct));
return err;
}
+static inline void __fxsave_clear(struct task_struct *tsk)
+{
+ /* Using "rex64; fxsave %0" is broken because, if the memory operand
+ uses any extended registers for addressing, a second REX prefix
+ will be generated (to the assembler, rex64 followed by semicolon
+ is a separate instruction), and hence the 64-bitness is lost. */
+#if 0
+ /* Using "fxsaveq %0" would be the ideal choice, but is only supported
+ starting with gas 2.16. */
+ __asm__ __volatile__("fxsaveq %0"
+ : "=m" (tsk->thread.i387.fxsave));
+#elif 0
+ /* Using, as a workaround, the properly prefixed form below isn't
+ accepted by any binutils version so far released, complaining that
+ the same type of prefix is used twice if an extended register is
+ needed for addressing (fix submitted to mainline 2005-11-21). */
+ __asm__ __volatile__("rex64/fxsave %0"
+ : "=m" (tsk->thread.i387.fxsave));
+#else
+ /* This, however, we can work around by forcing the compiler to select
+ an addressing mode that doesn't require extended registers. */
+ __asm__ __volatile__("rex64/fxsave %P2(%1)"
+ : "=m" (tsk->thread.i387.fxsave)
+ : "cdaSDb" (tsk),
+ "i" (offsetof(__typeof__(*tsk),
+ thread.i387.fxsave)));
+#endif
+ __asm__ __volatile__("fnclex");
+}
+
static inline void kernel_fpu_begin(void)
{
struct thread_info *me = current_thread_info();
preempt_disable();
- if (me->status & TS_USEDFPU) {
- asm volatile("rex64 ; fxsave %0 ; fnclex"
- : "=m" (me->task->thread.i387.fxsave));
+ if (me->status & TS_USEDFPU) {
+ __fxsave_clear(me->task);
me->status &= ~TS_USEDFPU;
return;
}
@@ -131,11 +170,10 @@ static inline void kernel_fpu_end(void)
preempt_enable();
}
-static inline void save_init_fpu( struct task_struct *tsk )
+static inline void save_init_fpu(struct task_struct *tsk)
{
- asm volatile( "rex64 ; fxsave %0 ; fnclex"
- : "=m" (tsk->thread.i387.fxsave));
- tsk->thread_info->status &= ~TS_USEDFPU;
+ __fxsave_clear(tsk);
+ task_thread_info(tsk)->status &= ~TS_USEDFPU;
stts();
}
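For context, kernel_fpu_begin()/kernel_fpu_end() bracket any in-kernel SSE use, and the saving path now goes through __fxsave_clear(). A usage sketch (hypothetical function, body elided):

static void sse_copy(void *dst, const void *src, size_t len)
{
	kernel_fpu_begin();	/* disables preemption; saves live user FPU state */
	/* ... XMM-register copy loop would go here ... */
	kernel_fpu_end();	/* stts() and preempt_enable() */
}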
diff --git a/include/asm-x86_64/ia32.h b/include/asm-x86_64/ia32.h
index c7bc9c0525ba..e6b7f2234e43 100644
--- a/include/asm-x86_64/ia32.h
+++ b/include/asm-x86_64/ia32.h
@@ -169,6 +169,8 @@ int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs);
struct linux_binprm;
extern int ia32_setup_arg_pages(struct linux_binprm *bprm,
unsigned long stack_top, int exec_stack);
+struct mm_struct;
+extern void ia32_pick_mmap_layout(struct mm_struct *mm);
#endif
diff --git a/include/asm-x86_64/ia32_unistd.h b/include/asm-x86_64/ia32_unistd.h
index d5166ec3868d..e87cd83a0e86 100644
--- a/include/asm-x86_64/ia32_unistd.h
+++ b/include/asm-x86_64/ia32_unistd.h
@@ -299,7 +299,21 @@
#define __NR_ia32_inotify_init 291
#define __NR_ia32_inotify_add_watch 292
#define __NR_ia32_inotify_rm_watch 293
+#define __NR_ia32_migrate_pages 294
+#define __NR_ia32_openat 295
+#define __NR_ia32_mkdirat 296
+#define __NR_ia32_mknodat 297
+#define __NR_ia32_fchownat 298
+#define __NR_ia32_futimesat 299
+#define __NR_ia32_newfstatat 300
+#define __NR_ia32_unlinkat 301
+#define __NR_ia32_renameat 302
+#define __NR_ia32_linkat 303
+#define __NR_ia32_symlinkat 304
+#define __NR_ia32_readlinkat 305
+#define __NR_ia32_fchmodat 306
+#define __NR_ia32_faccessat 307
-#define IA32_NR_syscalls 294 /* must be > than biggest syscall! */
+#define IA32_NR_syscalls 308 /* must be > than biggest syscall! */
#endif /* _ASM_X86_64_IA32_UNISTD_H_ */
diff --git a/include/asm-x86_64/idle.h b/include/asm-x86_64/idle.h
new file mode 100644
index 000000000000..6bd47dcf2067
--- /dev/null
+++ b/include/asm-x86_64/idle.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_X86_64_IDLE_H
+#define _ASM_X86_64_IDLE_H 1
+
+#define IDLE_START 1
+#define IDLE_END 2
+
+struct notifier_block;
+void idle_notifier_register(struct notifier_block *n);
+void idle_notifier_unregister(struct notifier_block *n);
+
+void enter_idle(void);
+void exit_idle(void);
+
+#endif
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
index 52ff269fe054..9dac18db8291 100644
--- a/include/asm-x86_64/io.h
+++ b/include/asm-x86_64/io.h
@@ -143,6 +143,11 @@ static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap(volatile void __iomem *addr);
+/* Use normal IO mappings for DMI */
+#define dmi_ioremap ioremap
+#define dmi_iounmap(x,l) iounmap(x)
+#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
+
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
diff --git a/include/asm-x86_64/ioctl.h b/include/asm-x86_64/ioctl.h
index 609b663b6bf4..b279fe06dfe5 100644
--- a/include/asm-x86_64/ioctl.h
+++ b/include/asm-x86_64/ioctl.h
@@ -1,75 +1 @@
-/* $Id: ioctl.h,v 1.2 2001/07/04 09:08:13 ak Exp $
- *
- * linux/ioctl.h for Linux by H.H. Bergman.
- */
-
-#ifndef _ASMX8664_IOCTL_H
-#define _ASMX8664_IOCTL_H
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms. The i386 ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
-#endif /* _ASMX8664_IOCTL_H */
+#include <asm-generic/ioctl.h>
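The private copy is dropped in favor of the identical asm-generic encoding; drivers keep defining command numbers exactly as before. For illustration (a hypothetical driver's ioctls):

#include <linux/ioctl.h>

struct foo_config {		/* hypothetical driver structure */
	int	channel;
};

#define FOO_IOC_MAGIC	'F'
#define FOO_GET_CONFIG	_IOR(FOO_IOC_MAGIC, 1, struct foo_config)
#define FOO_SET_CONFIG	_IOW(FOO_IOC_MAGIC, 2, struct foo_config)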
diff --git a/include/asm-x86_64/ipi.h b/include/asm-x86_64/ipi.h
index 022e9d340ad7..2a5c162b7d92 100644
--- a/include/asm-x86_64/ipi.h
+++ b/include/asm-x86_64/ipi.h
@@ -38,10 +38,6 @@ static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, uns
icr |= APIC_DM_FIXED | vector;
break;
case NMI_VECTOR:
- /*
- * Setup KDB IPI to be delivered as an NMI
- */
- case KDB_VECTOR:
icr |= APIC_DM_NMI;
break;
}
diff --git a/include/asm-x86_64/irq.h b/include/asm-x86_64/irq.h
index fb724ba37ae6..9db5a1b4f7b1 100644
--- a/include/asm-x86_64/irq.h
+++ b/include/asm-x86_64/irq.h
@@ -36,7 +36,7 @@
#define NR_IRQ_VECTORS NR_IRQS
#else
#define NR_IRQS 224
-#define NR_IRQ_VECTORS 1024
+#define NR_IRQ_VECTORS (32 * NR_CPUS)
#endif
static __inline__ int irq_canonicalize(int irq)
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index f604e84c5303..b9ed4c0c8783 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -35,9 +35,16 @@ enum die_val {
DIE_PAGE_FAULT,
};
-static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig)
-{
- struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig };
+static inline int notify_die(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig
+ };
return notifier_call_chain(&die_chain, val, &args);
}
diff --git a/include/asm-x86_64/kexec.h b/include/asm-x86_64/kexec.h
index 42d2ff15c592..ae28cd44bcd3 100644
--- a/include/asm-x86_64/kexec.h
+++ b/include/asm-x86_64/kexec.h
@@ -3,6 +3,7 @@
#include <asm/page.h>
#include <asm/proto.h>
+#include <asm/ptrace.h>
/*
* KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
@@ -26,8 +27,40 @@
#define KEXEC_ARCH KEXEC_ARCH_X86_64
#define MAX_NOTE_BYTES 1024
-typedef u32 note_buf_t[MAX_NOTE_BYTES/4];
-extern note_buf_t crash_notes[];
+/*
+ * Save the registers of the CPU on which the panic occurred in
+ * crash_kexec, so that a valid sp is saved. The registers of the
+ * other CPUs are saved in machine_crash_shutdown while shooting them down.
+ */
+
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
+{
+ if (oldregs)
+ memcpy(newregs, oldregs, sizeof(*newregs));
+ else {
+ __asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->rbx));
+ __asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->rcx));
+ __asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->rdx));
+ __asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->rsi));
+ __asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->rdi));
+ __asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->rbp));
+ __asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->rax));
+ __asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->rsp));
+ __asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8));
+ __asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9));
+ __asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10));
+ __asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11));
+ __asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12));
+ __asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13));
+ __asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14));
+ __asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15));
+ __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss));
+ __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs));
+ __asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->eflags));
+ newregs->rip = (unsigned long)current_text_addr();
+ }
+}
#endif /* _X86_64_KEXEC_H */
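A sketch of how crash_setup_regs() is meant to be invoked on the crash path (simplified; the real caller lives in the generic crash_kexec code):

static void crash_save_this_cpu(struct pt_regs *regs)
{
	struct pt_regs fixed_regs;

	/* regs may be NULL if panic() was called directly; the inline
	   asm branch then snapshots the live register file */
	crash_setup_regs(&fixed_regs, regs);
	/* ... fixed_regs would then be copied into this CPU's crash note ... */
}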
diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h
index 4dd7a7e148d4..98a1e95ddb98 100644
--- a/include/asm-x86_64/kprobes.h
+++ b/include/asm-x86_64/kprobes.h
@@ -27,7 +27,10 @@
#include <linux/ptrace.h>
#include <linux/percpu.h>
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+
struct pt_regs;
+struct kprobe;
typedef u8 kprobe_opcode_t;
#define BREAKPOINT_INSTRUCTION 0xcc
@@ -42,6 +45,7 @@ typedef u8 kprobe_opcode_t;
#define ARCH_SUPPORTS_KRETPROBES
void kretprobe_trampoline(void);
+extern void arch_remove_kprobe(struct kprobe *p);
/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
diff --git a/include/asm-x86_64/mman.h b/include/asm-x86_64/mman.h
index 78e60a4fd4ee..d0e97b74f735 100644
--- a/include/asm-x86_64/mman.h
+++ b/include/asm-x86_64/mman.h
@@ -36,6 +36,7 @@
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
index b630d52bdfb1..16e4be4de0c5 100644
--- a/include/asm-x86_64/mmu_context.h
+++ b/include/asm-x86_64/mmu_context.h
@@ -15,18 +15,13 @@
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
+#ifdef CONFIG_SMP
if (read_pda(mmu_state) == TLBSTATE_OK)
write_pda(mmu_state, TLBSTATE_LAZY);
-}
-#else
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
#endif
+}
static inline void load_cr3(pgd_t *pgd)
{
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index 69baaa8a3ce0..972c9359f7d7 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -36,22 +36,12 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_DISCONTIGMEM
-
#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
#define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr))
-/* Requires pfn_valid(pfn) to be true */
-#define pfn_to_page(pfn) ({ \
- int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT); \
- ((pfn) - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map; \
-})
-
-#define page_to_pfn(page) \
- (long)(((page) - page_zone(page)->zone_mem_map) + page_zone(page)->zone_start_pfn)
-
-#define pfn_valid(pfn) ((pfn) >= num_physpages ? 0 : \
- ({ u8 nid__ = pfn_to_nid(pfn); \
- nid__ != 0xff && (pfn) >= node_start_pfn(nid__) && (pfn) < node_end_pfn(nid__); }))
+extern struct page *pfn_to_page(unsigned long pfn);
+extern unsigned long page_to_pfn(struct page *page);
+extern int pfn_valid(unsigned long pfn);
#endif
#define local_mapnr(kvaddr) \
diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h
index 6f8a17d105ab..10248a9a0582 100644
--- a/include/asm-x86_64/mpspec.h
+++ b/include/asm-x86_64/mpspec.h
@@ -76,7 +76,7 @@ struct mpc_config_bus
{
unsigned char mpc_type;
unsigned char mpc_busid;
- unsigned char mpc_bustype[6] __attribute((packed));
+ unsigned char mpc_bustype[6];
};
/* List of Bus Type string values, Intel MP Spec. */
diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h
new file mode 100644
index 000000000000..11fbee2bd6c0
--- /dev/null
+++ b/include/asm-x86_64/mutex.h
@@ -0,0 +1,113 @@
+/*
+ * Assembly implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+/**
+ * __mutex_fastpath_lock - decrement and call function if negative
+ * @v: pointer of type atomic_t
+ * @fail_fn: function to call if the result is negative
+ *
+ * Atomically decrements @v and calls <fail_fn> if the result is negative.
+ */
+#define __mutex_fastpath_lock(v, fail_fn) \
+do { \
+ unsigned long dummy; \
+ \
+ typecheck(atomic_t *, v); \
+ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+ __asm__ __volatile__( \
+ LOCK " decl (%%rdi) \n" \
+ " js 2f \n" \
+ "1: \n" \
+ \
+ LOCK_SECTION_START("") \
+ "2: call "#fail_fn" \n" \
+ " jmp 1b \n" \
+ LOCK_SECTION_END \
+ \
+ :"=D" (dummy) \
+ : "D" (v) \
+ : "rax", "rsi", "rdx", "rcx", \
+ "r8", "r9", "r10", "r11", "memory"); \
+} while (0)
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count,
+ int fastcall (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_dec_return(count) < 0))
+ return fail_fn(count);
+ else
+ return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - increment and call function if nonpositive
+ * @v: pointer of type atomic_t
+ * @fail_fn: function to call if the result is nonpositive
+ *
+ * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
+ */
+#define __mutex_fastpath_unlock(v, fail_fn) \
+do { \
+ unsigned long dummy; \
+ \
+ typecheck(atomic_t *, v); \
+ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+ __asm__ __volatile__( \
+ LOCK " incl (%%rdi) \n" \
+ " jle 2f \n" \
+ "1: \n" \
+ \
+ LOCK_SECTION_START("") \
+ "2: call "#fail_fn" \n" \
+ " jmp 1b \n" \
+ LOCK_SECTION_END \
+ \
+ :"=D" (dummy) \
+ : "D" (v) \
+ : "rax", "rsi", "rdx", "rcx", \
+ "r8", "r9", "r10", "r11", "memory"); \
+} while (0)
+
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
+ * if it wasn't 1 originally. [the fallback function is never used on
+ * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+ return 1;
+ else
+ return 0;
+}
+
+#endif
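How the generic mutex code is expected to invoke these fastpaths — a simplified sketch of the kernel/mutex.c pattern (the slowpath symbol actually lives there, out of line):

static void sample_mutex_lock(struct mutex *lock)
{
	/* a single locked decrement; only a contended lock (count < 0)
	   takes the out-of-line call into the slowpath */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}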
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index d51e56fdc3da..34e434ce3268 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -20,6 +20,11 @@ extern int numa_off;
extern void numa_set_node(int cpu, int node);
extern unsigned char apicid_to_node[256];
+#ifdef CONFIG_NUMA
+extern void __init init_cpu_to_node(void);
+#else
+#define init_cpu_to_node() do {} while (0)
+#endif
#define NUMA_NO_NODE 0xff
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 06e489f32472..615e3e494929 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -14,13 +14,25 @@
#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
#define THREAD_ORDER 1
-#ifdef __ASSEMBLY__
-#define THREAD_SIZE (1 << (PAGE_SHIFT + THREAD_ORDER))
-#else
-#define THREAD_SIZE (1UL << (PAGE_SHIFT + THREAD_ORDER))
-#endif
+#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE-1))
+#define EXCEPTION_STACK_ORDER 0
+#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
+
+#define DEBUG_STACK_ORDER EXCEPTION_STACK_ORDER
+#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
+
+#define IRQSTACK_ORDER 2
+#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
+
+#define STACKFAULT_STACK 1
+#define DOUBLEFAULT_STACK 2
+#define NMI_STACK 3
+#define DEBUG_STACK 4
+#define MCE_STACK 5
+#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
+
#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
index eeb3088a1c9e..fd03e15d7ea6 100644
--- a/include/asm-x86_64/pci.h
+++ b/include/asm-x86_64/pci.h
@@ -42,18 +42,20 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
#include <asm/scatterlist.h>
#include <linux/string.h>
#include <asm/page.h>
+#include <linux/dma-mapping.h> /* for have_iommu */
extern int iommu_setup(char *opt);
-#ifdef CONFIG_GART_IOMMU
/* The PCI address space does equal the physical memory
* address space. The networking and block device layers use
* this boolean for bounce buffer decisions
*
- * On AMD64 it mostly equals, but we set it to zero to tell some subsystems
- * that an IOMMU is available.
+ * On AMD64 it mostly equals, but we set it to zero if a hardware
+ * IOMMU (gart) or software IOMMU (swiotlb) is available.
*/
-#define PCI_DMA_BUS_IS_PHYS (no_iommu ? 1 : 0)
+#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
+
+#ifdef CONFIG_GART_IOMMU
/*
* x86-64 always supports DAC, but sometimes it is useful to force
@@ -79,7 +81,6 @@ extern int iommu_sac_force;
#else
/* No IOMMU */
-#define PCI_DMA_BUS_IS_PHYS 1
#define pci_dac_dma_supported(pci_dev, mask) 1
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
index 8733ccfa442e..c7ab38a601af 100644
--- a/include/asm-x86_64/pda.h
+++ b/include/asm-x86_64/pda.h
@@ -5,6 +5,7 @@
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
+#include <asm/page.h>
/* Per processor datastructure. %gs points to it while the kernel runs */
struct x8664_pda {
@@ -12,6 +13,9 @@ struct x8664_pda {
unsigned long data_offset; /* Per cpu data offset from linker address */
unsigned long kernelstack; /* top of kernel stack for current */
unsigned long oldrsp; /* user rsp for system call */
+#if DEBUG_STKSZ > EXCEPTION_STKSZ
+ unsigned long debugstack; /* #DB/#BP stack. */
+#endif
int irqcount; /* Irq nesting counter. Starts with -1 */
int cpunumber; /* Logical CPU number */
char *irqstackptr; /* top of irqstack */
@@ -23,11 +27,10 @@ struct x8664_pda {
unsigned apic_timer_irqs;
} ____cacheline_aligned_in_smp;
+extern struct x8664_pda *_cpu_pda[];
+extern struct x8664_pda boot_cpu_pda[];
-#define IRQSTACK_ORDER 2
-#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
-
-extern struct x8664_pda cpu_pda[];
+#define cpu_pda(i) (_cpu_pda[i])
/*
* There is no fast way to get the base address of the PDA, all the accesses
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 9c71855736fb..29a6b0408f75 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -11,7 +11,7 @@
#include <asm/pda.h>
-#define __per_cpu_offset(cpu) (cpu_pda[cpu].data_offset)
+#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
#define __my_cpu_offset() read_pda(data_offset)
/* Separate out the type, so (int[3], foo) works. */
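With the PDA now behind a per-CPU pointer array, the per-cpu offset resolves through cpu_pda(cpu)->data_offset. A conceptual expansion (a sketch of what the macros reduce to, not the literal preprocessor output):

/*
 *   per_cpu(x, cpu)   -> *RELOC_HIDE(&per_cpu__x, cpu_pda(cpu)->data_offset)
 *   __get_cpu_var(x)  -> *RELOC_HIDE(&per_cpu__x, read_pda(data_offset))
 *
 * i.e. a remote CPU's copy is reached through its PDA pointer, while the
 * local copy uses the data_offset cached in the %gs-based PDA.
 */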
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index ecf58c7c1650..8fbf4dd72115 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -122,6 +122,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
#define pte_same(a, b) ((a).pte == (b).pte)
+#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
+
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PUD_SIZE (1UL << PUD_SHIFT)
@@ -265,25 +267,25 @@ static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
*/
#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
static inline int pte_user(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-extern inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
-extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
+static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
+static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_huge(pte_t pte) { return (pte_val(pte) & __LARGE_PTE) == __LARGE_PTE; }
-extern inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
-extern inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
-extern inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
-extern inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
-extern inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
-extern inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
-extern inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
-extern inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
-extern inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
-extern inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
-extern inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | __LARGE_PTE)); return pte; }
+static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
+static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
+static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
+static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
+static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
+static inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
+static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | __LARGE_PTE)); return pte; }
struct vm_area_struct;
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 4861246548f7..8c8d88c036ed 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -227,7 +227,13 @@ struct tss_struct {
extern struct cpuinfo_x86 boot_cpu_data;
DECLARE_PER_CPU(struct tss_struct,init_tss);
+#ifdef CONFIG_X86_VSMP
+#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
+#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
+#else
#define ARCH_MIN_TASKALIGN 16
+#define ARCH_MIN_MMSTRUCT_ALIGN 0
+#endif
struct thread_struct {
unsigned long rsp0;
@@ -267,15 +273,6 @@ struct thread_struct {
#define INIT_MMAP \
{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
-#define STACKFAULT_STACK 1
-#define DOUBLEFAULT_STACK 2
-#define NMI_STACK 3
-#define DEBUG_STACK 4
-#define MCE_STACK 5
-#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
-#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
-#define EXCEPTION_STACK_ORDER 0
-
#define start_thread(regs,new_rip,new_rsp) do { \
asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
load_gs_index(0); \
@@ -317,8 +314,8 @@ extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
extern unsigned long get_wchan(struct task_struct *p);
-#define KSTK_EIP(tsk) \
- (((struct pt_regs *)(tsk->thread.rsp0 - sizeof(struct pt_regs)))->rip)
+#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
@@ -480,4 +477,6 @@ extern unsigned long boot_option_idle_override;
/* Boot loader type from the setup header */
extern int bootloader_type;
+#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
+
#endif /* __ASM_X86_64_PROCESSOR_H */
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index 34501086afef..115e496c6139 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -67,8 +67,6 @@ extern void load_gs_index(unsigned gs);
extern unsigned long end_pfn_map;
-extern cpumask_t cpu_initialized;
-
extern void show_trace(unsigned long * rsp);
extern void show_registers(struct pt_regs *regs);
@@ -91,8 +89,12 @@ extern void check_efer(void);
extern int unhandled_signal(struct task_struct *tsk, int sig);
+extern int unsynchronized_tsc(void);
+
extern void select_idle_routine(const struct cpuinfo_x86 *c);
-extern void swiotlb_init(void);
+
+extern void gart_parse_options(char *);
+extern void __init no_iommu_init(void);
extern unsigned long table_start, table_end;
@@ -106,12 +108,17 @@ extern int skip_ioapic_setup;
extern int acpi_ht;
extern int acpi_disabled;
+#ifdef CONFIG_GART_IOMMU
extern int fallback_aper_order;
extern int fallback_aper_force;
extern int iommu_aperture;
-extern int iommu_aperture_disabled;
extern int iommu_aperture_allowed;
+extern int iommu_aperture_disabled;
extern int fix_aperture;
+#else
+#define iommu_aperture 0
+#define iommu_aperture_allowed 0
+#endif
extern int force_iommu;
extern int reboot_force;
diff --git a/include/asm-x86_64/segment.h b/include/asm-x86_64/segment.h
index 44adaf18c11e..d4bed33fb32c 100644
--- a/include/asm-x86_64/segment.h
+++ b/include/asm-x86_64/segment.h
@@ -19,15 +19,13 @@
#define __USER_DS 0x2b /* 5*8+3 */
#define __USER_CS 0x33 /* 6*8+3 */
#define __USER32_DS __USER_DS
-#define __KERNEL16_CS (GDT_ENTRY_KERNELCS16 * 8)
-#define __KERNEL_COMPAT32_CS 0x8
#define GDT_ENTRY_TLS 1
#define GDT_ENTRY_TSS 8 /* needs two entries */
#define GDT_ENTRY_LDT 10 /* needs two entries */
#define GDT_ENTRY_TLS_MIN 12
#define GDT_ENTRY_TLS_MAX 14
-#define GDT_ENTRY_KERNELCS16 15
+/* 15 free */
#define GDT_ENTRY_TLS_ENTRIES 3
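For reference, the selector constants above follow the usual encoding: (GDT index << 3) | RPL, with RPL 3 for user segments. A sketch of the arithmetic (SELECTOR is a hypothetical macro, not part of this header):

#define SELECTOR(index, rpl)	(((index) << 3) | (rpl))
/* SELECTOR(5, 3) == 0x2b == __USER_DS; SELECTOR(6, 3) == 0x33 == __USER_CS */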
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index d030409a8fb5..9ccbb2cfd5c0 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -35,6 +35,7 @@ extern cpumask_t cpu_present_mask;
extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_initialized;
/*
* Private routines/data
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index dddf1b218681..60757efd1353 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -3,10 +3,14 @@
#include <linux/config.h>
+#include <asm/dma-mapping.h>
+
/* SWIOTLB interface */
-extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size,
- int dir);
+extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr,
+ size_t size, int dir);
+extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags);
extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir);
extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
@@ -34,10 +38,10 @@ extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
int nents, int direction);
extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
-extern void *swiotlb_alloc_coherent (struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags);
extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle);
+extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
+extern void swiotlb_init(void);
#ifdef CONFIG_SWIOTLB
extern int swiotlb;
@@ -45,4 +49,6 @@ extern int swiotlb;
#define swiotlb 0
#endif
-#endif
+extern void pci_swiotlb_init(void);
+
+#endif /* _ASM_SWIOTLB_H */
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 85348e02ad2e..a73f0c789d8b 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -20,8 +20,8 @@
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
/* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT "pushfq ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popfq\n\t"
+#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t"
#define __EXTRA_CLOBBER \
,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
@@ -137,6 +137,21 @@ struct alt_instr {
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous" :: "i" (feature), ##input)
+/* Like alternative_input, but with a single output argument */
+#define alternative_io(oldinstr, newinstr, feature, output, input...) \
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 8\n" \
+ " .quad 661b\n" /* label */ \
+ " .quad 663f\n" /* new instruction */ \
+ " .byte %c[feat]\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous" : output : [feat] "i" (feature), ##input)
+
/*
* Clear and set 'TS' bit respectively
*/
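alternative_io() emits oldinstr inline and records the site in .altinstructions; at boot, apply_alternatives() patches in newinstr on CPUs that have the feature bit set. A minimal call-site sketch, mirroring the get_cycles_sync() user added to timex.h later in this patch:

	unsigned eax;
	/* Default to a serializing CPUID; patched to a 2-byte NOP on
	   CPUs whose RDTSC is already synchronous. */
	alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
		       "=a" (eax), "0" (1) : "ebx", "ecx", "edx", "memory");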
@@ -178,6 +193,15 @@ static inline void write_cr4(unsigned long val)
#define wbinvd() \
__asm__ __volatile__ ("wbinvd": : :"memory");
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+static inline void sched_cacheflush(void)
+{
+ wbinvd();
+}
+
#endif /* __KERNEL__ */
#define nop() __asm__ __volatile__ ("nop")
@@ -311,10 +335,24 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
/* interrupt control.. */
#define local_save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
#define local_irq_restore(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
+
+#ifdef CONFIG_X86_VSMP
+/* Interrupt control for VSMP architecture */
+#define local_irq_disable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0)
+#define local_irq_enable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0)
+
+#define irqs_disabled() \
+({ \
+ unsigned long flags; \
+ local_save_flags(flags); \
+ (flags & (1<<18)) || !(flags & (1<<9)); \
+})
+
+/* For spinlocks etc */
+#define local_irq_save(x) do { local_save_flags(x); local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0)
+#else /* CONFIG_X86_VSMP */
#define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
-/* used in the idle loop; sti takes one instruction cycle to complete */
-#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
#define irqs_disabled() \
({ \
@@ -325,15 +363,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
/* For spinlocks etc */
#define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
+#endif
-void cpu_idle_wait(void);
+/* used in the idle loop; sti takes one instruction cycle to complete */
+#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
+/* used when interrupts are already enabled or to shutdown the processor */
+#define halt() __asm__ __volatile__("hlt": : :"memory")
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
+void cpu_idle_wait(void);
extern unsigned long arch_align_stack(unsigned long sp);
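The VSMP variants above encode interrupt state in two RFLAGS bits: IF (bit 9, interrupts enabled) and AC (bit 18), which VSMP repurposes as a disable marker. A standalone restatement of the predicate (vsmp_flags_disabled is hypothetical; the bit masks match the literals in the diff):

#define X86_EFLAGS_IF	(1UL << 9)	/* interrupts enabled */
#define X86_EFLAGS_AC	(1UL << 18)	/* VSMP: marks interrupts disabled */

static inline int vsmp_flags_disabled(unsigned long flags)
{
	return (flags & X86_EFLAGS_AC) || !(flags & X86_EFLAGS_IF);
}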
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index 08eb6e4f3737..4ac0e0a36934 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -76,8 +76,6 @@ static inline struct thread_info *stack_thread_info(void)
#define alloc_thread_info(tsk) \
((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#else /* !__ASSEMBLY__ */
@@ -138,6 +136,7 @@ static inline struct thread_info *stack_thread_info(void)
* have to worry about atomic accesses.
*/
#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
+#define TS_COMPAT 0x0002 /* 32bit syscall active */
#endif /* __KERNEL__ */
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
index f971f45d6d78..f18443fcdf04 100644
--- a/include/asm-x86_64/timex.h
+++ b/include/asm-x86_64/timex.h
@@ -10,6 +10,9 @@
#include <asm/msr.h>
#include <asm/vsyscall.h>
#include <asm/hpet.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+#include <linux/compiler.h>
#define CLOCK_TICK_RATE PIT_TICK_RATE /* Underlying HZ */
@@ -23,6 +26,19 @@ static inline cycles_t get_cycles (void)
return ret;
}
+/* Like get_cycles, but make sure the CPU is synchronized. */
+static __always_inline cycles_t get_cycles_sync(void)
+{
+ unsigned long long ret;
+ unsigned eax;
+ /* Don't do an additional sync on CPUs where we know
+ RDTSC is already synchronous. */
+ alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
+ "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
+ rdtscll(ret);
+ return ret;
+}
+
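A hedged usage sketch for get_cycles_sync(): because the TSC read is serialized, two reads cleanly bracket a region. time_region_us() is hypothetical; cpu_khz (declared just below) is the TSC rate in kHz, i.e. cycles per millisecond:

static unsigned long long time_region_us(void (*fn)(void))
{
	cycles_t t0, t1;

	t0 = get_cycles_sync();
	fn();
	t1 = get_cycles_sync();
	return (t1 - t0) * 1000ULL / cpu_khz;	/* cycles -> microseconds */
}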
extern unsigned int cpu_khz;
extern int read_current_timer(unsigned long *timer_value);
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 7d82bc56b9fa..2fa7f27381b4 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -39,7 +39,6 @@ extern int __node_distance(int, int);
.max_interval = 32, \
.busy_factor = 32, \
.imbalance_pct = 125, \
- .cache_hot_time = (10*1000000), \
.cache_nice_tries = 2, \
.busy_idx = 3, \
.idle_idx = 2, \
diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
index 1bb8b8a24436..bddffcb591b8 100644
--- a/include/asm-x86_64/uaccess.h
+++ b/include/asm-x86_64/uaccess.h
@@ -244,7 +244,7 @@ extern unsigned long copy_to_user(void __user *to, const void *from, unsigned le
extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len);
extern unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len);
-static inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
+static __always_inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
int ret = 0;
if (!__builtin_constant_p(size))
@@ -273,7 +273,7 @@ static inline int __copy_from_user(void *dst, const void __user *src, unsigned s
}
}
-static inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
+static __always_inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
int ret = 0;
if (!__builtin_constant_p(size))
@@ -305,7 +305,7 @@ static inline int __copy_to_user(void __user *dst, const void *src, unsigned siz
}
-static inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+static __always_inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
int ret = 0;
if (!__builtin_constant_p(size))
@@ -348,6 +348,7 @@ static inline int __copy_in_user(void __user *dst, const void __user *src, unsig
long strncpy_from_user(char *dst, const char __user *src, long count);
long __strncpy_from_user(char *dst, const char __user *src, long count);
long strnlen_user(const char __user *str, long n);
+long __strnlen_user(const char __user *str, long n);
long strlen_user(const char __user *str);
unsigned long clear_user(void __user *mem, unsigned long len);
unsigned long __clear_user(void __user *mem, unsigned long len);
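The switch to __always_inline matters because __copy_from_user() special-cases compile-time-constant sizes: if the compiler declines a plain inline, __builtin_constant_p(size) fails and every call falls back to copy_user_generic(). A kernel-side sketch of a caller that benefits (struct my_req and fetch_req are hypothetical; the range must already have passed access_ok()):

struct my_req { u32 cmd; u32 arg; };

static int fetch_req(struct my_req *dst, const void __user *src)
{
	/* sizeof(*dst) is constant, so this compiles to a fixed-size copy */
	if (__copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;
	return 0;
}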
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 2c42150bce0c..436d099b5b6b 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -571,8 +571,37 @@ __SYSCALL(__NR_inotify_init, sys_inotify_init)
__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
#define __NR_inotify_rm_watch 255
__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
+#define __NR_migrate_pages 256
+__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
+#define __NR_openat 257
+__SYSCALL(__NR_openat, sys_openat)
+#define __NR_mkdirat 258
+__SYSCALL(__NR_mkdirat, sys_mkdirat)
+#define __NR_mknodat 259
+__SYSCALL(__NR_mknodat, sys_mknodat)
+#define __NR_fchownat 260
+__SYSCALL(__NR_fchownat, sys_fchownat)
+#define __NR_futimesat 261
+__SYSCALL(__NR_futimesat, sys_futimesat)
+#define __NR_newfstatat 262
+__SYSCALL(__NR_newfstatat, sys_newfstatat)
+#define __NR_unlinkat 263
+__SYSCALL(__NR_unlinkat, sys_unlinkat)
+#define __NR_renameat 264
+__SYSCALL(__NR_renameat, sys_renameat)
+#define __NR_linkat 265
+__SYSCALL(__NR_linkat, sys_linkat)
+#define __NR_symlinkat 266
+__SYSCALL(__NR_symlinkat, sys_symlinkat)
+#define __NR_readlinkat 267
+__SYSCALL(__NR_readlinkat, sys_readlinkat)
+#define __NR_fchmodat 268
+__SYSCALL(__NR_fchmodat, sys_fchmodat)
+#define __NR_faccessat 269
+__SYSCALL(__NR_faccessat, sys_faccessat)
+
-#define __NR_syscall_max __NR_inotify_rm_watch
+#define __NR_syscall_max __NR_faccessat
#ifndef __NO_STUBS
/* user-visible error numbers are in the range -1 - -4095 */
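A hedged userspace sketch invoking one of the new *at syscalls by number, for C libraries that do not yet wrap them (the number comes from the table above):

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	int dirfd = open("/etc", O_RDONLY);
	/* openat(dirfd, path, flags): path resolved relative to dirfd */
	int fd = syscall(257 /* __NR_openat */, dirfd, "hostname", O_RDONLY);
	return fd < 0;
}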
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
index 438a3f52f839..a85e16f56d73 100644
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -36,8 +36,8 @@ struct vxtime_data {
int mode;
};
-#define hpet_readl(a) readl((void *)fix_to_virt(FIX_HPET_BASE) + a)
-#define hpet_writel(d,a) writel(d, (void *)fix_to_virt(FIX_HPET_BASE) + a)
+#define hpet_readl(a) readl((const void __iomem *)fix_to_virt(FIX_HPET_BASE) + a)
+#define hpet_writel(d,a) writel(d, (void __iomem *)fix_to_virt(FIX_HPET_BASE) + a)
/* vsyscall space (readonly) */
extern struct vxtime_data __vxtime;
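The __iomem casts above only affect sparse type-checking; call sites are unchanged. A usage sketch, assuming the HPET_CFG and HPET_CFG_ENABLE register definitions from asm/hpet.h:

	unsigned int cfg = hpet_readl(HPET_CFG);
	hpet_writel(cfg | HPET_CFG_ENABLE, HPET_CFG);	/* start main counter */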