path: root/include
author     Russell King <rmk@dyn-67.arm.linux.org.uk>    2008-07-26 23:04:59 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2008-07-26 23:04:59 +0100
commit     d9ecdb282c91952796b7542c4f57fd6de6948d7b (patch)
tree       fd4de7923968afa7d2981fb037e2255fc2cfa1e1 /include
parent     [ARM] fix nwflash.c: 6ee8928d94841aa764aeaf645ad16daff811dc26 (diff)
parent     [ARM] update defconfig for eseries. (diff)
Merge branch 'for_rmk_13' of git://git.mnementh.co.uk/linux-2.6-im
Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/dma-mapping.h | 6
-rw-r--r--  include/asm-alpha/pci.h | 2
-rw-r--r--  include/asm-arm/arch-pxa/pxa25x-udc.h | 2
-rw-r--r--  include/asm-arm/cacheflush.h | 4
-rw-r--r--  include/asm-arm/dma-mapping.h | 2
-rw-r--r--  include/asm-avr32/dma-mapping.h | 2
-rw-r--r--  include/asm-blackfin/bfin-global.h | 8
-rw-r--r--  include/asm-blackfin/dma.h | 8
-rw-r--r--  include/asm-blackfin/dpmc.h | 82
-rw-r--r--  include/asm-blackfin/elf.h | 2
-rw-r--r--  include/asm-blackfin/gpio.h | 19
-rw-r--r--  include/asm-blackfin/mach-bf527/anomaly.h | 2
-rw-r--r--  include/asm-blackfin/mach-bf527/bfin_sir.h | 21
-rw-r--r--  include/asm-blackfin/mach-bf527/defBF527.h | 1
-rw-r--r--  include/asm-blackfin/mach-bf527/mem_init.h | 27
-rw-r--r--  include/asm-blackfin/mach-bf533/bfin_sir.h | 13
-rw-r--r--  include/asm-blackfin/mach-bf533/mem_init.h | 27
-rw-r--r--  include/asm-blackfin/mach-bf537/bfin_sir.h | 21
-rw-r--r--  include/asm-blackfin/mach-bf537/defBF537.h | 1
-rw-r--r--  include/asm-blackfin/mach-bf537/mem_init.h | 27
-rw-r--r--  include/asm-blackfin/mach-bf548/bfin_sir.h | 37
-rw-r--r--  include/asm-blackfin/mach-bf548/gpio.h | 8
-rw-r--r--  include/asm-blackfin/mach-bf561/bfin_sir.h | 13
-rw-r--r--  include/asm-blackfin/mach-bf561/mem_init.h | 27
-rw-r--r--  include/asm-blackfin/module.h | 5
-rw-r--r--  include/asm-blackfin/processor.h | 21
-rw-r--r--  include/asm-cris/dma-mapping.h | 2
-rw-r--r--  include/asm-frv/dma-mapping.h | 2
-rw-r--r--  include/asm-generic/dma-mapping-broken.h | 2
-rw-r--r--  include/asm-generic/dma-mapping.h | 4
-rw-r--r--  include/asm-generic/pci-dma-compat.h | 4
-rw-r--r--  include/asm-generic/syscall.h | 141
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 2
-rw-r--r--  include/asm-ia64/machvec.h | 2
-rw-r--r--  include/asm-m68k/dma-mapping.h | 2
-rw-r--r--  include/asm-mips/dma-mapping.h | 2
-rw-r--r--  include/asm-mn10300/dma-mapping.h | 2
-rw-r--r--  include/asm-parisc/cacheflush.h | 4
-rw-r--r--  include/asm-parisc/dma-mapping.h | 2
-rw-r--r--  include/asm-powerpc/dma-mapping.h | 2
-rw-r--r--  include/asm-sh/dma-mapping.h | 2
-rw-r--r--  include/asm-sparc/dma-mapping_64.h | 2
-rw-r--r--  include/asm-sparc/pci_32.h | 3
-rw-r--r--  include/asm-sparc/pci_64.h | 5
-rw-r--r--  include/asm-x86/device.h | 3
-rw-r--r--  include/asm-x86/dma-mapping.h | 99
-rw-r--r--  include/asm-x86/gpio.h | 6
-rw-r--r--  include/asm-x86/iommu.h | 1
-rw-r--r--  include/asm-x86/kexec.h | 18
-rw-r--r--  include/asm-x86/mach-summit/mach_apic.h | 2
-rw-r--r--  include/asm-x86/pgtable.h | 9
-rw-r--r--  include/asm-x86/swiotlb.h | 2
-rw-r--r--  include/asm-x86/uaccess.h | 1
-rw-r--r--  include/asm-xtensa/dma-mapping.h | 2
-rw-r--r--  include/linux/aio.h | 1
-rw-r--r--  include/linux/crash_dump.h | 6
-rw-r--r--  include/linux/fs.h | 2
-rw-r--r--  include/linux/i2o.h | 2
-rw-r--r--  include/linux/init.h | 7
-rw-r--r--  include/linux/kexec.h | 17
-rw-r--r--  include/linux/memstick.h | 6
-rw-r--r--  include/linux/mm.h | 34
-rw-r--r--  include/linux/pagemap.h | 111
-rw-r--r--  include/linux/parport.h | 3
-rw-r--r--  include/linux/percpu.h | 29
-rw-r--r--  include/linux/ptrace.h | 72
-rw-r--r--  include/linux/radix-tree.h | 12
-rw-r--r--  include/linux/relay.h | 5
-rw-r--r--  include/linux/rtc.h | 2
-rw-r--r--  include/linux/sched.h | 28
-rw-r--r--  include/linux/slab.h | 2
-rw-r--r--  include/linux/slub_def.h | 2
-rw-r--r--  include/linux/smp.h | 5
-rw-r--r--  include/linux/ssb/ssb.h | 4
-rw-r--r--  include/linux/suspend.h | 2
-rw-r--r--  include/linux/swap.h | 3
-rw-r--r--  include/linux/tracehook.h | 575
-rw-r--r--  include/rdma/ib_verbs.h | 2
-rw-r--r--  include/video/atmel_lcdc.h | 1
79 files changed, 1288 insertions, 361 deletions
diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h
index db351d1296f4..a5801ae02e4b 100644
--- a/include/asm-alpha/dma-mapping.h
+++ b/include/asm-alpha/dma-mapping.h
@@ -24,8 +24,8 @@
pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir)
#define dma_supported(dev, mask) \
pci_dma_supported(alpha_gendev_to_pci(dev), mask)
-#define dma_mapping_error(addr) \
- pci_dma_mapping_error(addr)
+#define dma_mapping_error(dev, addr) \
+ pci_dma_mapping_error(alpha_gendev_to_pci(dev), addr)
#else /* no PCI - no IOMMU. */
@@ -45,7 +45,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
#define dma_unmap_page(dev, addr, size, dir) ((void)0)
#define dma_unmap_sg(dev, sg, nents, dir) ((void)0)
-#define dma_mapping_error(addr) (0)
+#define dma_mapping_error(dev, addr) (0)
#endif /* !CONFIG_PCI */
diff --git a/include/asm-alpha/pci.h b/include/asm-alpha/pci.h
index d31fd49ff79a..2a14302c17a3 100644
--- a/include/asm-alpha/pci.h
+++ b/include/asm-alpha/pci.h
@@ -106,7 +106,7 @@ extern dma_addr_t pci_map_page(struct pci_dev *, struct page *,
/* Test for pci_map_single or pci_map_page having generated an error. */
static inline int
-pci_dma_mapping_error(dma_addr_t dma_addr)
+pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
{
return dma_addr == 0;
}
diff --git a/include/asm-arm/arch-pxa/pxa25x-udc.h b/include/asm-arm/arch-pxa/pxa25x-udc.h
index 840305916b6d..1b80a4805a60 100644
--- a/include/asm-arm/arch-pxa/pxa25x-udc.h
+++ b/include/asm-arm/arch-pxa/pxa25x-udc.h
@@ -2,7 +2,7 @@
#define _ASM_ARCH_PXA25X_UDC_H
#ifdef _ASM_ARCH_PXA27X_UDC_H
-#error You can't include both PXA25x and PXA27x UDC support
+#error "You can't include both PXA25x and PXA27x UDC support"
#endif
#define UDC_RES1 __REG(0x40600004) /* UDC Undocumented - Reserved1 */
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index 70b0fe724b62..03cf1ee977b7 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -424,9 +424,9 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
}
#define flush_dcache_mmap_lock(mapping) \
- write_lock_irq(&(mapping)->tree_lock)
+ spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
- write_unlock_irq(&(mapping)->tree_lock)
+ spin_unlock_irq(&(mapping)->tree_lock)
#define flush_icache_user_range(vma,page,addr,len) \
flush_dcache_page(page)
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index e99406a7bece..f41335ba6337 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -56,7 +56,7 @@ static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == ~0;
}
diff --git a/include/asm-avr32/dma-mapping.h b/include/asm-avr32/dma-mapping.h
index 57dc672bab8e..0399359ab5d8 100644
--- a/include/asm-avr32/dma-mapping.h
+++ b/include/asm-avr32/dma-mapping.h
@@ -35,7 +35,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
/*
* dma_map_single can't fail as it is implemented now.
*/
-static inline int dma_mapping_error(dma_addr_t addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t addr)
{
return 0;
}
diff --git a/include/asm-blackfin/bfin-global.h b/include/asm-blackfin/bfin-global.h
index 76033831eb35..320aa5e167e9 100644
--- a/include/asm-blackfin/bfin-global.h
+++ b/include/asm-blackfin/bfin-global.h
@@ -92,16 +92,20 @@ extern void *l1_data_B_sram_alloc(size_t);
extern void *l1_inst_sram_alloc(size_t);
extern void *l1_data_sram_alloc(size_t);
extern void *l1_data_sram_zalloc(size_t);
+extern void *l2_sram_alloc(size_t);
+extern void *l2_sram_zalloc(size_t);
extern int l1_data_A_sram_free(const void*);
extern int l1_data_B_sram_free(const void*);
extern int l1_inst_sram_free(const void*);
extern int l1_data_sram_free(const void*);
+extern int l2_sram_free(const void *);
extern int sram_free(const void*);
#define L1_INST_SRAM 0x00000001
#define L1_DATA_A_SRAM 0x00000002
#define L1_DATA_B_SRAM 0x00000004
#define L1_DATA_SRAM 0x00000006
+#define L2_SRAM 0x00000008
extern void *sram_alloc_with_lsl(size_t, unsigned long);
extern int sram_free_with_lsl(const void*);
@@ -114,7 +118,9 @@ extern struct file_operations dpmc_fops;
extern unsigned long _ramstart, _ramend, _rambase;
extern unsigned long memory_start, memory_end, physical_mem_end;
extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[],
- _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _ebss_b_l1[];
+ _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _ebss_b_l1[],
+ _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[],
+ _ebss_l2[], _l2_lma_start[];
#ifdef CONFIG_MTD_UCLINUX
extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
diff --git a/include/asm-blackfin/dma.h b/include/asm-blackfin/dma.h
index c0d5259e315b..3cd4b522aa3f 100644
--- a/include/asm-blackfin/dma.h
+++ b/include/asm-blackfin/dma.h
@@ -144,8 +144,16 @@ struct dma_channel {
void *data;
unsigned int dma_enable_flag;
unsigned int loopback_flag;
+#ifdef CONFIG_PM
+ unsigned short saved_peripheral_map;
+#endif
};
+#ifdef CONFIG_PM
+int blackfin_dma_suspend(void);
+void blackfin_dma_resume(void);
+#endif
+
/*******************************************************************************
* DMA API's
*******************************************************************************/
diff --git a/include/asm-blackfin/dpmc.h b/include/asm-blackfin/dpmc.h
index 7f34cd384f12..de28e6e018b3 100644
--- a/include/asm-blackfin/dpmc.h
+++ b/include/asm-blackfin/dpmc.h
@@ -7,63 +7,18 @@
#ifndef _BLACKFIN_DPMC_H_
#define _BLACKFIN_DPMC_H_
-#define SLEEP_MODE 1
-#define DEEP_SLEEP_MODE 2
-#define ACTIVE_PLL_DISABLED 3
-#define FULLON_MODE 4
-#define ACTIVE_PLL_ENABLED 5
-#define HIBERNATE_MODE 6
-
-#define IOCTL_FULL_ON_MODE _IO('s', 0xA0)
-#define IOCTL_ACTIVE_MODE _IO('s', 0xA1)
-#define IOCTL_SLEEP_MODE _IO('s', 0xA2)
-#define IOCTL_DEEP_SLEEP_MODE _IO('s', 0xA3)
-#define IOCTL_HIBERNATE_MODE _IO('s', 0xA4)
-#define IOCTL_CHANGE_FREQUENCY _IOW('s', 0xA5, unsigned long)
-#define IOCTL_CHANGE_VOLTAGE _IOW('s', 0xA6, unsigned long)
-#define IOCTL_SET_CCLK _IOW('s', 0xA7, unsigned long)
-#define IOCTL_SET_SCLK _IOW('s', 0xA8, unsigned long)
-#define IOCTL_GET_PLLSTATUS _IOW('s', 0xA9, unsigned long)
-#define IOCTL_GET_CORECLOCK _IOW('s', 0xAA, unsigned long)
-#define IOCTL_GET_SYSTEMCLOCK _IOW('s', 0xAB, unsigned long)
-#define IOCTL_GET_VCO _IOW('s', 0xAC, unsigned long)
-#define IOCTL_DISABLE_WDOG_TIMER _IO('s', 0xAD)
-#define IOCTL_UNMASK_WDOG_WAKEUP_EVENT _IO('s',0xAE)
-#define IOCTL_PROGRAM_WDOG_TIMER _IOW('s',0xAF,unsigned long)
-#define IOCTL_CLEAR_WDOG_WAKEUP_EVENT _IO('s',0xB0)
-#define IOCTL_SLEEP_DEEPER_MODE _IO('s',0xB1)
-
-#define DPMC_MINOR 254
-
-#define ON 0
-#define OFF 1
-
#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
-unsigned long calc_volt(void);
-int calc_vlev(int vlt);
-unsigned long change_voltage(unsigned long volt);
-int calc_msel(int vco_hz);
-unsigned long change_frequency(unsigned long vco_mhz);
-int set_pll_div(unsigned short sel, unsigned char flag);
-int get_vco(void);
-unsigned long change_system_clock(unsigned long clock);
-unsigned long change_core_clock(unsigned long clock);
-unsigned long get_pll_status(void);
-void change_baud(int baud);
-void fullon_mode(void);
-void active_mode(void);
void sleep_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
void deep_sleep(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
void hibernate_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
void sleep_deeper(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
-void program_wdog_timer(unsigned long);
-void unmask_wdog_wakeup_evt(void);
-void clear_wdog_wakeup_evt(void);
-void disable_wdog_timer(void);
+void do_hibernate(int wakeup);
+void set_dram_srfs(void);
+void unset_dram_srfs(void);
-extern unsigned long get_cclk(void);
-extern unsigned long get_sclk(void);
+#define VRPAIR(vlev, freq) (((vlev) << 16) | ((freq) >> 16))
struct bfin_dpmc_platform_data {
const unsigned int *tuple_tab;
@@ -71,8 +26,33 @@ struct bfin_dpmc_platform_data {
unsigned short vr_settling_time; /* in us */
};
-#define VRPAIR(vlev, freq) (((vlev) << 16) | ((freq) >> 16))
+#else
+
+#define PM_PUSH(x) \
+ R0 = [P0 + (x - SRAM_BASE_ADDRESS)];\
+ [--SP] = R0;\
+
+#define PM_POP(x) \
+ R0 = [SP++];\
+ [P0 + (x - SRAM_BASE_ADDRESS)] = R0;\
+
+#define PM_SYS_PUSH(x) \
+ R0 = [P0 + (x - PLL_CTL)];\
+ [--SP] = R0;\
+
+#define PM_SYS_POP(x) \
+ R0 = [SP++];\
+ [P0 + (x - PLL_CTL)] = R0;\
+
+#define PM_SYS_PUSH16(x) \
+ R0 = w[P0 + (x - PLL_CTL)];\
+ [--SP] = R0;\
+
+#define PM_SYS_POP16(x) \
+ R0 = [SP++];\
+ w[P0 + (x - PLL_CTL)] = R0;\
+#endif
#endif /* __KERNEL__ */
#endif /*_BLACKFIN_DPMC_H_*/
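
The new VRPAIR() macro packs a voltage level and a VCO frequency into a single tuple for the bfin_dpmc_platform_data table. As a rough, hedged sketch of how a board file might populate such a table — the VLEV_* values, the tabsize member and all names here are illustrative assumptions, not taken from this diff:

/*
 * Illustrative only: a hypothetical Blackfin board supplying a
 * voltage/frequency tuple table to the DPMC driver.  The VLEV_* values,
 * the tabsize member and every name are placeholders.
 */
static const unsigned int board_tuple_tab[] = {
        VRPAIR(VLEV_100, 400000000),    /* 1.00 V up to 400 MHz VCO */
        VRPAIR(VLEV_110, 500000000),    /* 1.10 V up to 500 MHz VCO */
        VRPAIR(VLEV_120, 600000000),    /* 1.20 V up to 600 MHz VCO */
};

static struct bfin_dpmc_platform_data board_dpmc_data = {
        .tuple_tab        = board_tuple_tab,
        .tabsize          = ARRAY_SIZE(board_tuple_tab),  /* assumed member */
        .vr_settling_time = 25,                           /* in us */
};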
diff --git a/include/asm-blackfin/elf.h b/include/asm-blackfin/elf.h
index 30303fc8292c..67a03a8a353e 100644
--- a/include/asm-blackfin/elf.h
+++ b/include/asm-blackfin/elf.h
@@ -15,6 +15,8 @@
#define EF_BFIN_FDPIC 0x00000002 /* -mfdpic */
#define EF_BFIN_CODE_IN_L1 0x00000010 /* --code-in-l1 */
#define EF_BFIN_DATA_IN_L1 0x00000020 /* --data-in-l1 */
+#define EF_BFIN_CODE_IN_L2 0x00000040 /* --code-in-l2 */
+#define EF_BFIN_DATA_IN_L2 0x00000080 /* --data-in-l2 */
typedef unsigned long elf_greg_t;
diff --git a/include/asm-blackfin/gpio.h b/include/asm-blackfin/gpio.h
index ff95e9d88342..168f1251eb4d 100644
--- a/include/asm-blackfin/gpio.h
+++ b/include/asm-blackfin/gpio.h
@@ -376,8 +376,12 @@ struct gpio_port_t {
#endif
#ifdef CONFIG_PM
-unsigned int bfin_pm_setup(void);
-void bfin_pm_restore(void);
+
+unsigned int bfin_pm_standby_setup(void);
+void bfin_pm_standby_restore(void);
+
+void bfin_gpio_pm_hibernate_restore(void);
+void bfin_gpio_pm_hibernate_suspend(void);
#ifndef CONFIG_BF54x
#define PM_WAKE_RISING 0x1
@@ -392,17 +396,8 @@ void gpio_pm_wakeup_free(unsigned gpio);
struct gpio_port_s {
unsigned short data;
- unsigned short data_clear;
- unsigned short data_set;
- unsigned short toggle;
unsigned short maska;
- unsigned short maska_clear;
- unsigned short maska_set;
- unsigned short maska_toggle;
unsigned short maskb;
- unsigned short maskb_clear;
- unsigned short maskb_set;
- unsigned short maskb_toggle;
unsigned short dir;
unsigned short polar;
unsigned short edge;
@@ -411,10 +406,10 @@ struct gpio_port_s {
unsigned short fer;
unsigned short reserved;
+ unsigned short mux;
};
#endif /*CONFIG_BF54x*/
#endif /*CONFIG_PM*/
-
/***********************************************************
*
* FUNCTIONS: Blackfin GPIO Driver
diff --git a/include/asm-blackfin/mach-bf527/anomaly.h b/include/asm-blackfin/mach-bf527/anomaly.h
index 4725268a5ada..b7b166f4f064 100644
--- a/include/asm-blackfin/mach-bf527/anomaly.h
+++ b/include/asm-blackfin/mach-bf527/anomaly.h
@@ -23,6 +23,8 @@
#define ANOMALY_05000245 (1)
/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
#define ANOMALY_05000265 (1)
+/* New Feature: EMAC TX DMA Word Alignment */
+#define ANOMALY_05000285 (1)
/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
#define ANOMALY_05000312 (1)
/* Incorrect Access of OTP_STATUS During otp_write() Function */
diff --git a/include/asm-blackfin/mach-bf527/bfin_sir.h b/include/asm-blackfin/mach-bf527/bfin_sir.h
index 0612d0c9501c..cfd8ad4f1f2c 100644
--- a/include/asm-blackfin/mach-bf527/bfin_sir.h
+++ b/include/asm-blackfin/mach-bf527/bfin_sir.h
@@ -118,16 +118,25 @@ static inline void SIR_UART_CLEAR_LSR(struct bfin_sir_port *port)
#define DRIVER_NAME "bfin_sir"
-static void bfin_sir_hw_init(void)
+static int bfin_sir_hw_init(void)
{
+ int ret = -ENODEV;
#ifdef CONFIG_BFIN_SIR0
- peripheral_request(P_UART0_TX, DRIVER_NAME);
- peripheral_request(P_UART0_RX, DRIVER_NAME);
+ ret = peripheral_request(P_UART0_TX, DRIVER_NAME);
+ if (ret)
+ return ret;
+ ret = peripheral_request(P_UART0_RX, DRIVER_NAME);
+ if (ret)
+ return ret;
#endif
#ifdef CONFIG_BFIN_SIR1
- peripheral_request(P_UART1_TX, DRIVER_NAME);
- peripheral_request(P_UART1_RX, DRIVER_NAME);
+ ret = peripheral_request(P_UART1_TX, DRIVER_NAME);
+ if (ret)
+ return ret;
+ ret = peripheral_request(P_UART1_RX, DRIVER_NAME);
+ if (ret)
+ return ret;
#endif
- SSYNC();
+ return ret;
}
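
Across all of the bfin_sir.h variants in this merge, bfin_sir_hw_init() changes from void to int so that peripheral_request() failures propagate to the caller. A minimal, hedged sketch of a caller honouring the new contract (the function name is made up):

/* Hypothetical caller; the function name is illustrative only. */
static int bfin_sir_setup_ports(void)
{
        int err = bfin_sir_hw_init();   /* now reports pin request failures */

        if (err)
                return err;
        /* ... continue with SIR/UART port configuration ... */
        return 0;
}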
diff --git a/include/asm-blackfin/mach-bf527/defBF527.h b/include/asm-blackfin/mach-bf527/defBF527.h
index 82134f578f32..f1a70db70cb8 100644
--- a/include/asm-blackfin/mach-bf527/defBF527.h
+++ b/include/asm-blackfin/mach-bf527/defBF527.h
@@ -302,6 +302,7 @@
#define PHYIE 0x00000001 /* PHY_INT Interrupt Enable */
#define RXDWA 0x00000002 /* Receive Frame DMA Word Alignment (Odd/Even*) */
#define RXCKS 0x00000004 /* Enable RX Frame TCP/UDP Checksum Computation */
+#define TXDWA 0x00000010 /* Transmit Frame DMA Word Alignment (Odd/Even*) */
#define MDCDIV 0x00003F00 /* SCLK:MDC Clock Divisor [MDC=SCLK/(2*(N+1))] */
#define SET_MDCDIV(x) (((x)&0x3F)<< 8) /* Set MDC Clock Divisor */
diff --git a/include/asm-blackfin/mach-bf527/mem_init.h b/include/asm-blackfin/mach-bf527/mem_init.h
index 008ca66719e2..cbe03f4a5698 100644
--- a/include/asm-blackfin/mach-bf527/mem_init.h
+++ b/include/asm-blackfin/mach-bf527/mem_init.h
@@ -146,33 +146,6 @@
#define SDRAM_CL CL_3
#endif
-#if (CONFIG_MEM_SIZE == 128)
-#define SDRAM_SIZE EBSZ_128
-#endif
-#if (CONFIG_MEM_SIZE == 64)
-#define SDRAM_SIZE EBSZ_64
-#endif
-#if (CONFIG_MEM_SIZE == 32)
-#define SDRAM_SIZE EBSZ_32
-#endif
-#if (CONFIG_MEM_SIZE == 16)
-#define SDRAM_SIZE EBSZ_16
-#endif
-#if (CONFIG_MEM_ADD_WIDTH == 11)
-#define SDRAM_WIDTH EBCAW_11
-#endif
-#if (CONFIG_MEM_ADD_WIDTH == 10)
-#define SDRAM_WIDTH EBCAW_10
-#endif
-#if (CONFIG_MEM_ADD_WIDTH == 9)
-#define SDRAM_WIDTH EBCAW_9
-#endif
-#if (CONFIG_MEM_ADD_WIDTH == 8)
-#define SDRAM_WIDTH EBCAW_8
-#endif
-
-#define mem_SDBCTL (SDRAM_WIDTH | SDRAM_SIZE | EBE)
-
/* Equation from section 17 (p17-46) of BF533 HRM */
#define mem_SDRRC (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num)
diff --git a/include/asm-blackfin/mach-bf533/bfin_sir.h b/include/asm-blackfin/mach-bf533/bfin_sir.h
index cefcf8bb505b..9bb87e9e2e9b 100644
--- a/include/asm-blackfin/mach-bf533/bfin_sir.h
+++ b/include/asm-blackfin/mach-bf533/bfin_sir.h
@@ -110,11 +110,16 @@ static inline void SIR_UART_CLEAR_LSR(struct bfin_sir_port *port)
#define DRIVER_NAME "bfin_sir"
-static void bfin_sir_hw_init(void)
+static int bfin_sir_hw_init(void)
{
+ int ret = -ENODEV;
#ifdef CONFIG_BFIN_SIR0
- peripheral_request(P_UART0_TX, DRIVER_NAME);
- peripheral_request(P_UART0_RX, DRIVER_NAME);
+ ret = peripheral_request(P_UART0_TX, DRIVER_NAME);
+ if (ret)
+ return ret;
+ ret = peripheral_request(P_UART0_RX, DRIVER_NAME);
+ if (ret)
+ return ret;
#endif
- SSYNC();
+ return ret;
}
diff --git a/include/asm-blackfin/mach-bf533/mem_init.h b/include/asm-blackfin/mach-bf533/mem_init.h
index f8f31901fca9..995c06b2b1ef 100644
--- a/include/asm-blackfin/mach-bf533/mem_init.h
+++ b/include/asm-blackfin/mach-bf533/mem_init.h
@@ -133,33 +133,6 @@
#define SDRAM_CL CL_3
#endif
-#if (CONFIG_MEM_SIZE == 128)
-#define SDRAM_SIZE EBSZ_128
-#endif
-#if (CONFIG_MEM_SIZE == 64)
-#define SDRAM_SIZE EBSZ_64
-#endif
-#if (CONFIG_MEM_SIZE == 32)
-#define SDRAM_SIZE EBSZ_32
-#endif
-#if (CONFIG_MEM_SIZE == 16)
-#define SDRAM_SIZE EBSZ_16
-#endif
-#if (CONFIG_MEM_ADD_WIDTH == 11)
-#define SDRAM_WIDTH EBCAW_11
-#endif
-#if (CONFIG_MEM_ADD_WIDTH == 10)
-#define SDRAM_WIDTH EBCAW_10
-#endif
-#if (CONFIG_MEM_ADD_WIDTH == 9)
-#define SDRAM_WIDTH EBCAW_9
-#endif
-#if (CONFIG_MEM_ADD_WIDTH == 8)
-#define SDRAM_WIDTH EBCAW_8
-#endif
-
-#define mem_SDBCTL (SDRAM_WIDTH | SDRAM_SIZE | EBE)
-
/* Equation from section 17 (p17-46) of BF533 HRM */
#define mem_SDRRC (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num)
diff --git a/include/asm-blackfin/mach-bf537/bfin_sir.h b/include/asm-blackfin/mach-bf537/bfin_sir.h
index 0612d0c9501c..cfd8ad4f1f2c 100644
--- a/include/asm-blackfin/mach-bf537/bfin_sir.h
+++ b/include/asm-blackfin/mach-bf537/bfin_sir.h
@@ -118,16 +118,25 @@ static inline void SIR_UART_CLEAR_LSR(struct bfin_sir_port *port)
#define DRIVER_NAME "bfin_sir"
-static void bfin_sir_hw_init(void)
+static int bfin_sir_hw_init(void)
{
+ int ret = -ENODEV;
#ifdef CONFIG_BFIN_SIR0
- peripheral_request(P_UART0_TX, DRIVER_NAME);
- peripheral_request(P_UART0_RX, DRIVER_NAME);
+ ret = peripheral_request(P_UART0_TX, DRIVER_NAME);
+ if (ret)
+ return ret;
+ ret = peripheral_request(P_UART0_RX, DRIVER_NAME);
+ if (ret)
+ return ret;
#endif
#ifdef CONFIG_BFIN_SIR1
- peripheral_request(P_UART1_TX, DRIVER_NAME);
- peripheral_request(P_UART1_RX, DRIVER_NAME);
+ ret = peripheral_request(P_UART1_TX, DRIVER_NAME);
+ if (ret)
+ return ret;
+ ret = peripheral_request(P_UART1_RX, DRIVER_NAME);
+ if (ret)
+ return ret;
#endif
- SSYNC();
+ return ret;
}
diff --git a/include/asm-blackfin/mach-bf537/defBF537.h b/include/asm-blackfin/mach-bf537/defBF537.h
index 3f455909c418..abde24c6d3b1 100644
--- a/include/asm-blackfin/mach-bf537/defBF537.h
+++ b/include/asm-blackfin/mach-bf537/defBF537.h
@@ -290,6 +290,7 @@
#define PHYIE 0x00000001 /* PHY_INT Interrupt Enable */
#define RXDWA 0x00000002 /* Receive Frame DMA Word Alignment (Odd/Even*) */
#define RXCKS 0x00000004 /* Enable RX Frame TCP/UDP Checksum Computation */
+#define TXDWA 0x00000010 /* Transmit Frame DMA Word Alignment (Odd/Even*) */
#define MDCDIV 0x00003F00 /* SCLK:MDC Clock Divisor [MDC=SCLK/(2*(N+1))] */
#define SET_MDCDIV(x) (((x)&0x3F)<< 8) /* Set MDC Clock Divisor */
diff --git a/include/asm-blackfin/mach-bf537/mem_init.h b/include/asm-blackfin/mach-bf537/mem_init.h
index 9ad979d416c6..f67698f670ca 100644
--- a/include/asm-blackfin/mach-bf537/mem_init.h
+++ b/include/asm-blackfin/mach-bf537/mem_init.h
@@ -139,33 +139,6 @@
#define SDRAM_CL CL_3
#endif
-#if (CONFIG_MEM_SIZE == 128)
-#define SDRAM_SIZE EBSZ_128
-#endif
-#if (CONFIG_MEM_SIZE == 64)
-#define SDRAM_SIZE EBSZ_64
-#endif
-#if (CONFIG_MEM_SIZE == 32)
-#define SDRAM_SIZE EBSZ_32
-#endif
-#if (CONFIG_MEM_SIZE == 16)
-#define SDRAM_SIZE EBSZ_16
-#endif
-#if (CONFIG_MEM_ADD_WIDTH == 11)
-#define SDRAM_WIDTH EBCAW_11
-#endif
-#if (CONFIG_MEM_ADD_WIDTH == 10)
-#define SDRAM_WIDTH EBCAW_10
-#endif
-#if (CONFIG_MEM_ADD_WIDTH == 9)
-#define SDRAM_WIDTH EBCAW_9
-#endif
-#if (CONFIG_MEM_ADD_WIDTH == 8)
-#define SDRAM_WIDTH EBCAW_8
-#endif
-
-#define mem_SDBCTL (SDRAM_WIDTH | SDRAM_SIZE | EBE)
-
/* Equation from section 17 (p17-46) of BF533 HRM */
#define mem_SDRRC (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num)
diff --git a/include/asm-blackfin/mach-bf548/bfin_sir.h b/include/asm-blackfin/mach-bf548/bfin_sir.h
index 5e94271c7e3b..c41f9cf00268 100644
--- a/include/asm-blackfin/mach-bf548/bfin_sir.h
+++ b/include/asm-blackfin/mach-bf548/bfin_sir.h
@@ -124,26 +124,43 @@ struct bfin_sir_self {
#define DRIVER_NAME "bfin_sir"
-static void bfin_sir_hw_init(void)
+static int bfin_sir_hw_init(void)
{
+ int ret = -ENODEV;
#ifdef CONFIG_BFIN_SIR0
- peripheral_request(P_UART0_TX, DRIVER_NAME);
- peripheral_request(P_UART0_RX, DRIVER_NAME);
+ ret = peripheral_request(P_UART0_TX, DRIVER_NAME);
+ if (ret)
+ return ret;
+ ret = peripheral_request(P_UART0_RX, DRIVER_NAME);
+ if (ret)
+ return ret;
#endif
#ifdef CONFIG_BFIN_SIR1
- peripheral_request(P_UART1_TX, DRIVER_NAME);
- peripheral_request(P_UART1_RX, DRIVER_NAME);
+ ret = peripheral_request(P_UART1_TX, DRIVER_NAME);
+ if (ret)
+ return ret;
+ ret = peripheral_request(P_UART1_RX, DRIVER_NAME);
+ if (ret)
+ return ret;
#endif
#ifdef CONFIG_BFIN_SIR2
- peripheral_request(P_UART2_TX, DRIVER_NAME);
- peripheral_request(P_UART2_RX, DRIVER_NAME);
+ ret = peripheral_request(P_UART2_TX, DRIVER_NAME);
+ if (ret)
+ return ret;
+ ret = peripheral_request(P_UART2_RX, DRIVER_NAME);
+ if (ret)
+ return ret;
#endif
#ifdef CONFIG_BFIN_SIR3
- peripheral_request(P_UART3_TX, DRIVER_NAME);
- peripheral_request(P_UART3_RX, DRIVER_NAME);
+ ret = peripheral_request(P_UART3_TX, DRIVER_NAME);
+ if (ret)
+ return ret;
+ ret = peripheral_request(P_UART3_RX, DRIVER_NAME);
+ if (ret)
+ return ret;
#endif
- SSYNC();
+ return ret;
}
diff --git a/include/asm-blackfin/mach-bf548/gpio.h b/include/asm-blackfin/mach-bf548/gpio.h
index cb8b0f15c9a6..bba82dc75f16 100644
--- a/include/asm-blackfin/mach-bf548/gpio.h
+++ b/include/asm-blackfin/mach-bf548/gpio.h
@@ -209,3 +209,11 @@ struct gpio_port_t {
unsigned short dummy7;
unsigned int port_mux;
};
+
+struct gpio_port_s {
+ unsigned short fer;
+ unsigned short data;
+ unsigned short dir;
+ unsigned short inen;
+ unsigned int mux;
+};
diff --git a/include/asm-blackfin/mach-bf561/bfin_sir.h b/include/asm-blackfin/mach-bf561/bfin_sir.h
index cefcf8bb505b..9bb87e9e2e9b 100644
--- a/include/asm-blackfin/mach-bf561/bfin_sir.h
+++ b/include/asm-blackfin/mach-bf561/bfin_sir.h
@@ -110,11 +110,16 @@ static inline void SIR_UART_CLEAR_LSR(struct bfin_sir_port *port)
#define DRIVER_NAME "bfin_sir"
-static void bfin_sir_hw_init(void)
+static int bfin_sir_hw_init(void)
{
+ int ret = -ENODEV;
#ifdef CONFIG_BFIN_SIR0
- peripheral_request(P_UART0_TX, DRIVER_NAME);
- peripheral_request(P_UART0_RX, DRIVER_NAME);
+ ret = peripheral_request(P_UART0_TX, DRIVER_NAME);
+ if (ret)
+ return ret;
+ ret = peripheral_request(P_UART0_RX, DRIVER_NAME);
+ if (ret)
+ return ret;
#endif
- SSYNC();
+ return ret;
}
diff --git a/include/asm-blackfin/mach-bf561/mem_init.h b/include/asm-blackfin/mach-bf561/mem_init.h
index 439a5895b346..e163260bca18 100644
--- a/include/asm-blackfin/mach-bf561/mem_init.h
+++ b/include/asm-blackfin/mach-bf561/mem_init.h
@@ -131,33 +131,6 @@
#define SDRAM_CL CL_3
#endif
-#if (CONFIG_MEM_SIZE == 128)
-#define SDRAM_SIZE EB0_SZ_128
-#endif
-#if (CONFIG_MEM_SIZE == 64)
-#define SDRAM_SIZE EB0_SZ_64
-#endif
-#if ( CONFIG_MEM_SIZE == 32)
-#define SDRAM_SIZE EB0_SZ_32
-#endif
-#if (CONFIG_MEM_SIZE == 16)
-#define SDRAM_SIZE EB0_SZ_16
-#endif
-#if (CONFIG_MEM_ADD_WIDTH == 11)
-#define SDRAM_WIDTH EB0_CAW_11
-#endif
-#if (CONFIG_MEM_ADD_WIDTH == 10)
-#define SDRAM_WIDTH EB0_CAW_10
-#endif
-#if (CONFIG_MEM_ADD_WIDTH == 9)
-#define SDRAM_WIDTH EB0_CAW_9
-#endif
-#if (CONFIG_MEM_ADD_WIDTH == 8)
-#define SDRAM_WIDTH EB0_CAW_8
-#endif
-
-#define mem_SDBCTL (SDRAM_WIDTH | SDRAM_SIZE | EB0_E)
-
/* Equation from section 17 (p17-46) of BF533 HRM */
#define mem_SDRRC (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num)
diff --git a/include/asm-blackfin/module.h b/include/asm-blackfin/module.h
index 3c7ce1644280..e3128df139d6 100644
--- a/include/asm-blackfin/module.h
+++ b/include/asm-blackfin/module.h
@@ -6,8 +6,6 @@
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
-#define FLG_CODE_IN_L1 0x10
-#define FLG_DATA_IN_L1 0x20
struct mod_arch_specific {
Elf_Shdr *text_l1;
@@ -15,5 +13,8 @@ struct mod_arch_specific {
Elf_Shdr *bss_a_l1;
Elf_Shdr *data_b_l1;
Elf_Shdr *bss_b_l1;
+ Elf_Shdr *text_l2;
+ Elf_Shdr *data_l2;
+ Elf_Shdr *bss_l2;
};
#endif /* _ASM_BFIN_MODULE_H */
diff --git a/include/asm-blackfin/processor.h b/include/asm-blackfin/processor.h
index 1c0040724612..6f3995b119d8 100644
--- a/include/asm-blackfin/processor.h
+++ b/include/asm-blackfin/processor.h
@@ -112,7 +112,26 @@ unsigned long get_wchan(struct task_struct *p);
static inline uint32_t __pure bfin_revid(void)
{
/* stored in the upper 4 bits */
- return bfin_read_CHIPID() >> 28;
+ uint32_t revid = bfin_read_CHIPID() >> 28;
+
+#ifdef CONFIG_BF52x
+ /* ANOMALY_05000357
+ * Incorrect Revision Number in DSPID Register
+ */
+ if (revid == 0)
+ switch (bfin_read16(_BOOTROM_GET_DXE_ADDRESS_TWI)) {
+ case 0x0010:
+ revid = 0;
+ break;
+ case 0x2796:
+ revid = 1;
+ break;
+ default:
+ revid = 0xFFFF;
+ break;
+ }
+#endif
+ return revid;
}
static inline uint32_t __pure bfin_compiled_revid(void)
diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h
index edc8d1bfaae2..cb2fb25ff8d9 100644
--- a/include/asm-cris/dma-mapping.h
+++ b/include/asm-cris/dma-mapping.h
@@ -120,7 +120,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
}
static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
diff --git a/include/asm-frv/dma-mapping.h b/include/asm-frv/dma-mapping.h
index 2e8966ca030d..b2898877c07b 100644
--- a/include/asm-frv/dma-mapping.h
+++ b/include/asm-frv/dma-mapping.h
@@ -126,7 +126,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
}
static inline
-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
index e2468f894d2a..82cd0cb1c3fe 100644
--- a/include/asm-generic/dma-mapping-broken.h
+++ b/include/asm-generic/dma-mapping-broken.h
@@ -61,7 +61,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
#define dma_sync_sg_for_device dma_sync_sg_for_cpu
extern int
-dma_mapping_error(dma_addr_t dma_addr);
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
extern int
dma_supported(struct device *dev, u64 mask);
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index 783ab9944d70..189486c3f92e 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -144,9 +144,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
}
static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- return pci_dma_mapping_error(dma_addr);
+ return pci_dma_mapping_error(to_pci_dev(dev), dma_addr);
}
diff --git a/include/asm-generic/pci-dma-compat.h b/include/asm-generic/pci-dma-compat.h
index 25c10e96b2b7..37b3706226e7 100644
--- a/include/asm-generic/pci-dma-compat.h
+++ b/include/asm-generic/pci-dma-compat.h
@@ -99,9 +99,9 @@ pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
}
static inline int
-pci_dma_mapping_error(dma_addr_t dma_addr)
+pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
{
- return dma_mapping_error(dma_addr);
+ return dma_mapping_error(&pdev->dev, dma_addr);
}
#endif
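
The change that recurs throughout these headers is the addition of a struct device (or struct pci_dev) argument to dma_mapping_error() and pci_dma_mapping_error(). A minimal sketch of the updated calling convention in a driver, with the "foo" structure and its fields invented purely for illustration:

/* Sketch only: the "foo" names are hypothetical. */
static int foo_map_buffer(struct foo_dev *foo, void *buf, size_t len)
{
        dma_addr_t handle;

        handle = dma_map_single(&foo->pdev->dev, buf, len, DMA_TO_DEVICE);
        /* The device is now passed along with the handle. */
        if (dma_mapping_error(&foo->pdev->dev, handle))
                return -ENOMEM;

        foo->dma_handle = handle;
        return 0;
}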
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
new file mode 100644
index 000000000000..abcf34c2fdc7
--- /dev/null
+++ b/include/asm-generic/syscall.h
@@ -0,0 +1,141 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * This file is a stub providing documentation for what functions
+ * asm-ARCH/syscall.h files need to define. Most arch definitions
+ * will be simple inlines.
+ *
+ * All of these functions expect to be called with no locks,
+ * and only when the caller is sure that the task of interest
+ * cannot return to user mode while we are looking at it.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H 1
+
+struct task_struct;
+struct pt_regs;
+
+/**
+ * syscall_get_nr - find what system call a task is executing
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * If @task is executing a system call or is at system call
+ * tracing about to attempt one, returns the system call number.
+ * If @task is not executing a system call, i.e. it's blocked
+ * inside the kernel for a fault or signal, returns -1.
+ *
+ * It's only valid to call this when @task is known to be blocked.
+ */
+long syscall_get_nr(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_rollback - roll back registers after an aborted system call
+ * @task: task of interest, must be in system call exit tracing
+ * @regs: task_pt_regs() of @task
+ *
+ * It's only valid to call this when @task is stopped for system
+ * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT),
+ * after tracehook_report_syscall_entry() returned nonzero to prevent
+ * the system call from taking place.
+ *
+ * This rolls back the register state in @regs so it's as if the
+ * system call instruction was a no-op. The registers containing
+ * the system call number and arguments are as they were before the
+ * system call instruction. This may not be the same as what the
+ * register state looked like at system call entry tracing.
+ */
+void syscall_rollback(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_get_error - check result of traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * Returns 0 if the system call succeeded, or -ERRORCODE if it failed.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+long syscall_get_error(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_get_return_value - get the return value of a traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * Returns the return value of the successful system call.
+ * This value is meaningless if syscall_get_error() returned nonzero.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_set_return_value - change the return value of a traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ * @error: negative error code, or zero to indicate success
+ * @val: user return value if @error is zero
+ *
+ * This changes the results of the system call that user mode will see.
+ * If @error is zero, the user sees a successful system call with a
+ * return value of @val. If @error is nonzero, it's a negated errno
+ * code; the user sees a failed system call with this errno code.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+ int error, long val);
+
+/**
+ * syscall_get_arguments - extract system call parameter values
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ * @i: argument index [0,5]
+ * @n: number of arguments; n+i must be [1,6].
+ * @args: array filled with argument values
+ *
+ * Fetches @n arguments to the system call starting with the @i'th argument
+ * (from 0 through 5). Argument @i is stored in @args[0], and so on.
+ * An arch inline version is probably optimal when @i and @n are constants.
+ *
+ * It's only valid to call this when @task is stopped for tracing on
+ * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * It's invalid to call this with @i + @n > 6; we only support system calls
+ * taking up to 6 arguments.
+ */
+void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n, unsigned long *args);
+
+/**
+ * syscall_set_arguments - change system call parameter value
+ * @task: task of interest, must be in system call entry tracing
+ * @regs: task_pt_regs() of @task
+ * @i: argument index [0,5]
+ * @n: number of arguments; n+i must be [1,6].
+ * @args: array of argument values to store
+ *
+ * Changes @n arguments to the system call starting with the @i'th argument.
+ * Argument @i gets value @args[0], and so on.
+ * An arch inline version is probably optimal when @i and @n are constants.
+ *
+ * It's only valid to call this when @task is stopped for tracing on
+ * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * It's invalid to call this with @i + @n > 6; we only support system calls
+ * taking up to 6 arguments.
+ */
+void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args);
+
+#endif /* _ASM_SYSCALL_H */
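
This header is only a documentation stub; each architecture provides inline implementations in its own asm/syscall.h. As a hedged sketch of the shape such an implementation takes — the register names below belong to an imaginary ABI, not to any real architecture:

/*
 * Hypothetical asm/syscall.h for an imaginary architecture; the register
 * names (orig_r0, r0, r1...) are placeholders, not any real layout.
 * IS_ERR_VALUE() comes from <linux/err.h>.
 */
static inline long syscall_get_nr(struct task_struct *task,
                                  struct pt_regs *regs)
{
        return regs->orig_r0;           /* -1 when not in a syscall */
}

static inline long syscall_get_error(struct task_struct *task,
                                     struct pt_regs *regs)
{
        unsigned long error = regs->r0;

        return IS_ERR_VALUE(error) ? error : 0;
}

static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
                                         unsigned int i, unsigned int n,
                                         unsigned long *args)
{
        /* Arguments live in consecutive registers on this imaginary ABI. */
        while (n--)
                *args++ = (&regs->r1)[i++];
}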
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 729f6b0a60e9..9cd44b162ba1 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -359,6 +359,8 @@
}
#define INITCALLS \
+ *(.initcallearly.init) \
+ __early_initcall_end = .; \
*(.initcall0.init) \
*(.initcall0s.init) \
*(.initcall1.init) \
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index 0721a5e8271e..a6d50c77b6bf 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -54,7 +54,7 @@ typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_
typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
-typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
+typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
typedef int ia64_mv_dma_supported (struct device *, u64);
typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
diff --git a/include/asm-m68k/dma-mapping.h b/include/asm-m68k/dma-mapping.h
index a26cdeb46a57..91f7944333d4 100644
--- a/include/asm-m68k/dma-mapping.h
+++ b/include/asm-m68k/dma-mapping.h
@@ -84,7 +84,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *s
{
}
-static inline int dma_mapping_error(dma_addr_t handle)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
{
return 0;
}
diff --git a/include/asm-mips/dma-mapping.h b/include/asm-mips/dma-mapping.h
index 230b3f1b69b1..c64afb40cd06 100644
--- a/include/asm-mips/dma-mapping.h
+++ b/include/asm-mips/dma-mapping.h
@@ -42,7 +42,7 @@ extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction direction);
extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction direction);
-extern int dma_mapping_error(dma_addr_t dma_addr);
+extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
extern int dma_supported(struct device *dev, u64 mask);
static inline int
diff --git a/include/asm-mn10300/dma-mapping.h b/include/asm-mn10300/dma-mapping.h
index 7c882fca9ec8..ccae8f6c6326 100644
--- a/include/asm-mn10300/dma-mapping.h
+++ b/include/asm-mn10300/dma-mapping.h
@@ -182,7 +182,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
}
static inline
-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h
index 2f1e1b05440a..b7ca6dc7fddc 100644
--- a/include/asm-parisc/cacheflush.h
+++ b/include/asm-parisc/cacheflush.h
@@ -45,9 +45,9 @@ void flush_cache_mm(struct mm_struct *mm);
extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping) \
- write_lock_irq(&(mapping)->tree_lock)
+ spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
- write_unlock_irq(&(mapping)->tree_lock)
+ spin_unlock_irq(&(mapping)->tree_lock)
#define flush_icache_page(vma,page) do { \
flush_kernel_dcache_page(page); \
diff --git a/include/asm-parisc/dma-mapping.h b/include/asm-parisc/dma-mapping.h
index c6c0e9ff6bde..53af696f23d2 100644
--- a/include/asm-parisc/dma-mapping.h
+++ b/include/asm-parisc/dma-mapping.h
@@ -248,6 +248,6 @@ void * sba_get_iommu(struct parisc_device *dev);
#endif
/* At the moment, we panic on error for IOMMU resource exaustion */
-#define dma_mapping_error(x) 0
+#define dma_mapping_error(dev, x) 0
#endif
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 74c549780987..c7ca45f97dd2 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -415,7 +415,7 @@ static inline void dma_sync_sg_for_device(struct device *dev,
__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
return (dma_addr == DMA_ERROR_CODE);
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 22cc419389fe..6c0b8a2de143 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -171,7 +171,7 @@ static inline int dma_get_cache_alignment(void)
return L1_CACHE_BYTES;
}
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == 0;
}
diff --git a/include/asm-sparc/dma-mapping_64.h b/include/asm-sparc/dma-mapping_64.h
index 38cbec76a33f..bfa64f9702d5 100644
--- a/include/asm-sparc/dma-mapping_64.h
+++ b/include/asm-sparc/dma-mapping_64.h
@@ -135,7 +135,7 @@ static inline void dma_sync_sg_for_device(struct device *dev,
/* No flushing needed to sync cpu writes to the device. */
}
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return (dma_addr == DMA_ERROR_CODE);
}
diff --git a/include/asm-sparc/pci_32.h b/include/asm-sparc/pci_32.h
index b93b6c79e08f..0ee949d220c0 100644
--- a/include/asm-sparc/pci_32.h
+++ b/include/asm-sparc/pci_32.h
@@ -154,7 +154,8 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0)
-static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
+static inline int pci_dma_mapping_error(struct pci_dev *pdev,
+ dma_addr_t dma_addr)
{
return (dma_addr == PCI_DMA_ERROR_CODE);
}
diff --git a/include/asm-sparc/pci_64.h b/include/asm-sparc/pci_64.h
index f59f2571295b..4f79a54948f6 100644
--- a/include/asm-sparc/pci_64.h
+++ b/include/asm-sparc/pci_64.h
@@ -140,9 +140,10 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0)
#define PCI64_ADDR_BASE 0xfffc000000000000UL
-static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
+static inline int pci_dma_mapping_error(struct pci_dev *pdev,
+ dma_addr_t dma_addr)
{
- return dma_mapping_error(dma_addr);
+ return dma_mapping_error(&pdev->dev, dma_addr);
}
#ifdef CONFIG_PCI
diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h
index 87a715367a1b..3c034f48fdb0 100644
--- a/include/asm-x86/device.h
+++ b/include/asm-x86/device.h
@@ -5,6 +5,9 @@ struct dev_archdata {
#ifdef CONFIG_ACPI
void *acpi_handle;
#endif
+#ifdef CONFIG_X86_64
+struct dma_mapping_ops *dma_ops;
+#endif
#ifdef CONFIG_DMAR
void *iommu; /* hook for IOMMU specific extension */
#endif
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index c2ddd3d1b883..0eaa9bf6011f 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -17,7 +17,8 @@ extern int panic_on_overflow;
extern int force_iommu;
struct dma_mapping_ops {
- int (*mapping_error)(dma_addr_t dma_addr);
+ int (*mapping_error)(struct device *dev,
+ dma_addr_t dma_addr);
void* (*alloc_coherent)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
void (*free_coherent)(struct device *dev, size_t size,
@@ -56,14 +57,32 @@ struct dma_mapping_ops {
int is_phys;
};
-extern const struct dma_mapping_ops *dma_ops;
+extern struct dma_mapping_ops *dma_ops;
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
- if (dma_ops->mapping_error)
- return dma_ops->mapping_error(dma_addr);
+#ifdef CONFIG_X86_32
+ return dma_ops;
+#else
+ if (unlikely(!dev) || !dev->archdata.dma_ops)
+ return dma_ops;
+ else
+ return dev->archdata.dma_ops;
+#endif
+}
+
+/* Make sure we keep the same behaviour */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+#ifdef CONFIG_X86_32
+ return 0;
+#else
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
+ if (ops->mapping_error)
+ return ops->mapping_error(dev, dma_addr);
return (dma_addr == bad_dma_address);
+#endif
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
@@ -83,44 +102,53 @@ static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+ return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->unmap_single)
- dma_ops->unmap_single(dev, addr, size, direction);
+ if (ops->unmap_single)
+ ops->unmap_single(dev, addr, size, direction);
}
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
int nents, int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- return dma_ops->map_sg(hwdev, sg, nents, direction);
+ return ops->map_sg(hwdev, sg, nents, direction);
}
static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->unmap_sg)
- dma_ops->unmap_sg(hwdev, sg, nents, direction);
+ if (ops->unmap_sg)
+ ops->unmap_sg(hwdev, sg, nents, direction);
}
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_for_cpu)
- dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
- direction);
+ if (ops->sync_single_for_cpu)
+ ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
flush_write_buffers();
}
@@ -128,10 +156,11 @@ static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_for_device)
- dma_ops->sync_single_for_device(hwdev, dma_handle, size,
- direction);
+ if (ops->sync_single_for_device)
+ ops->sync_single_for_device(hwdev, dma_handle, size, direction);
flush_write_buffers();
}
@@ -139,11 +168,12 @@ static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
unsigned long offset, size_t size, int direction)
{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_range_for_cpu)
- dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
- size, direction);
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+ BUG_ON(!valid_dma_direction(direction));
+ if (ops->sync_single_range_for_cpu)
+ ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+ size, direction);
flush_write_buffers();
}
@@ -152,11 +182,12 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
int direction)
{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_range_for_device)
- dma_ops->sync_single_range_for_device(hwdev, dma_handle,
- offset, size, direction);
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+ BUG_ON(!valid_dma_direction(direction));
+ if (ops->sync_single_range_for_device)
+ ops->sync_single_range_for_device(hwdev, dma_handle,
+ offset, size, direction);
flush_write_buffers();
}
@@ -164,9 +195,11 @@ static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_sg_for_cpu)
- dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+ if (ops->sync_sg_for_cpu)
+ ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
flush_write_buffers();
}
@@ -174,9 +207,11 @@ static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_sg_for_device)
- dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+ if (ops->sync_sg_for_device)
+ ops->sync_sg_for_device(hwdev, sg, nelems, direction);
flush_write_buffers();
}
@@ -185,9 +220,11 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
+
BUG_ON(!valid_dma_direction(direction));
- return dma_ops->map_single(dev, page_to_phys(page)+offset,
- size, direction);
+ return ops->map_single(dev, page_to_phys(page) + offset,
+ size, direction);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
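
With get_dma_ops(), 64-bit x86 can now select DMA operations per device instead of always using the global dma_ops; an IOMMU backend would point dev->archdata.dma_ops at its own table. A hedged sketch of that hand-off (the backend helpers are hypothetical):

/*
 * Illustrative only: how a hypothetical per-device IOMMU backend might
 * install its operations on x86_64 under this scheme.
 */
static struct dma_mapping_ops my_iommu_dma_ops = {
        .mapping_error  = my_iommu_mapping_error,       /* hypothetical helpers */
        .map_single     = my_iommu_map_single,
        .unmap_single   = my_iommu_unmap_single,
        .map_sg         = my_iommu_map_sg,
        .unmap_sg       = my_iommu_unmap_sg,
};

static void my_iommu_attach_device(struct device *dev)
{
        dev->archdata.dma_ops = &my_iommu_dma_ops;      /* per-device override */
}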
diff --git a/include/asm-x86/gpio.h b/include/asm-x86/gpio.h
index 116e9147fe66..c4c91b37c104 100644
--- a/include/asm-x86/gpio.h
+++ b/include/asm-x86/gpio.h
@@ -16,10 +16,6 @@
#ifndef _ASM_I386_GPIO_H
#define _ASM_I386_GPIO_H
-#ifdef CONFIG_X86_RDC321X
-#include <gpio.h>
-#else /* CONFIG_X86_RDC321X */
-
#include <asm-generic/gpio.h>
#ifdef CONFIG_GPIOLIB
@@ -57,6 +53,4 @@ static inline int irq_to_gpio(unsigned int irq)
#endif /* CONFIG_GPIOLIB */
-#endif /* CONFIG_X86_RDC321X */
-
#endif /* _ASM_I386_GPIO_H */
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index d63166fb3ab7..ecc8061904a9 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -3,6 +3,7 @@
extern void pci_iommu_shutdown(void);
extern void no_iommu_init(void);
+extern struct dma_mapping_ops nommu_dma_ops;
extern int force_iommu, no_iommu;
extern int iommu_detected;
diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h
index 8f855a15f64d..c0e52a14fd4d 100644
--- a/include/asm-x86/kexec.h
+++ b/include/asm-x86/kexec.h
@@ -10,14 +10,15 @@
# define VA_PTE_0 5
# define PA_PTE_1 6
# define VA_PTE_1 7
+# define PA_SWAP_PAGE 8
# ifdef CONFIG_X86_PAE
-# define PA_PMD_0 8
-# define VA_PMD_0 9
-# define PA_PMD_1 10
-# define VA_PMD_1 11
-# define PAGES_NR 12
+# define PA_PMD_0 9
+# define VA_PMD_0 10
+# define PA_PMD_1 11
+# define VA_PMD_1 12
+# define PAGES_NR 13
# else
-# define PAGES_NR 8
+# define PAGES_NR 9
# endif
#else
# define PA_CONTROL_PAGE 0
@@ -152,11 +153,12 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
}
#ifdef CONFIG_X86_32
-asmlinkage NORET_TYPE void
+asmlinkage unsigned long
relocate_kernel(unsigned long indirection_page,
unsigned long control_page,
unsigned long start_address,
- unsigned int has_pae) ATTRIB_NORET;
+ unsigned int has_pae,
+ unsigned int preserve_context);
#else
NORET_TYPE void
relocate_kernel(unsigned long indirection_page,
diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h
index 75d2c95005d7..c47e2ab5c5ca 100644
--- a/include/asm-x86/mach-summit/mach_apic.h
+++ b/include/asm-x86/mach-summit/mach_apic.h
@@ -122,7 +122,7 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
static inline physid_mask_t apicid_to_cpu_present(int apicid)
{
- return physid_mask_of_physid(0);
+ return physid_mask_of_physid(apicid);
}
static inline void setup_portio_remap(void)
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 3e5dbc4195f4..04caa2f544df 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -18,6 +18,7 @@
#define _PAGE_BIT_UNUSED2 10
#define _PAGE_BIT_UNUSED3 11
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
+#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
@@ -34,6 +35,8 @@
#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
+#define __HAVE_ARCH_PTE_SPECIAL
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
@@ -54,7 +57,7 @@
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
- _PAGE_ACCESSED | _PAGE_DIRTY)
+ _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB (0)
@@ -180,7 +183,7 @@ static inline int pte_exec(pte_t pte)
static inline int pte_special(pte_t pte)
{
- return 0;
+ return pte_val(pte) & _PAGE_SPECIAL;
}
static inline int pmd_large(pmd_t pte)
@@ -246,7 +249,7 @@ static inline pte_t pte_clrglobal(pte_t pte)
static inline pte_t pte_mkspecial(pte_t pte)
{
- return pte;
+ return __pte(pte_val(pte) | _PAGE_SPECIAL);
}
extern pteval_t __supported_pte_mask;
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h
index c706a7442633..2730b351afcf 100644
--- a/include/asm-x86/swiotlb.h
+++ b/include/asm-x86/swiotlb.h
@@ -35,7 +35,7 @@ extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
int nents, int direction);
extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
int nents, int direction);
-extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
+extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle);
extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h
index f6fa4d841bbc..5f702d1d5218 100644
--- a/include/asm-x86/uaccess.h
+++ b/include/asm-x86/uaccess.h
@@ -451,3 +451,4 @@ extern struct movsl_mask {
#endif
#endif
+
diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h
index 3c7d537dd15d..51882ae3db4d 100644
--- a/include/asm-xtensa/dma-mapping.h
+++ b/include/asm-xtensa/dma-mapping.h
@@ -139,7 +139,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
consistent_sync(sg_virt(sg), sg->length, dir);
}
static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
diff --git a/include/linux/aio.h b/include/linux/aio.h
index b51ddd28444e..09b276c35227 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -7,7 +7,6 @@
#include <linux/uio.h>
#include <asm/atomic.h>
-#include <linux/uio.h>
#define AIO_MAXSEGS 4
#define AIO_KIOGRP_NR_ATOMIC 8
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 6cd39a927e1f..025e4f575103 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -8,7 +8,13 @@
#include <linux/proc_fs.h>
#define ELFCORE_ADDR_MAX (-1ULL)
+
+#ifdef CONFIG_PROC_VMCORE
extern unsigned long long elfcorehdr_addr;
+#else
+static const unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
+#endif
+
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
unsigned long, int);
extern const struct file_operations proc_vmcore_operations;
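
Making the !CONFIG_PROC_VMCORE fallback a constant lets capture-kernel checks compile away entirely when vmcore support is disabled. A hedged illustration of the kind of test this enables (the helper name is invented):

/* Illustrative helper; the name is made up. */
static inline int booted_as_capture_kernel(void)
{
        /* Constant-folds to 0 when CONFIG_PROC_VMCORE is not set. */
        return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}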
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 49d8eb7a71be..53d2edb709b3 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -499,7 +499,7 @@ struct backing_dev_info;
struct address_space {
struct inode *host; /* owner: inode, block_device */
struct radix_tree_root page_tree; /* radix tree of all pages */
- rwlock_t tree_lock; /* and rwlock protecting it */
+ spinlock_t tree_lock; /* and lock protecting it */
unsigned int i_mmap_writable;/* count VM_SHARED mappings */
struct prio_tree_root i_mmap; /* tree of private and shared mappings */
struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
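
With address_space.tree_lock converted from an rwlock to a spinlock, the flush_dcache_mmap_lock() hunks above and all page-cache radix-tree updates move to spin_lock_irq()/spin_unlock_irq(). A simplified, hedged sketch of the locking pattern — real insertion paths also take page references and preload the radix tree:

/* Simplified sketch; real code also grabs page references and uses
 * radix_tree_preload() before taking the lock. */
static int add_to_tree(struct address_space *mapping,
                       struct page *page, pgoff_t offset)
{
        int error;

        spin_lock_irq(&mapping->tree_lock);     /* was write_lock_irq() */
        error = radix_tree_insert(&mapping->page_tree, offset, page);
        spin_unlock_irq(&mapping->tree_lock);

        return error;
}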
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 7d51cbca49ab..75ae6d8aba4f 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -758,7 +758,7 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
}
dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
- if (!dma_mapping_error(dma_addr)) {
+ if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
*mptr++ = cpu_to_le32(0x7C020002);
diff --git a/include/linux/init.h b/include/linux/init.h
index 42ae95411a93..11b84e106053 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -170,6 +170,13 @@ extern void (*late_time_init)(void);
__attribute__((__section__(".initcall" level ".init"))) = fn
/*
+ * Early initcalls run before initializing SMP.
+ *
+ * Only for built-in code, not modules.
+ */
+#define early_initcall(fn) __define_initcall("early",fn,early)
+
+/*
* A "pure" initcall has no dependencies on anything else, and purely
* initializes variables that couldn't be statically initialized.
*
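
A hedged usage sketch of the new macro; the function name is invented, and since the "early" initcall section is only linked into the image this only works for built-in code:

#include <linux/init.h>

/* Runs before SMP is initialized; built-in code only. */
static int __init example_early_setup(void)
{
        return 0;
}
early_initcall(example_early_setup);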
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 3265968cd2cd..82f88a8a827b 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -83,6 +83,7 @@ struct kimage {
unsigned long start;
struct page *control_code_page;
+ struct page *swap_page;
unsigned long nr_segments;
struct kexec_segment segment[KEXEC_SEGMENT_MAX];
@@ -98,18 +99,20 @@ struct kimage {
unsigned int type : 1;
#define KEXEC_TYPE_DEFAULT 0
#define KEXEC_TYPE_CRASH 1
+ unsigned int preserve_context : 1;
};
/* kexec interface functions */
-extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET;
+extern void machine_kexec(struct kimage *image);
extern int machine_kexec_prepare(struct kimage *image);
extern void machine_kexec_cleanup(struct kimage *image);
extern asmlinkage long sys_kexec_load(unsigned long entry,
unsigned long nr_segments,
struct kexec_segment __user *segments,
unsigned long flags);
+extern int kernel_kexec(void);
#ifdef CONFIG_COMPAT
extern asmlinkage long compat_sys_kexec_load(unsigned long entry,
unsigned long nr_segments,
@@ -156,8 +159,9 @@ extern struct kimage *kexec_crash_image;
#define kexec_flush_icache_page(page)
#endif
-#define KEXEC_ON_CRASH 0x00000001
-#define KEXEC_ARCH_MASK 0xffff0000
+#define KEXEC_ON_CRASH 0x00000001
+#define KEXEC_PRESERVE_CONTEXT 0x00000002
+#define KEXEC_ARCH_MASK 0xffff0000
/* These values match the ELF architecture values.
* Unless there is a good reason that should continue to be the case.
@@ -174,7 +178,12 @@ extern struct kimage *kexec_crash_image;
#define KEXEC_ARCH_MIPS_LE (10 << 16)
#define KEXEC_ARCH_MIPS ( 8 << 16)
-#define KEXEC_FLAGS (KEXEC_ON_CRASH) /* List of defined/legal kexec flags */
+/* List of defined/legal kexec flags */
+#ifndef CONFIG_KEXEC_JUMP
+#define KEXEC_FLAGS KEXEC_ON_CRASH
+#else
+#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT)
+#endif
#define VMCOREINFO_BYTES (4096)
#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
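
With the conditional KEXEC_FLAGS definition above, a flag sanity check of the usual form rejects KEXEC_PRESERVE_CONTEXT automatically whenever CONFIG_KEXEC_JUMP is off. Roughly (example_check_kexec_flags is illustrative only):

#include <linux/kexec.h>
#include <linux/errno.h>

static int example_check_kexec_flags(unsigned long flags)
{
        /* Everything outside the architecture field must be a known flag. */
        if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
                return -EINVAL;
        return 0;
}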
diff --git a/include/linux/memstick.h b/include/linux/memstick.h
index 37a5cdb03918..a9f998a3f48b 100644
--- a/include/linux/memstick.h
+++ b/include/linux/memstick.h
@@ -263,6 +263,10 @@ struct memstick_dev {
/* Get next request from the media driver. */
int (*next_request)(struct memstick_dev *card,
struct memstick_request **mrq);
+ /* Tell the media driver to stop doing things */
+ void (*stop)(struct memstick_dev *card);
+ /* Allow the media driver to continue */
+ void (*start)(struct memstick_dev *card);
struct device dev;
};
@@ -284,7 +288,7 @@ struct memstick_host {
/* Notify the host that some requests are pending. */
void (*request)(struct memstick_host *host);
/* Set host IO parameters (power, clock, etc). */
- void (*set_param)(struct memstick_host *host,
+ int (*set_param)(struct memstick_host *host,
enum memstick_param param,
int value);
unsigned long private[0] ____cacheline_aligned;
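
The set_param() host callback now returns an int so the core can see failures. A hypothetical host-driver implementation (the names and switch cases are assumptions based on the enum in this header):

#include <linux/memstick.h>
#include <linux/errno.h>

static int example_set_param(struct memstick_host *host,
                             enum memstick_param param, int value)
{
        switch (param) {
        case MEMSTICK_POWER:
                /* ... toggle controller power according to value ... */
                return 0;
        case MEMSTICK_INTERFACE:
                /* ... reprogram bus width / clock ... */
                return 0;
        default:
                return -EINVAL;
        }
}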
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d87a5a5fe87d..6e695eaab4ce 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -810,7 +810,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
-void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);
extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);
@@ -833,6 +832,39 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
struct vm_area_struct **pprev, unsigned long start,
unsigned long end, unsigned long newflags);
+#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST
+/*
+ * get_user_pages_fast provides equivalent functionality to get_user_pages,
+ * operating on current and current->mm (force=0 and doesn't return any vmas).
+ *
+ * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
+ * can be made about locking. get_user_pages_fast is to be implemented in a
+ * way that is advantageous (vs get_user_pages()) when the user memory area is
+ * already faulted in and present in ptes. However, if the pages have to be
+ * faulted in, it may turn out to be slightly slower.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages);
+
+#else
+/*
+ * This fallback should probably be moved to asm-generic; architectures can
+ * include it if they don't implement their own get_user_pages_fast.
+ */
+#define get_user_pages_fast(start, nr_pages, write, pages) \
+({ \
+ struct mm_struct *mm = current->mm; \
+ int ret; \
+ \
+ down_read(&mm->mmap_sem); \
+ ret = get_user_pages(current, mm, start, nr_pages, \
+ write, 0, pages, NULL); \
+ up_read(&mm->mmap_sem); \
+ \
+ ret; \
+})
+#endif
+
/*
* A callback you can register to apply pressure to ageable caches.
*
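
The comment above describes the intended use of get_user_pages_fast(): pin pages of current->mm that are expected to be present already, with the macro fallback doing a plain get_user_pages() under mmap_sem. A hypothetical caller (example_pin_user_buffer is not from the patch):

#include <linux/mm.h>
#include <linux/errno.h>

static int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
                                   struct page **pages)
{
        /* write=1: we intend to dirty the pages. */
        int got = get_user_pages_fast(uaddr, nr_pages, 1, pages);

        if (got == nr_pages)
                return 0;

        /* Partial pin (or error): drop whatever was pinned and bail out. */
        while (got > 0)
                put_page(pages[--got]);
        return -EFAULT;
}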
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index ee1ec2c7723c..a81d81890422 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -12,6 +12,7 @@
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
+#include <linux/hardirq.h> /* for in_interrupt() */
/*
* Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
@@ -62,6 +63,98 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
#define page_cache_release(page) put_page(page)
void release_pages(struct page **pages, int nr, int cold);
+/*
+ * speculatively take a reference to a page.
+ * If the page is free (_count == 0), then _count is untouched, and 0
+ * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
+ *
+ * This function must be called inside the same rcu_read_lock() section as has
+ * been used to lookup the page in the pagecache radix-tree (or page table):
+ * this allows allocators to use a synchronize_rcu() to stabilize _count.
+ *
+ * Unless an RCU grace period has passed, the count of all pages coming out
+ * of the allocator must be considered unstable. page_count may return higher
+ * than expected, and put_page must be able to do the right thing when the
+ * page has been finished with, no matter what it is subsequently allocated
+ * for (because put_page is what is used here to drop an invalid speculative
+ * reference).
+ *
+ * This is the interesting part of the lockless pagecache (and lockless
+ * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
+ * has the following pattern:
+ * 1. find page in radix tree
+ * 2. conditionally increment refcount
+ * 3. check the page is still in pagecache (if no, goto 1)
+ *
+ * Remove-side that cares about stability of _count (eg. reclaim) has the
+ * following (with tree_lock held for write):
+ * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
+ * B. remove page from pagecache
+ * C. free the page
+ *
+ * There are 2 critical interleavings that matter:
+ * - 2 runs before A: in this case, A sees elevated refcount and bails out
+ * - A runs before 2: in this case, 2 sees zero refcount and retries;
+ * subsequently, B will complete and 1 will find no page, causing the
+ * lookup to return NULL.
+ *
+ * It is possible that between 1 and 2, the page is removed then the exact same
+ * page is inserted into the same position in pagecache. That's OK: the
+ * old find_get_page using tree_lock could equally have run before or after
+ * such a re-insertion, depending on order that locks are granted.
+ *
+ * Lookups racing against pagecache insertion aren't a big problem: either 1
+ * will find the page or it will not. Likewise, the old find_get_page could run
+ * either before the insertion or afterwards, depending on timing.
+ */
+static inline int page_cache_get_speculative(struct page *page)
+{
+ VM_BUG_ON(in_interrupt());
+
+#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+# ifdef CONFIG_PREEMPT
+ VM_BUG_ON(!in_atomic());
+# endif
+ /*
+ * Preempt must be disabled here - we rely on rcu_read_lock doing
+ * this for us.
+ *
+ * Pagecache won't be truncated from interrupt context, so if we have
+ * found a page in the radix tree here, we have pinned its refcount by
+ * disabling preempt, and hence no need for the "speculative get" that
+ * SMP requires.
+ */
+ VM_BUG_ON(page_count(page) == 0);
+ atomic_inc(&page->_count);
+
+#else
+ if (unlikely(!get_page_unless_zero(page))) {
+ /*
+ * Either the page has been freed, or will be freed.
+ * In either case, retry here and the caller should
+ * do the right thing (see comments above).
+ */
+ return 0;
+ }
+#endif
+ VM_BUG_ON(PageTail(page));
+
+ return 1;
+}
+
+static inline int page_freeze_refs(struct page *page, int count)
+{
+ return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
+}
+
+static inline void page_unfreeze_refs(struct page *page, int count)
+{
+ VM_BUG_ON(page_count(page) != 0);
+ VM_BUG_ON(count == 0);
+
+ atomic_set(&page->_count, count);
+}
+
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
@@ -133,7 +226,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
return read_cache_page(mapping, index, filler, data);
}
-int add_to_page_cache(struct page *page, struct address_space *mapping,
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
@@ -141,6 +234,22 @@ extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);
/*
+ * Like add_to_page_cache_locked, but used to add newly allocated pages:
+ * the page is new, so we can just run SetPageLocked() against it.
+ */
+static inline int add_to_page_cache(struct page *page,
+ struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+{
+ int error;
+
+ SetPageLocked(page);
+ error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+ if (unlikely(error))
+ ClearPageLocked(page);
+ return error;
+}
+
+/*
* Return byte-offset into filesystem object for page.
*/
static inline loff_t page_offset(struct page *page)
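
The protocol in the page_cache_get_speculative() comment maps onto a lookup loop of roughly this shape; this is a simplified sketch of what a lockless find_get_page() does, not code from the patch:

#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static struct page *example_lockless_lookup(struct address_space *mapping,
                                            pgoff_t index)
{
        struct page *page;

repeat:
        rcu_read_lock();
        /* step 1: find the page in the radix tree */
        page = radix_tree_lookup(&mapping->page_tree, index);
        if (page) {
                /* step 2: conditionally take a reference */
                if (!page_cache_get_speculative(page)) {
                        rcu_read_unlock();
                        goto repeat;
                }
                /* step 3: is it still the page at this index? */
                if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
                                                       index))) {
                        page_cache_release(page);
                        rcu_read_unlock();
                        goto repeat;
                }
        }
        rcu_read_unlock();
        return page;
}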
diff --git a/include/linux/parport.h b/include/linux/parport.h
index dcb9e01a69ca..6a0d7cdb5774 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -560,5 +560,8 @@ extern int parport_device_proc_unregister(struct pardevice *device);
#endif /* !CONFIG_PARPORT_NOT_PC */
+extern unsigned long parport_default_timeslice;
+extern int parport_default_spintime;
+
#endif /* __KERNEL__ */
#endif /* _PARPORT_H_ */
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 4cdd393e71e1..fac3337547eb 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -74,11 +74,6 @@ struct percpu_data {
(__typeof__(ptr))__p->ptrs[(cpu)]; \
})
-extern void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu);
-extern void percpu_depopulate(void *__pdata, int cpu);
-extern int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
- cpumask_t *mask);
-extern void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask);
extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
extern void percpu_free(void *__pdata);
@@ -86,26 +81,6 @@ extern void percpu_free(void *__pdata);
#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
-static inline void percpu_depopulate(void *__pdata, int cpu)
-{
-}
-
-static inline void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
-{
-}
-
-static inline void *percpu_populate(void *__pdata, size_t size, gfp_t gfp,
- int cpu)
-{
- return percpu_ptr(__pdata, cpu);
-}
-
-static inline int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
- cpumask_t *mask)
-{
- return 0;
-}
-
static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
{
return kzalloc(size, gfp);
@@ -118,10 +93,6 @@ static inline void percpu_free(void *__pdata)
#endif /* CONFIG_SMP */
-#define percpu_populate_mask(__pdata, size, gfp, mask) \
- __percpu_populate_mask((__pdata), (size), (gfp), &(mask))
-#define percpu_depopulate_mask(__pdata, mask) \
- __percpu_depopulate_mask((__pdata), &(mask))
#define percpu_alloc_mask(size, gfp, mask) \
__percpu_alloc_mask((size), (gfp), &(mask))
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index c6f5f9dd0cee..fd31756e1a00 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -121,6 +121,74 @@ static inline void ptrace_unlink(struct task_struct *child)
int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data);
int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data);
+/**
+ * task_ptrace - return %PT_* flags that apply to a task
+ * @task: pointer to &task_struct in question
+ *
+ * Returns the %PT_* flags that apply to @task.
+ */
+static inline int task_ptrace(struct task_struct *task)
+{
+ return task->ptrace;
+}
+
+/**
+ * ptrace_event - possibly stop for a ptrace event notification
+ * @mask: %PT_* bit to check in @current->ptrace
+ * @event: %PTRACE_EVENT_* value to report if @mask is set
+ * @message: value for %PTRACE_GETEVENTMSG to return
+ *
+ * This checks the @mask bit to see if ptrace wants stops for this event.
+ * If so we stop, reporting @event and @message to the ptrace parent.
+ *
+ * Returns nonzero if we did a ptrace notification, zero if not.
+ *
+ * Called without locks.
+ */
+static inline int ptrace_event(int mask, int event, unsigned long message)
+{
+ if (mask && likely(!(current->ptrace & mask)))
+ return 0;
+ current->ptrace_message = message;
+ ptrace_notify((event << 8) | SIGTRAP);
+ return 1;
+}
+
+/**
+ * ptrace_init_task - initialize ptrace state for a new child
+ * @child: new child task
+ * @ptrace: true if child should be ptrace'd by parent's tracer
+ *
+ * This is called immediately after adding @child to its parent's children
+ * list. @ptrace is false in the normal case, and true to ptrace @child.
+ *
+ * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
+ */
+static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
+{
+ INIT_LIST_HEAD(&child->ptrace_entry);
+ INIT_LIST_HEAD(&child->ptraced);
+ child->parent = child->real_parent;
+ child->ptrace = 0;
+ if (unlikely(ptrace)) {
+ child->ptrace = current->ptrace;
+ __ptrace_link(child, current->parent);
+ }
+}
+
+/**
+ * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
+ * @task: task in %EXIT_DEAD state
+ *
+ * Called with write_lock(&tasklist_lock) held.
+ */
+static inline void ptrace_release_task(struct task_struct *task)
+{
+ BUG_ON(!list_empty(&task->ptraced));
+ ptrace_unlink(task);
+ BUG_ON(!list_empty(&task->ptrace_entry));
+}
+
#ifndef force_successful_syscall_return
/*
* System call handlers that, upon successful completion, need to return a
@@ -246,6 +314,10 @@ static inline void user_enable_block_step(struct task_struct *task)
#define arch_ptrace_stop(code, info) do { } while (0)
#endif
+extern int task_current_syscall(struct task_struct *target, long *callno,
+ unsigned long args[6], unsigned int maxargs,
+ unsigned long *sp, unsigned long *pc);
+
#endif
#endif
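
task_current_syscall() is the consumer-facing wrapper around the new asm-generic/syscall.h helpers. A hedged sketch of a debugger-style caller (the helper name and the exact error convention are assumptions):

#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/sched.h>

static void example_dump_syscall(struct task_struct *target)
{
        long callno;
        unsigned long args[6], sp, pc;

        /* Assumed to fail unless the target is reliably stopped. */
        if (task_current_syscall(target, &callno, args, 6, &sp, &pc))
                return;

        printk(KERN_DEBUG "pid %d: syscall %ld, pc=%lx sp=%lx\n",
               task_pid_nr(target), callno, pc, sp);
}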
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index b8ce2b444bb5..a916c6660dfa 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -99,12 +99,15 @@ do { \
*
* The notable exceptions to this rule are the following functions:
* radix_tree_lookup
+ * radix_tree_lookup_slot
* radix_tree_tag_get
* radix_tree_gang_lookup
+ * radix_tree_gang_lookup_slot
* radix_tree_gang_lookup_tag
+ * radix_tree_gang_lookup_tag_slot
* radix_tree_tagged
*
- * The first 4 functions are able to be called locklessly, using RCU. The
+ * The first 7 functions are able to be called locklessly, using RCU. The
* caller must ensure calls to these functions are made within rcu_read_lock()
* regions. Other readers (lock-free or otherwise) and modifications may be
* running concurrently.
@@ -159,6 +162,9 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items);
+unsigned int
+radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
+ unsigned long first_index, unsigned int max_items);
unsigned long radix_tree_next_hole(struct radix_tree_root *root,
unsigned long index, unsigned long max_scan);
int radix_tree_preload(gfp_t gfp_mask);
@@ -173,6 +179,10 @@ unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items,
unsigned int tag);
+unsigned int
+radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
+ unsigned long first_index, unsigned int max_items,
+ unsigned int tag);
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
static inline void radix_tree_preload_end(void)
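
The new *_slot variants return pointers to slots rather than the items themselves, which lets an RCU-aware caller rcu_dereference() each slot inside its own read-side section. A rough sketch (example_peek is hypothetical):

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static unsigned int example_peek(struct radix_tree_root *root,
                                 void **out, unsigned int max)
{
        void **slots[16];
        unsigned int i, found;

        if (max > 16)
                max = 16;

        rcu_read_lock();
        found = radix_tree_gang_lookup_slot(root, slots, 0, max);
        for (i = 0; i < found; i++)
                out[i] = rcu_dereference(*slots[i]);
        rcu_read_unlock();

        return found;
}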
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 6cd8c4425fc7..953fc055e875 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -48,6 +48,7 @@ struct rchan_buf
size_t *padding; /* padding counts per sub-buffer */
size_t prev_padding; /* temporary variable */
size_t bytes_consumed; /* bytes consumed in cur read subbuf */
+ size_t early_bytes; /* bytes consumed before VFS inited */
unsigned int cpu; /* this buf's cpu */
} ____cacheline_aligned;
@@ -68,6 +69,7 @@ struct rchan
int is_global; /* One global buffer ? */
struct list_head list; /* for channel list */
struct dentry *parent; /* parent dentry passed to open */
+ int has_base_filename; /* has a filename associated? */
char base_filename[NAME_MAX]; /* saved base filename */
};
@@ -169,6 +171,9 @@ struct rchan *relay_open(const char *base_filename,
size_t n_subbufs,
struct rchan_callbacks *cb,
void *private_data);
+extern int relay_late_setup_files(struct rchan *chan,
+ const char *base_filename,
+ struct dentry *parent);
extern void relay_close(struct rchan *chan);
extern void relay_flush(struct rchan *chan);
extern void relay_subbufs_consumed(struct rchan *chan,
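
relay_late_setup_files() exists so a channel can be opened before the VFS (and debugfs) is usable and be given its filenames later; the new has_base_filename flag appears to track whether that has happened yet. A heavily hedged sketch, with callback set, sizes and names all assumed:

#include <linux/relay.h>
#include <linux/debugfs.h>
#include <linux/errno.h>

static struct rchan *example_chan;

/* Open early with no base filename; cb is an ordinary rchan_callbacks set. */
static int example_open_early(struct rchan_callbacks *cb)
{
        example_chan = relay_open(NULL, NULL, 4096, 8, cb, NULL);
        return example_chan ? 0 : -ENOMEM;
}

/* Later, once debugfs is up, attach the per-cpu files. */
static int example_attach_files(struct dentry *dir)
{
        return relay_late_setup_files(example_chan, "example", dir);
}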
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index b01fe004cb5e..91f597ad6acc 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -225,8 +225,6 @@ typedef struct rtc_task {
int rtc_register(rtc_task_t *task);
int rtc_unregister(rtc_task_t *task);
int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
-void rtc_get_rtc_time(struct rtc_time *rtc_tm);
-irqreturn_t rtc_interrupt(int irq, void *dev_id);
#endif /* __KERNEL__ */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 42036ffe6b00..f59318a0099b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -292,7 +292,6 @@ extern void sched_show_task(struct task_struct *p);
#ifdef CONFIG_DETECT_SOFTLOCKUP
extern void softlockup_tick(void);
-extern void spawn_softlockup_task(void);
extern void touch_softlockup_watchdog(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;
@@ -1797,7 +1796,7 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern void do_notify_parent(struct task_struct *, int);
+extern int do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
@@ -1883,9 +1882,13 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);
#ifdef CONFIG_SMP
-extern void wait_task_inactive(struct task_struct * p);
+extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
-#define wait_task_inactive(p) do { } while (0)
+static inline unsigned long wait_task_inactive(struct task_struct *p,
+ long match_state)
+{
+ return 1;
+}
#endif
#define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
@@ -2139,16 +2142,7 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
-#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
extern void arch_pick_mmap_layout(struct mm_struct *mm);
-#else
-static inline void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
-}
-#endif
#ifdef CONFIG_TRACING
extern void
@@ -2231,14 +2225,6 @@ static inline void inc_syscw(struct task_struct *tsk)
}
#endif
-#ifdef CONFIG_SMP
-void migration_init(void);
-#else
-static inline void migration_init(void)
-{
-}
-#endif
-
#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 41103910f8a2..9ff8e8499403 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -58,7 +58,7 @@ int slab_is_available(void);
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
- void (*)(struct kmem_cache *, void *));
+ void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
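
After this change (mirrored in slub_def.h below) a slab constructor receives only the object pointer; the kmem_cache and flags arguments are gone. A small sketch with an invented object type and cache:

#include <linux/slab.h>
#include <linux/errno.h>

struct example_obj {
        int state;
};

static void example_ctor(void *obj)
{
        struct example_obj *e = obj;

        e->state = 0;
}

static struct kmem_cache *example_cache;

static int example_cache_init(void)
{
        example_cache = kmem_cache_create("example_obj",
                                          sizeof(struct example_obj), 0,
                                          SLAB_HWCACHE_ALIGN, example_ctor);
        return example_cache ? 0 : -ENOMEM;
}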
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index d117ea2825a9..5bad61a93f65 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -85,7 +85,7 @@ struct kmem_cache {
struct kmem_cache_order_objects min;
gfp_t allocflags; /* gfp flags to use on each alloc */
int refcount; /* Refcount for slab cache destroy */
- void (*ctor)(struct kmem_cache *, void *);
+ void (*ctor)(void *);
int inuse; /* Offset to metadata */
int align; /* Alignment */
const char *name; /* Name (only for display!) */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 48262f86c969..66484d4a8459 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -74,15 +74,10 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data);
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
void generic_smp_call_function_single_interrupt(void);
void generic_smp_call_function_interrupt(void);
-void init_call_single_data(void);
void ipi_call_lock(void);
void ipi_call_unlock(void);
void ipi_call_lock_irq(void);
void ipi_call_unlock_irq(void);
-#else
-static inline void init_call_single_data(void)
-{
-}
#endif
/*
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 4bf8cade9dbc..e530026eedf7 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -427,9 +427,9 @@ static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
{
switch (dev->bus->bustype) {
case SSB_BUSTYPE_PCI:
- return pci_dma_mapping_error(addr);
+ return pci_dma_mapping_error(dev->bus->host_pci, addr);
case SSB_BUSTYPE_SSB:
- return dma_mapping_error(addr);
+ return dma_mapping_error(dev->dev, addr);
default:
__ssb_dma_not_implemented(dev);
}
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index e8e69159af71..c63435095970 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -278,4 +278,6 @@ static inline void register_nosave_region_late(unsigned long b, unsigned long e)
}
#endif
+extern struct mutex pm_mutex;
+
#endif /* _LINUX_SUSPEND_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 0b3377650c85..de40f169a4e4 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -237,7 +237,6 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t,
/* linux/mm/swapfile.c */
extern long total_swap_pages;
-extern unsigned int nr_swapfiles;
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
@@ -254,8 +253,6 @@ extern int can_share_swap_page(struct page *);
extern int remove_exclusive_swap_page(struct page *);
struct backing_dev_info;
-extern spinlock_t swap_lock;
-
/* linux/mm/thrash.c */
extern struct mm_struct * swap_token_mm;
extern void grab_swap_token(void);
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
new file mode 100644
index 000000000000..589f429619c9
--- /dev/null
+++ b/include/linux/tracehook.h
@@ -0,0 +1,575 @@
+/*
+ * Tracing hooks
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * This file defines hook entry points called by core code where
+ * user tracing/debugging support might need to do something. These
+ * entry points are called tracehook_*(). Each hook declared below
+ * has a detailed kerneldoc comment giving the context (locking et
+ * al) from which it is called, and the meaning of its return value.
+ *
+ * Each function here typically has only one call site, so it is ok
+ * to have some nontrivial tracehook_*() inlines. In all cases, the
+ * fast path when no tracing is enabled should be very short.
+ *
+ * The purpose of this file and the tracehook_* layer is to consolidate
+ * the interface that the kernel core and arch code uses to enable any
+ * user debugging or tracing facility (such as ptrace). The interfaces
+ * here are carefully documented so that maintainers of core and arch
+ * code do not need to think about the implementation details of the
+ * tracing facilities. Likewise, maintainers of the tracing code do not
+ * need to understand all the calling core or arch code in detail, just
+ * documented circumstances of each call, such as locking conditions.
+ *
+ * If the calling core code changes so that locking is different, then
+ * it is ok to change the interface documented here. The maintainer of
+ * core code changing should notify the maintainers of the tracing code
+ * that they need to work out the change.
+ *
+ * Some tracehook_*() inlines take arguments that the current tracing
+ * implementations might not necessarily use. These function signatures
+ * are chosen to pass in all the information that is on hand in the
+ * caller and might conceivably be relevant to a tracer, so that the
+ * core code won't have to be updated when tracing adds more features.
+ * If a call site changes so that some of those parameters are no longer
+ * already on hand without extra work, then the tracehook_* interface
+ * can change so there is no make-work burden on the core code. The
+ * maintainer of core code changing should notify the maintainers of the
+ * tracing code that they need to work out the change.
+ */
+
+#ifndef _LINUX_TRACEHOOK_H
+#define _LINUX_TRACEHOOK_H 1
+
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/security.h>
+struct linux_binprm;
+
+/**
+ * tracehook_expect_breakpoints - guess if task memory might be touched
+ * @task: current task, making a new mapping
+ *
+ * Return nonzero if @task is expected to want breakpoint insertion in
+ * its memory at some point. A zero return is no guarantee it won't
+ * be done, but this is a hint that it's known to be likely.
+ *
+ * May be called with @task->mm->mmap_sem held for writing.
+ */
+static inline int tracehook_expect_breakpoints(struct task_struct *task)
+{
+ return (task_ptrace(task) & PT_PTRACED) != 0;
+}
+
+/*
+ * ptrace report for syscall entry and exit looks identical.
+ */
+static inline void ptrace_report_syscall(struct pt_regs *regs)
+{
+ int ptrace = task_ptrace(current);
+
+ if (!(ptrace & PT_PTRACED))
+ return;
+
+ ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
+
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+}
+
+/**
+ * tracehook_report_syscall_entry - task is about to attempt a system call
+ * @regs: user register state of current task
+ *
+ * This will be called if %TIF_SYSCALL_TRACE has been set, when the
+ * current task has just entered the kernel for a system call.
+ * Full user register state is available here. Changing the values
+ * in @regs can affect the system call number and arguments to be tried.
+ * It is safe to block here, preventing the system call from beginning.
+ *
+ * Returns zero normally, or nonzero if the calling arch code should abort
+ * the system call. That must prevent normal entry so no system call is
+ * made. If @task ever returns to user mode after this, its register state
+ * is unspecified, but should be something harmless like an %ENOSYS error
+ * return. It should preserve enough information so that syscall_rollback()
+ * can work (see asm-generic/syscall.h).
+ *
+ * Called without locks, just after entering kernel mode.
+ */
+static inline __must_check int tracehook_report_syscall_entry(
+ struct pt_regs *regs)
+{
+ ptrace_report_syscall(regs);
+ return 0;
+}
+
+/**
+ * tracehook_report_syscall_exit - task has just finished a system call
+ * @regs: user register state of current task
+ * @step: nonzero if simulating single-step or block-step
+ *
+ * This will be called if %TIF_SYSCALL_TRACE has been set, when the
+ * current task has just finished an attempted system call. Full
+ * user register state is available here. It is safe to block here,
+ * preventing signals from being processed.
+ *
+ * If @step is nonzero, this report is also in lieu of the normal
+ * trap that would follow the system call instruction because
+ * user_enable_block_step() or user_enable_single_step() was used.
+ * In this case, %TIF_SYSCALL_TRACE might not be set.
+ *
+ * Called without locks, just before checking for pending signals.
+ */
+static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
+{
+ ptrace_report_syscall(regs);
+}
+
+/**
+ * tracehook_unsafe_exec - check for exec declared unsafe due to tracing
+ * @task: current task doing exec
+ *
+ * Return %LSM_UNSAFE_* bits applied to an exec because of tracing.
+ *
+ * Called with task_lock() held on @task.
+ */
+static inline int tracehook_unsafe_exec(struct task_struct *task)
+{
+ int unsafe = 0;
+ int ptrace = task_ptrace(task);
+ if (ptrace & PT_PTRACED) {
+ if (ptrace & PT_PTRACE_CAP)
+ unsafe |= LSM_UNSAFE_PTRACE_CAP;
+ else
+ unsafe |= LSM_UNSAFE_PTRACE;
+ }
+ return unsafe;
+}
+
+/**
+ * tracehook_tracer_task - return the task that is tracing the given task
+ * @tsk: task to consider
+ *
+ * Returns NULL if no one is tracing @tsk, or the &struct task_struct
+ * pointer to its tracer.
+ *
+ * Must be called under rcu_read_lock(). The pointer returned might be kept
+ * live only by RCU. During exec, this may be called with task_lock()
+ * held on @tsk, still held from when tracehook_unsafe_exec() was called.
+ */
+static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk)
+{
+ if (task_ptrace(tsk) & PT_PTRACED)
+ return rcu_dereference(tsk->parent);
+ return NULL;
+}
+
+/**
+ * tracehook_report_exec - a successful exec was completed
+ * @fmt: &struct linux_binfmt that performed the exec
+ * @bprm: &struct linux_binprm containing exec details
+ * @regs: user-mode register state
+ *
+ * An exec just completed, we are shortly going to return to user mode.
+ * The freshly initialized register state can be seen and changed in @regs.
+ * The name, file and other pointers in @bprm are still on hand to be
+ * inspected, but will be freed as soon as this returns.
+ *
+ * Called with no locks, but with some kernel resources held live
+ * and a reference on @fmt->module.
+ */
+static inline void tracehook_report_exec(struct linux_binfmt *fmt,
+ struct linux_binprm *bprm,
+ struct pt_regs *regs)
+{
+ if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
+ unlikely(task_ptrace(current) & PT_PTRACED))
+ send_sig(SIGTRAP, current, 0);
+}
+
+/**
+ * tracehook_report_exit - task has begun to exit
+ * @exit_code: pointer to value destined for @current->exit_code
+ *
+ * @exit_code points to the value passed to do_exit(), which tracing
+ * might change here. This is almost the first thing in do_exit(),
+ * before freeing any resources or setting the %PF_EXITING flag.
+ *
+ * Called with no locks held.
+ */
+static inline void tracehook_report_exit(long *exit_code)
+{
+ ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code);
+}
+
+/**
+ * tracehook_prepare_clone - prepare for new child to be cloned
+ * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
+ *
+ * This is called before a new user task is to be cloned.
+ * Its return value will be passed to tracehook_finish_clone().
+ *
+ * Called with no locks held.
+ */
+static inline int tracehook_prepare_clone(unsigned clone_flags)
+{
+ if (clone_flags & CLONE_UNTRACED)
+ return 0;
+
+ if (clone_flags & CLONE_VFORK) {
+ if (current->ptrace & PT_TRACE_VFORK)
+ return PTRACE_EVENT_VFORK;
+ } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
+ if (current->ptrace & PT_TRACE_CLONE)
+ return PTRACE_EVENT_CLONE;
+ } else if (current->ptrace & PT_TRACE_FORK)
+ return PTRACE_EVENT_FORK;
+
+ return 0;
+}
+
+/**
+ * tracehook_finish_clone - new child created and being attached
+ * @child: new child task
+ * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
+ * @trace: return value from tracehook_prepare_clone()
+ *
+ * This is called immediately after adding @child to its parent's children list.
+ * The @trace value is that returned by tracehook_prepare_clone().
+ *
+ * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
+ */
+static inline void tracehook_finish_clone(struct task_struct *child,
+ unsigned long clone_flags, int trace)
+{
+ ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace);
+}
+
+/**
+ * tracehook_report_clone - in parent, new child is about to start running
+ * @trace: return value from tracehook_prepare_clone()
+ * @regs: parent's user register state
+ * @clone_flags: flags from parent's system call
+ * @pid: new child's PID in the parent's namespace
+ * @child: new child task
+ *
+ * Called after a child is set up, but before it has been started running.
+ * The @trace value is that returned by tracehook_prepare_clone().
+ * This is not a good place to block, because the child has not started yet.
+ * Suspend the child here if desired, and block in tracehook_report_clone_complete().
+ * This must prevent the child from self-reaping if tracehook_report_clone_complete()
+ * uses the @child pointer; otherwise it might have died and been released by
+ * the time tracehook_report_clone_complete() is called.
+ *
+ * Called with no locks held, but the child cannot run until this returns.
+ */
+static inline void tracehook_report_clone(int trace, struct pt_regs *regs,
+ unsigned long clone_flags,
+ pid_t pid, struct task_struct *child)
+{
+ if (unlikely(trace)) {
+ /*
+ * The child starts up with an immediate SIGSTOP.
+ */
+ sigaddset(&child->pending.signal, SIGSTOP);
+ set_tsk_thread_flag(child, TIF_SIGPENDING);
+ }
+}
+
+/**
+ * tracehook_report_clone_complete - new child is running
+ * @trace: return value from tracehook_prepare_clone()
+ * @regs: parent's user register state
+ * @clone_flags: flags from parent's system call
+ * @pid: new child's PID in the parent's namespace
+ * @child: child task, already running
+ *
+ * This is called just after the child has started running. This is
+ * just before the clone/fork syscall returns, or blocks for vfork
+ * child completion if @clone_flags has the %CLONE_VFORK bit set.
+ * The @child pointer may be invalid if a self-reaping child died and
+ * tracehook_report_clone() took no action to prevent it from self-reaping.
+ *
+ * Called with no locks held.
+ */
+static inline void tracehook_report_clone_complete(int trace,
+ struct pt_regs *regs,
+ unsigned long clone_flags,
+ pid_t pid,
+ struct task_struct *child)
+{
+ if (unlikely(trace))
+ ptrace_event(0, trace, pid);
+}
+
+/**
+ * tracehook_report_vfork_done - vfork parent's child has exited or exec'd
+ * @child: child task, already running
+ * @pid: new child's PID in the parent's namespace
+ *
+ * Called after a %CLONE_VFORK parent has waited for the child to complete.
+ * The clone/vfork system call will return immediately after this.
+ * The @child pointer may be invalid if a self-reaping child died and
+ * tracehook_report_clone() took no action to prevent it from self-reaping.
+ *
+ * Called with no locks held.
+ */
+static inline void tracehook_report_vfork_done(struct task_struct *child,
+ pid_t pid)
+{
+ ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid);
+}
+
+/**
+ * tracehook_prepare_release_task - task is being reaped, clean up tracing
+ * @task: task in %EXIT_DEAD state
+ *
+ * This is called in release_task() just before @task gets finally reaped
+ * and freed. This would be the ideal place to remove and clean up any
+ * tracing-related state for @task.
+ *
+ * Called with no locks held.
+ */
+static inline void tracehook_prepare_release_task(struct task_struct *task)
+{
+}
+
+/**
+ * tracehook_finish_release_task - task is being reaped, clean up tracing
+ * @task: task in %EXIT_DEAD state
+ *
+ * This is called in release_task() while @task is in the middle of
+ * being reaped. After this, there must be no tracing entanglements.
+ *
+ * Called with write_lock_irq(&tasklist_lock) held.
+ */
+static inline void tracehook_finish_release_task(struct task_struct *task)
+{
+ ptrace_release_task(task);
+}
+
+/**
+ * tracehook_signal_handler - signal handler setup is complete
+ * @sig: number of signal being delivered
+ * @info: siginfo_t of signal being delivered
+ * @ka: sigaction setting that chose the handler
+ * @regs: user register state
+ * @stepping: nonzero if debugger single-step or block-step in use
+ *
+ * Called by the arch code after a signal handler has been set up.
+ * Register and stack state reflects the user handler about to run.
+ * Signal mask changes have already been made.
+ *
+ * Called without locks, shortly before returning to user mode
+ * (or handling more signals).
+ */
+static inline void tracehook_signal_handler(int sig, siginfo_t *info,
+ const struct k_sigaction *ka,
+ struct pt_regs *regs, int stepping)
+{
+ if (stepping)
+ ptrace_notify(SIGTRAP);
+}
+
+/**
+ * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal
+ * @task: task receiving the signal
+ * @sig: signal number being sent
+ * @handler: %SIG_IGN or %SIG_DFL
+ *
+ * Return zero iff tracing doesn't care to examine this ignored signal,
+ * so it can short-circuit normal delivery and never even get queued.
+ * Either @handler is %SIG_DFL and @sig's default is ignore, or it's %SIG_IGN.
+ *
+ * Called with @task->sighand->siglock held.
+ */
+static inline int tracehook_consider_ignored_signal(struct task_struct *task,
+ int sig,
+ void __user *handler)
+{
+ return (task_ptrace(task) & PT_PTRACED) != 0;
+}
+
+/**
+ * tracehook_consider_fatal_signal - suppress special handling of fatal signal
+ * @task: task receiving the signal
+ * @sig: signal number being sent
+ * @handler: %SIG_DFL or %SIG_IGN
+ *
+ * Return nonzero to prevent special handling of this termination signal.
+ * Normally @handler is %SIG_DFL. It can be %SIG_IGN if @sig is ignored,
+ * in which case force_sig() is about to reset it to %SIG_DFL.
+ * When this returns zero, this signal might cause a quick termination
+ * that does not give the debugger a chance to intercept the signal.
+ *
+ * Called with or without @task->sighand->siglock held.
+ */
+static inline int tracehook_consider_fatal_signal(struct task_struct *task,
+ int sig,
+ void __user *handler)
+{
+ return (task_ptrace(task) & PT_PTRACED) != 0;
+}
+
+/**
+ * tracehook_force_sigpending - let tracing force signal_pending(current) on
+ *
+ * Called when recomputing our signal_pending() flag. Return nonzero
+ * to force the signal_pending() flag on, so that tracehook_get_signal()
+ * will be called before the next return to user mode.
+ *
+ * Called with @current->sighand->siglock held.
+ */
+static inline int tracehook_force_sigpending(void)
+{
+ return 0;
+}
+
+/**
+ * tracehook_get_signal - deliver synthetic signal to traced task
+ * @task: @current
+ * @regs: task_pt_regs(@current)
+ * @info: details of synthetic signal
+ * @return_ka: sigaction for synthetic signal
+ *
+ * Return zero to check for a real pending signal normally.
+ * Return -1 after releasing the siglock to repeat the check.
+ * Return a signal number to induce an artificial signal delivery,
+ * setting *@info and *@return_ka to specify its details and behavior.
+ *
+ * The @return_ka->sa_handler value controls the disposition of the
+ * signal, no matter the signal number. For %SIG_DFL, the return value
+ * is a representative signal to indicate the behavior (e.g. %SIGTERM
+ * for death, %SIGQUIT for core dump, %SIGSTOP for job control stop,
+ * %SIGTSTP for stop unless in an orphaned pgrp), but the signal number
+ * reported will be @info->si_signo instead.
+ *
+ * Called with @task->sighand->siglock held, before dequeuing pending signals.
+ */
+static inline int tracehook_get_signal(struct task_struct *task,
+ struct pt_regs *regs,
+ siginfo_t *info,
+ struct k_sigaction *return_ka)
+{
+ return 0;
+}
+
+/**
+ * tracehook_notify_jctl - report about job control stop/continue
+ * @notify: nonzero if this is the last thread in the group to stop
+ * @why: %CLD_STOPPED or %CLD_CONTINUED
+ *
+ * This is called when we might call do_notify_parent_cldstop().
+ * It's called when about to stop for job control; we are already in
+ * %TASK_STOPPED state, about to call schedule(). It's also called when
+ * a delayed %CLD_STOPPED or %CLD_CONTINUED report is ready to be made.
+ *
+ * Return nonzero to generate a %SIGCHLD with @why, which is
+ * normal if @notify is nonzero.
+ *
+ * Called with no locks held.
+ */
+static inline int tracehook_notify_jctl(int notify, int why)
+{
+ return notify || (current->ptrace & PT_PTRACED);
+}
+
+/**
+ * tracehook_notify_death - task is dead, ready to notify parent
+ * @task: @current task now exiting
+ * @death_cookie: value to pass to tracehook_report_death()
+ * @group_dead: nonzero if this was the last thread in the group to die
+ *
+ * Return the signal number to send our parent with do_notify_parent(), or
+ * zero to send no signal and leave a zombie, or -1 to self-reap right now.
+ *
+ * Called with write_lock_irq(&tasklist_lock) held.
+ */
+static inline int tracehook_notify_death(struct task_struct *task,
+ void **death_cookie, int group_dead)
+{
+ if (task->exit_signal == -1)
+ return task->ptrace ? SIGCHLD : -1;
+
+ /*
+ * If something other than our normal parent is ptracing us, then
+ * send it a SIGCHLD instead of honoring exit_signal. exit_signal
+ * only has special meaning to our real parent.
+ */
+ if (thread_group_empty(task) && !ptrace_reparented(task))
+ return task->exit_signal;
+
+ return task->ptrace ? SIGCHLD : 0;
+}
+
+/**
+ * tracehook_report_death - task is dead and ready to be reaped
+ * @task: @current task now exiting
+ * @signal: signal number sent to parent, or 0 or -1
+ * @death_cookie: value passed back from tracehook_notify_death()
+ * @group_dead: nonzero if this was the last thread in the group to die
+ *
+ * Thread has just become a zombie or is about to self-reap. If positive,
+ * @signal is the signal number just sent to the parent (usually %SIGCHLD).
+ * If @signal is -1, this thread will self-reap. If @signal is 0, this is
+ * a delayed_group_leader() zombie. The @death_cookie was passed back by
+ * tracehook_notify_death().
+ *
+ * If normal reaping is not inhibited, @task->exit_state might be changing
+ * in parallel.
+ *
+ * Called without locks.
+ */
+static inline void tracehook_report_death(struct task_struct *task,
+ int signal, void *death_cookie,
+ int group_dead)
+{
+}
+
+#ifdef TIF_NOTIFY_RESUME
+/**
+ * set_notify_resume - cause tracehook_notify_resume() to be called
+ * @task: task that will call tracehook_notify_resume()
+ *
+ * Calling this arranges that @task will call tracehook_notify_resume()
+ * before returning to user mode. If it's already running in user mode,
+ * it will enter the kernel and call tracehook_notify_resume() soon.
+ * If it's blocked, it will not be woken.
+ */
+static inline void set_notify_resume(struct task_struct *task)
+{
+ if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
+ kick_process(task);
+}
+
+/**
+ * tracehook_notify_resume - report when about to return to user mode
+ * @regs: user-mode registers of @current task
+ *
+ * This is called when %TIF_NOTIFY_RESUME has been set. Now we are
+ * about to return to user mode, and the user state in @regs can be
+ * inspected or adjusted. The caller in arch code has cleared
+ * %TIF_NOTIFY_RESUME before the call. If the flag gets set again
+ * asynchronously, this will be called again before we return to
+ * user mode.
+ *
+ * Called without locks.
+ */
+static inline void tracehook_notify_resume(struct pt_regs *regs)
+{
+}
+#endif /* TIF_NOTIFY_RESUME */
+
+#endif /* <linux/tracehook.h> */
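
For context, the syscall entry/exit hooks above are meant to be called from an architecture's syscall tracing path. A rough, hypothetical sketch of that glue, not any particular architecture's code:

#include <linux/sched.h>
#include <linux/tracehook.h>

asmlinkage long example_syscall_trace_enter(struct pt_regs *regs)
{
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            tracehook_report_syscall_entry(regs))
                return -1L;     /* arch code should abort the syscall */
        return 0;
}

asmlinkage void example_syscall_trace_leave(struct pt_regs *regs, int step)
{
        if (step || test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, step);
}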
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 90b529f7a154..936e333e7ce5 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1590,7 +1590,7 @@ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
if (dev->dma_ops)
return dev->dma_ops->mapping_error(dev, dma_addr);
- return dma_mapping_error(dma_addr);
+ return dma_mapping_error(dev->dma_device, dma_addr);
}
/**
diff --git a/include/video/atmel_lcdc.h b/include/video/atmel_lcdc.h
index 1ccf462b433a..613173b5db69 100644
--- a/include/video/atmel_lcdc.h
+++ b/include/video/atmel_lcdc.h
@@ -22,6 +22,7 @@
#ifndef __ATMEL_LCDC_H__
#define __ATMEL_LCDC_H__
+#include <linux/workqueue.h>
/* Way LCD wires are connected to the chip:
* Some Atmel chips use BGR color mode (instead of standard RGB)