-rw-r--r--  Documentation/ia64/err_inject.txt | 2
-rw-r--r--  Documentation/kdump/kdump.txt | 1
-rw-r--r--  Documentation/s390/s390dbf.txt | 3
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 18
-rw-r--r--  arch/ia64/Kconfig | 2
-rw-r--r--  arch/ia64/include/asm/futex.h | 5
-rw-r--r--  arch/ia64/include/asm/mca.h | 1
-rw-r--r--  arch/ia64/include/asm/numa.h | 5
-rw-r--r--  arch/ia64/kernel/fsys.S | 49
-rw-r--r--  arch/ia64/kernel/iosapic.c | 34
-rw-r--r--  arch/ia64/kernel/irq.c | 8
-rw-r--r--  arch/ia64/kernel/mca.c | 37
-rw-r--r--  arch/ia64/kernel/mca_drv.c | 2
-rw-r--r--  arch/ia64/kvm/vtlb.c | 2
-rw-r--r--  arch/ia64/mm/ioremap.c | 14
-rw-r--r--  arch/ia64/mm/numa.c | 5
-rw-r--r--  arch/ia64/pci/pci.c | 11
-rw-r--r--  arch/ia64/sn/kernel/tiocx.c | 5
-rw-r--r--  arch/mips/include/asm/page.h | 2
-rw-r--r--  arch/mips/pci/pci.c | 8
-rw-r--r--  arch/parisc/Makefile | 6
-rw-r--r--  arch/parisc/include/asm/cacheflush.h | 5
-rw-r--r--  arch/parisc/include/asm/pgtable.h | 47
-rw-r--r--  arch/parisc/include/asm/uaccess.h | 14
-rw-r--r--  arch/parisc/kernel/cache.c | 5
-rw-r--r--  arch/parisc/kernel/parisc_ksyms.c | 2
-rw-r--r--  arch/parisc/lib/Makefile | 3
-rw-r--r--  arch/parisc/lib/ucmpdi2.c | 25
-rw-r--r--  arch/s390/Kconfig | 13
-rw-r--r--  arch/s390/Makefile | 10
-rw-r--r--  arch/s390/hypfs/hypfs_dbfs.c | 4
-rw-r--r--  arch/s390/include/asm/bitops.h | 117
-rw-r--r--  arch/s390/include/asm/ccwdev.h | 3
-rw-r--r--  arch/s390/include/asm/cio.h | 2
-rw-r--r--  arch/s390/include/asm/compat.h | 57
-rw-r--r--  arch/s390/include/asm/elf.h | 23
-rw-r--r--  arch/s390/include/asm/pci.h | 1
-rw-r--r--  arch/s390/include/asm/pci_debug.h | 9
-rw-r--r--  arch/s390/include/asm/pci_insn.h | 203
-rw-r--r--  arch/s390/include/asm/pci_io.h | 16
-rw-r--r--  arch/s390/include/asm/pgtable.h | 2
-rw-r--r--  arch/s390/include/asm/processor.h | 3
-rw-r--r--  arch/s390/include/asm/ptrace.h | 6
-rw-r--r--  arch/s390/include/asm/syscall.h | 1
-rw-r--r--  arch/s390/include/asm/thread_info.h | 6
-rw-r--r--  arch/s390/include/uapi/asm/ptrace.h | 20
-rw-r--r--  arch/s390/include/uapi/asm/statfs.h | 63
-rw-r--r--  arch/s390/kernel/Makefile | 17
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 1
-rw-r--r--  arch/s390/kernel/compat_signal.c | 2
-rw-r--r--  arch/s390/kernel/dis.c | 9
-rw-r--r--  arch/s390/kernel/dumpstack.c | 236
-rw-r--r--  arch/s390/kernel/entry.S | 39
-rw-r--r--  arch/s390/kernel/entry.h | 1
-rw-r--r--  arch/s390/kernel/entry64.S | 43
-rw-r--r--  arch/s390/kernel/machine_kexec.c | 30
-rw-r--r--  arch/s390/kernel/setup.c | 9
-rw-r--r--  arch/s390/kernel/smp.c | 15
-rw-r--r--  arch/s390/kernel/suspend.c | 31
-rw-r--r--  arch/s390/kernel/swsusp_asm64.S | 29
-rw-r--r--  arch/s390/kernel/traps.c | 250
-rw-r--r--  arch/s390/kvm/trace.h | 2
-rw-r--r--  arch/s390/mm/cmm.c | 8
-rw-r--r--  arch/s390/mm/fault.c | 9
-rw-r--r--  arch/s390/mm/init.c | 10
-rw-r--r--  arch/s390/mm/pageattr.c | 24
-rw-r--r--  arch/s390/mm/pgtable.c | 235
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 3
-rw-r--r--  arch/s390/pci/Makefile | 4
-rw-r--r--  arch/s390/pci/pci.c | 153
-rw-r--r--  arch/s390/pci/pci_clp.c | 13
-rw-r--r--  arch/s390/pci/pci_debug.c | 7
-rw-r--r--  arch/s390/pci/pci_dma.c | 9
-rw-r--r--  arch/s390/pci/pci_insn.c | 202
-rw-r--r--  arch/s390/pci/pci_msi.c | 10
-rw-r--r--  arch/sparc/mm/tlb.c | 3
-rw-r--r--  arch/x86/boot/compressed/eboot.c | 4
-rw-r--r--  arch/x86/pci/common.c | 11
-rw-r--r--  arch/x86/pci/xen.c | 6
-rw-r--r--  arch/x86/xen/enlighten.c | 57
-rw-r--r--  arch/x86/xen/smp.c | 21
-rw-r--r--  arch/x86/xen/spinlock.c | 25
-rw-r--r--  arch/x86/xen/time.c | 13
-rw-r--r--  crypto/algif_hash.c | 2
-rw-r--r--  crypto/algif_skcipher.c | 1
-rw-r--r--  drivers/acpi/pci_root.c | 48
-rw-r--r--  drivers/acpi/pci_slot.c | 170
-rw-r--r--  drivers/acpi/scan.c | 1
-rw-r--r--  drivers/ata/ahci.c | 10
-rw-r--r--  drivers/eisa/eisa-bus.c | 82
-rw-r--r--  drivers/eisa/pci_eisa.c | 5
-rw-r--r--  drivers/firmware/efivars.c | 12
-rw-r--r--  drivers/gpio/gpio-pxa.c | 7
-rw-r--r--  drivers/iommu/amd_iommu.c | 14
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 40
-rw-r--r--  drivers/iommu/amd_iommu_types.h | 11
-rw-r--r--  drivers/media/dvb-frontends/mb86a20s.c | 2
-rw-r--r--  drivers/media/pci/cx25821/cx25821-video.c | 2
-rw-r--r--  drivers/pci/bus.c | 14
-rw-r--r--  drivers/pci/hotplug/Kconfig | 7
-rw-r--r--  drivers/pci/hotplug/acpiphp.h | 35
-rw-r--r--  drivers/pci/hotplug/acpiphp_core.c | 30
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 415
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug.h | 44
-rw-r--r--  drivers/pci/hotplug/cpqphp.h | 70
-rw-r--r--  drivers/pci/hotplug/cpqphp_nvram.h | 12
-rw-r--r--  drivers/pci/hotplug/ibmphp.h | 66
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c | 15
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 22
-rw-r--r--  drivers/pci/hotplug/pciehp_acpi.c | 2
-rw-r--r--  drivers/pci/hotplug/rpadlpar.h | 8
-rw-r--r--  drivers/pci/hotplug/rpaphp.h | 16
-rw-r--r--  drivers/pci/hotplug/s390_pci_hpc.c | 12
-rw-r--r--  drivers/pci/hotplug/shpchp.h | 26
-rw-r--r--  drivers/pci/hotplug/shpchp_sysfs.c | 2
-rw-r--r--  drivers/pci/msi.c | 180
-rw-r--r--  drivers/pci/msi.h | 24
-rw-r--r--  drivers/pci/pci-acpi.c | 30
-rw-r--r--  drivers/pci/pci-sysfs.c | 2
-rw-r--r--  drivers/pci/pci.c | 25
-rw-r--r--  drivers/pci/pci.h | 89
-rw-r--r--  drivers/pci/pcie/Kconfig | 2
-rw-r--r--  drivers/pci/pcie/aer/aer_inject.c | 10
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.h | 14
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 4
-rw-r--r--  drivers/pci/pcie/pme.c | 2
-rw-r--r--  drivers/pci/pcie/portdrv.h | 18
-rw-r--r--  drivers/pci/pcie/portdrv_acpi.c | 1
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 17
-rw-r--r--  drivers/pci/probe.c | 15
-rw-r--r--  drivers/pci/quirks.c | 148
-rw-r--r--  drivers/pci/remove.c | 4
-rw-r--r--  drivers/pci/setup-bus.c | 4
-rw-r--r--  drivers/pci/setup-res.c | 2
-rw-r--r--  drivers/pci/slot.c | 7
-rw-r--r--  drivers/s390/block/dasd.c | 172
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 3
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 344
-rw-r--r--  drivers/s390/block/dasd_int.h | 10
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 31
-rw-r--r--  drivers/s390/block/scm_blk.c | 3
-rw-r--r--  drivers/s390/block/scm_blk_cluster.c | 6
-rw-r--r--  drivers/s390/char/con3215.c | 4
-rw-r--r--  drivers/s390/char/monreader.c | 3
-rw-r--r--  drivers/s390/char/raw3270.c | 6
-rw-r--r--  drivers/s390/char/sclp_cmd.c | 4
-rw-r--r--  drivers/s390/char/zcore.c | 2
-rw-r--r--  drivers/s390/cio/chp.c | 22
-rw-r--r--  drivers/s390/cio/chp.h | 2
-rw-r--r--  drivers/s390/cio/chsc.c | 11
-rw-r--r--  drivers/s390/cio/cio.c | 160
-rw-r--r--  drivers/s390/cio/cio.h | 11
-rw-r--r--  drivers/s390/cio/css.c | 114
-rw-r--r--  drivers/s390/cio/css.h | 4
-rw-r--r--  drivers/s390/cio/device.c | 139
-rw-r--r--  drivers/s390/cio/device.h | 2
-rw-r--r--  drivers/s390/cio/device_ops.c | 22
-rw-r--r--  drivers/s390/cio/idset.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 3
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 4
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 6
-rw-r--r--  drivers/scsi/mvumi.c | 4
-rw-r--r--  drivers/scsi/mvumi.h | 1
-rw-r--r--  drivers/tty/tty_io.c | 18
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 10
-rw-r--r--  drivers/video/Kconfig | 2
-rw-r--r--  drivers/xen/events.c | 33
-rw-r--r--  drivers/xen/xen-acpi-processor.c | 82
-rw-r--r--  fs/aio.c | 2
-rw-r--r--  include/linux/acpi.h | 9
-rw-r--r--  include/linux/msi.h | 23
-rw-r--r--  include/linux/mutex.h | 3
-rw-r--r--  include/linux/pci-acpi.h | 29
-rw-r--r--  include/linux/pci-aspm.h | 20
-rw-r--r--  include/linux/pci-ats.h | 26
-rw-r--r--  include/linux/pci.h | 117
-rw-r--r--  include/linux/pci_hotplug.h | 12
-rw-r--r--  include/linux/pci_ids.h | 1
-rw-r--r--  include/linux/pcieport_if.h | 4
-rw-r--r--  include/linux/sched.h | 1
-rw-r--r--  include/uapi/linux/pci_regs.h | 30
-rw-r--r--  include/xen/events.h | 3
-rw-r--r--  kernel/.gitignore | 1
-rw-r--r--  kernel/events/core.c | 2
-rw-r--r--  kernel/lockdep.c | 29
-rw-r--r--  kernel/mutex.c | 151
-rw-r--r--  kernel/rtmutex-tester.c | 5
-rw-r--r--  kernel/sched/core.c | 45
-rw-r--r--  kernel/sched/features.h | 7
-rw-r--r--  mm/nommu.c | 10
191 files changed, 3169 insertions, 2825 deletions
diff --git a/Documentation/ia64/err_inject.txt b/Documentation/ia64/err_inject.txt
index 223e4f0582d0..9f651c181429 100644
--- a/Documentation/ia64/err_inject.txt
+++ b/Documentation/ia64/err_inject.txt
@@ -882,7 +882,7 @@ int err_inj()
cpu=parameters[i].cpu;
k = cpu%64;
j = cpu/64;
- mask[j]=1<<k;
+ mask[j] = 1UL << k;
if (sched_setaffinity(0, MASK_SIZE*8, mask)==-1) {
perror("Error sched_setaffinity:");
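The fix matters because mask[] evidently holds 64-bit words (the index is cpu/64) while the literal 1 is a 32-bit int, so the old shift is performed in int and is undefined for cpu numbers of 32 and above. A minimal sketch of the difference (illustration only, not part of the patch; 1ULL is used here to keep the sketch portable, the patch itself can use 1UL because ia64 longs are 64-bit):

#include <stdint.h>

static uint64_t cpu_to_mask_word(int cpu)
{
	int k = cpu % 64;              /* k may be as large as 63 */

	/* return 1 << k;   broken: int shift, undefined for k >= 32 */
	return 1ULL << k;  /* fixed: the shift is done in 64-bit arithmetic */
}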
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt
index 13f1aa09b938..9c7fd988e299 100644
--- a/Documentation/kdump/kdump.txt
+++ b/Documentation/kdump/kdump.txt
@@ -297,6 +297,7 @@ Boot into System Kernel
On ia64, 256M@256M is a generous value that typically works.
The region may be automatically placed on ia64, see the
dump-capture kernel config option notes above.
+ If sparse memory is used, the size should be rounded to GRANULE boundaries.

On s390x, typically use "crashkernel=xxM". The value of xx is dependent
on the memory consumption of the kdump system. In general this is not
diff --git a/Documentation/s390/s390dbf.txt b/Documentation/s390/s390dbf.txt
index ae66f9b90a25..fcaf0b4efba2 100644
--- a/Documentation/s390/s390dbf.txt
+++ b/Documentation/s390/s390dbf.txt
@@ -143,7 +143,8 @@ Parameter: id: handle for debug log
Return Value: none
-Description: frees memory for a debug log
+Description: frees memory for a debug log and removes all registered debug
+ views.
Must not be called within an interrupt handler
---------------------------------------------------------------------------
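For context, the debug log handle in question comes from debug_register(), and views are attached with debug_register_view(); the added sentence means a driver no longer has to detach its views before unregistering. A minimal lifecycle sketch based on the API described in this file (the pages/areas/buffer-size/level values are illustrative):

#include <linux/errno.h>
#include <asm/debug.h>

static debug_info_t *my_dbf;

static int my_debug_init(void)
{
	my_dbf = debug_register("my_driver", 4, 1, 64);
	if (!my_dbf)
		return -ENOMEM;
	debug_register_view(my_dbf, &debug_sprintf_view);
	debug_sprintf_event(my_dbf, 2, "initialized rc=%d\n", 0);
	return 0;
}

static void my_debug_exit(void)
{
	/* per the updated description, this also removes the registered views */
	debug_unregister(my_dbf);
}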
diff --git a/Makefile b/Makefile
index 46263d808876..8fe69916e726 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION =
NAME = Unicycling Gorilla
# *DOCUMENTATION*
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 9e0576569e07..eaba9dc91a0d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2714,16 +2714,22 @@ static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = {
{ }
};
-static struct omap_hwmod_opt_clk ocp2scp_usb_phy_opt_clks[] = {
- { .role = "48mhz", .clk = "ocp2scp_usb_phy_phy_48m" },
-};
-
/* ocp2scp_usb_phy */
static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
.name = "ocp2scp_usb_phy",
.class = &omap44xx_ocp2scp_hwmod_class,
.clkdm_name = "l3_init_clkdm",
- .main_clk = "func_48m_fclk",
+ /*
+ * ocp2scp_usb_phy_phy_48m is provided by the OMAP4 PRCM IP
+ * block as an "optional clock," and normally should never be
+ * specified as the main_clk for an OMAP IP block. However it
+ * turns out that this clock is actually the main clock for
+ * the ocp2scp_usb_phy IP block:
+ * http://lists.infradead.org/pipermail/linux-arm-kernel/2012-September/119943.html
+ * So listing ocp2scp_usb_phy_phy_48m as a main_clk here seems
+ * to be the best workaround.
+ */
+ .main_clk = "ocp2scp_usb_phy_phy_48m",
.prcm = {
.omap4 = {
.clkctrl_offs = OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET,
@@ -2732,8 +2738,6 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
},
},
.dev_attr = ocp2scp_dev_attr,
- .opt_clks = ocp2scp_usb_phy_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(ocp2scp_usb_phy_opt_clks),
};
/*
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 9a02f71c6b1f..e7e55a00f94f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -187,7 +187,7 @@ config IA64_DIG
config IA64_DIG_VTD
bool "DIG+Intel+IOMMU"
- select DMAR
+ select INTEL_IOMMU
select PCI_MSI
config IA64_HP_ZX1
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index d2bf1fd5e44f..76acbcd5c060 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -106,16 +106,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
{
- register unsigned long r8 __asm ("r8");
+ register unsigned long r8 __asm ("r8") = 0;
unsigned long prev;
__asm__ __volatile__(
" mf;; \n"
- " mov %0=r0 \n"
" mov ar.ccv=%4;; \n"
"[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n"
" .xdata4 \"__ex_table\", 1b-., 2f-. \n"
"[2:]"
- : "=r" (r8), "=r" (prev)
+ : "+r" (r8), "=&r" (prev)
: "r" (uaddr), "r" (newval),
"rO" ((long) (unsigned) oldval)
: "memory");
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index 43f96ab18fa0..8c7096168716 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CPUS];
extern int cpe_vector;
extern int ia64_cpe_irq;
extern void ia64_mca_init(void);
+extern void ia64_mca_irq_init(void);
extern void ia64_mca_cpu_init(void *);
extern void ia64_os_mca_dispatch(void);
extern void ia64_os_mca_dispatch_end(void);
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
index 2e27ef175652..2db0a6c6daa5 100644
--- a/arch/ia64/include/asm/numa.h
+++ b/arch/ia64/include/asm/numa.h
@@ -67,14 +67,13 @@ extern int paddr_to_nid(unsigned long paddr);
extern void map_cpu_to_node(int cpu, int nid);
extern void unmap_cpu_from_node(int cpu, int nid);
-
+extern void numa_clear_node(int cpu);
#else /* !CONFIG_NUMA */
#define map_cpu_to_node(cpu, nid) do{}while(0)
#define unmap_cpu_from_node(cpu, nid) do{}while(0)
-
#define paddr_to_nid(addr) 0
-
+#define numa_clear_node(cpu) do { } while (0)
#endif /* CONFIG_NUMA */
#endif /* _ASM_IA64_NUMA_H */
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index c4cd45d97749..abc6dee3799c 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -90,53 +90,6 @@ ENTRY(fsys_getpid)
FSYS_RETURN
END(fsys_getpid)
-ENTRY(fsys_getppid)
- .prologue
- .altrp b6
- .body
- add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
- ;;
- ld8 r17=[r17] // r17 = current->group_leader
- add r9=TI_FLAGS+IA64_TASK_SIZE,r16
- ;;
-
- ld4 r9=[r9]
- add r17=IA64_TASK_REAL_PARENT_OFFSET,r17 // r17 = &current->group_leader->real_parent
- ;;
- and r9=TIF_ALLWORK_MASK,r9
-
-1: ld8 r18=[r17] // r18 = current->group_leader->real_parent
- ;;
- cmp.ne p8,p0=0,r9
- add r8=IA64_TASK_TGID_OFFSET,r18 // r8 = &current->group_leader->real_parent->tgid
- ;;
-
- /*
- * The .acq is needed to ensure that the read of tgid has returned its data before
- * we re-check "real_parent".
- */
- ld4.acq r8=[r8] // r8 = current->group_leader->real_parent->tgid
-#ifdef CONFIG_SMP
- /*
- * Re-read current->group_leader->real_parent.
- */
- ld8 r19=[r17] // r19 = current->group_leader->real_parent
-(p8) br.spnt.many fsys_fallback_syscall
- ;;
- cmp.ne p6,p0=r18,r19 // did real_parent change?
- mov r19=0 // i must not leak kernel bits...
-(p6) br.cond.spnt.few 1b // yes -> redo the read of tgid and the check
- ;;
- mov r17=0 // i must not leak kernel bits...
- mov r18=0 // i must not leak kernel bits...
-#else
- mov r17=0 // i must not leak kernel bits...
- mov r18=0 // i must not leak kernel bits...
- mov r19=0 // i must not leak kernel bits...
-#endif
- FSYS_RETURN
-END(fsys_getppid)
-
ENTRY(fsys_set_tid_address)
.prologue
.altrp b6
@@ -614,7 +567,7 @@ paravirt_fsyscall_table:
data8 0 // chown
data8 0 // lseek // 1040
data8 fsys_getpid // getpid
- data8 fsys_getppid // getppid
+ data8 0 // getppid
data8 0 // mount
data8 0 // umount
data8 0 // setuid // 1045
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index ee33c3aaa2fc..19f107be734e 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -76,7 +76,7 @@
* PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
*
* Note: The term "IRQ" is loosely used everywhere in Linux kernel to
- * describeinterrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ
+ * describe interrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ
* (isa_irq) is the only exception in this source code.
*/
@@ -1010,6 +1010,26 @@ iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
return 0;
}
+static int
+iosapic_delete_rte(unsigned int irq, unsigned int gsi)
+{
+ struct iosapic_rte_info *rte, *temp;
+
+ list_for_each_entry_safe(rte, temp, &iosapic_intr_info[irq].rtes,
+ rte_list) {
+ if (rte->iosapic->gsi_base + rte->rte_index == gsi) {
+ if (rte->refcnt)
+ return -EBUSY;
+
+ list_del(&rte->rte_list);
+ kfree(rte);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
int iosapic_init(unsigned long phys_addr, unsigned int gsi_base)
{
int num_rte, err, index;
@@ -1069,7 +1089,7 @@ int iosapic_init(unsigned long phys_addr, unsigned int gsi_base)
int iosapic_remove(unsigned int gsi_base)
{
- int index, err = 0;
+ int i, irq, index, err = 0;
unsigned long flags;
spin_lock_irqsave(&iosapic_lock, flags);
@@ -1087,6 +1107,16 @@ int iosapic_remove(unsigned int gsi_base)
goto out;
}
+ for (i = gsi_base; i < gsi_base + iosapic_lists[index].num_rte; i++) {
+ irq = __gsi_to_irq(i);
+ if (irq < 0)
+ continue;
+
+ err = iosapic_delete_rte(irq, i);
+ if (err)
+ goto out;
+ }
+
iounmap(iosapic_lists[index].addr);
iosapic_free(index);
out:
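The new iosapic_delete_rte() walks the RTE list with list_for_each_entry_safe() because it may list_del() the node it is currently visiting. A generic sketch of that deletion-safe iteration pattern (the struct and field names here are illustrative):

#include <linux/list.h>
#include <linux/slab.h>

struct rte_demo {
	unsigned int gsi;
	struct list_head rte_list;
};

static void delete_matching_rte(struct list_head *head, unsigned int gsi)
{
	struct rte_demo *rte, *tmp;

	/* the _safe variant keeps a lookahead pointer, so freeing 'rte' is fine */
	list_for_each_entry_safe(rte, tmp, head, rte_list) {
		if (rte->gsi == gsi) {
			list_del(&rte->rte_list);
			kfree(rte);
		}
	}
}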
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index ad69606613eb..f2c418281130 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -23,6 +23,8 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
+#include <asm/mca.h>
+
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
@@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask)
#endif /* CONFIG_SMP */
+int __init arch_early_irq_init(void)
+{
+ ia64_mca_irq_init();
+ return 0;
+}
+
#ifdef CONFIG_HOTPLUG_CPU
unsigned int vectors_in_migration[NR_IRQS];
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 65bf9cd39044..d7396dbb07bb 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -2074,22 +2074,16 @@ ia64_mca_init(void)
printk(KERN_INFO "MCA related initialization done\n");
}
+
/*
- * ia64_mca_late_init
- *
- * Opportunity to setup things that require initialization later
- * than ia64_mca_init. Setup a timer to poll for CPEs if the
- * platform doesn't support an interrupt driven mechanism.
- *
- * Inputs : None
- * Outputs : Status
+ * These pieces cannot be done in ia64_mca_init() because it is called before
+ * early_irq_init() which would wipe out our percpu irq registrations. But we
+ * cannot leave them until ia64_mca_late_init() because by then all the other
+ * processors have been brought online and have set their own CMC vectors to
+ * point at a non-existent action. Called from arch_early_irq_init().
*/
-static int __init
-ia64_mca_late_init(void)
+void __init ia64_mca_irq_init(void)
{
- if (!mca_init)
- return 0;
-
/*
* Configure the CMCI/P vector and handler. Interrupts for CMC are
* per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
@@ -2108,6 +2102,23 @@ ia64_mca_late_init(void)
/* Setup the CPEI/P handler */
register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
#endif
+}
+
+/*
+ * ia64_mca_late_init
+ *
+ * Opportunity to setup things that require initialization later
+ * than ia64_mca_init. Setup a timer to poll for CPEs if the
+ * platform doesn't support an interrupt driven mechanism.
+ *
+ * Inputs : None
+ * Outputs : Status
+ */
+static int __init
+ia64_mca_late_init(void)
+{
+ if (!mca_init)
+ return 0;
register_hotcpu_notifier(&mca_cpu_notifier);
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 9392e021c93b..94f8bf777afa 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -349,7 +349,7 @@ init_record_index_pools(void)
/* - 3 - */
slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1;
- slidx_pool.buffer = (slidx_list_t *)
+ slidx_pool.buffer =
kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
return slidx_pool.buffer ? 0 : -ENOMEM;
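The removed cast is redundant: kmalloc() returns void *, which converts implicitly to any object pointer type in C. The usual cast-free idiom, shown with an illustrative local variable, also ties the sizeof to the destination:

slidx_list_t *buf = kmalloc(slidx_pool.max_idx * sizeof(*buf), GFP_KERNEL);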
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 4332f7ee5203..a7869f8f49a6 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -256,7 +256,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
"srlz.d;;"
"ssm psr.i;;"
"srlz.d;;"
- : "=r"(ret) : "r"(iha), "r"(pte):"memory");
+ : "=&r"(ret) : "r"(iha), "r"(pte) : "memory");
return ret;
}
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 3dccdd8eb275..43964cde6214 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -16,7 +16,7 @@
#include <asm/meminit.h>
static inline void __iomem *
-__ioremap (unsigned long phys_addr)
+__ioremap_uc(unsigned long phys_addr)
{
return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
}
@@ -24,7 +24,11 @@ __ioremap (unsigned long phys_addr)
void __iomem *
early_ioremap (unsigned long phys_addr, unsigned long size)
{
- return __ioremap(phys_addr);
+ u64 attr;
+ attr = kern_mem_attribute(phys_addr, size);
+ if (attr & EFI_MEMORY_WB)
+ return (void __iomem *) phys_to_virt(phys_addr);
+ return __ioremap_uc(phys_addr);
}
void __iomem *
@@ -47,7 +51,7 @@ ioremap (unsigned long phys_addr, unsigned long size)
if (attr & EFI_MEMORY_WB)
return (void __iomem *) phys_to_virt(phys_addr);
else if (attr & EFI_MEMORY_UC)
- return __ioremap(phys_addr);
+ return __ioremap_uc(phys_addr);
/*
* Some chipsets don't support UC access to memory. If
@@ -93,7 +97,7 @@ ioremap (unsigned long phys_addr, unsigned long size)
return (void __iomem *) (offset + (char __iomem *)addr);
}
- return __ioremap(phys_addr);
+ return __ioremap_uc(phys_addr);
}
EXPORT_SYMBOL(ioremap);
@@ -103,7 +107,7 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size)
if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
return NULL;
- return __ioremap(phys_addr);
+ return __ioremap_uc(phys_addr);
}
EXPORT_SYMBOL(ioremap_nocache);
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 3efea7d0a351..def782e31aac 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -73,6 +73,11 @@ int __meminit __early_pfn_to_nid(unsigned long pfn)
return -1;
}
+void __cpuinit numa_clear_node(int cpu)
+{
+ unmap_cpu_from_node(cpu, NUMA_NO_NODE);
+}
+
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* SRAT information is stored in node_memblk[], then we can use SRAT
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 60532ab27346..de1474ff0bc5 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
@@ -458,6 +459,16 @@ void pcibios_fixup_bus(struct pci_bus *b)
platform_pci_fixup_bus(b);
}
+void pcibios_add_bus(struct pci_bus *bus)
+{
+ acpi_pci_add_bus(bus);
+}
+
+void pcibios_remove_bus(struct pci_bus *bus)
+{
+ acpi_pci_remove_bus(bus);
+}
+
void pcibios_set_master (struct pci_dev *dev)
{
/* No special bus mastering setup handling */
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index 14c1711238c0..e35f6485c1fd 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -490,11 +490,14 @@ static int __init tiocx_init(void)
{
cnodeid_t cnodeid;
int found_tiocx_device = 0;
+ int err;
if (!ia64_platform_is("sn2"))
return 0;
- bus_register(&tiocx_bus_type);
+ err = bus_register(&tiocx_bus_type);
+ if (err)
+ return err;
for (cnodeid = 0; cnodeid < num_cnodes; cnodeid++) {
nasid_t nasid;
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 99fc547af9d3..eab99e536b5c 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -31,7 +31,7 @@
#define PAGE_SHIFT 16
#endif
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
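Since PAGE_SIZE is an unsigned long, the old ~(PAGE_SIZE - 1) is a 32-bit unsigned value on 32-bit kernels and zero-extends when combined with a 64-bit physical address; the new definition is a signed int that sign-extends, which appears to be the point of the change (the upper physical-address bits survive the masking). A small user-space illustration, assuming PAGE_SHIFT of 12:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t phys = 0x123456789ULL;		/* physical address above 4 GiB */
	uint32_t old_mask = ~((1UL << 12) - 1);	/* 32-bit unsigned: 0xfffff000 */
	int32_t  new_mask = ~((1   << 12) - 1);	/* signed int:      0xfffff000 */

	/* zero-extends: prints 23456000, the upper bits are lost */
	printf("%llx\n", (unsigned long long)(phys & old_mask));
	/* sign-extends to 0xfffffffffffff000: prints 123456000 */
	printf("%llx\n", (unsigned long long)(phys & new_mask));
	return 0;
}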
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 0872f12f268d..594e60d6a43b 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -115,7 +115,6 @@ static void pcibios_scanbus(struct pci_controller *hose)
pci_bus_assign_resources(bus);
pci_enable_bridges(bus);
}
- bus->dev.of_node = hose->of_node;
}
}
@@ -169,6 +168,13 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
}
}
}
+
+struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
+{
+ struct pci_controller *hose = bus->sysdata;
+
+ return of_node_get(hose->of_node);
+}
#endif
static DEFINE_MUTEX(pci_scan_mutex);
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 01d95e2f0581..113e28206503 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -65,8 +65,10 @@ ifndef CONFIG_FUNCTION_TRACER
endif
# Use long jumps instead of long branches (needed if your linker fails to
-# link a too big vmlinux executable)
-cflags-$(CONFIG_MLONGCALLS) += -mlong-calls
+# link a too big vmlinux executable). Not enabled for building modules.
+ifdef CONFIG_MLONGCALLS
+KBUILD_CFLAGS_KERNEL += -mlong-calls
+endif
# select which processor to optimise for
cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 79f694f3ad9b..f0e2784e7cca 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -140,7 +140,10 @@ static inline void *kmap(struct page *page)
return page_address(page);
}
-#define kunmap(page) kunmap_parisc(page_address(page))
+static inline void kunmap(struct page *page)
+{
+ kunmap_parisc(page_address(page));
+}
static inline void *kmap_atomic(struct page *page)
{
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 7df49fad29f9..1e40d7f86be3 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -16,6 +16,8 @@
#include <asm/processor.h>
#include <asm/cache.h>
+extern spinlock_t pa_dbit_lock;
+
/*
* kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
* memory. For the return value to be meaningful, ADDR must be >=
@@ -44,8 +46,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
#define set_pte_at(mm, addr, ptep, pteval) \
do { \
+ unsigned long flags; \
+ spin_lock_irqsave(&pa_dbit_lock, flags); \
set_pte(ptep, pteval); \
purge_tlb_entries(mm, addr); \
+ spin_unlock_irqrestore(&pa_dbit_lock, flags); \
} while (0)
#endif /* !__ASSEMBLY__ */
@@ -435,48 +440,46 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
-#ifdef CONFIG_SMP
+ pte_t pte;
+ unsigned long flags;
+
if (!pte_young(*ptep))
return 0;
- return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
-#else
- pte_t pte = *ptep;
- if (!pte_young(pte))
+
+ spin_lock_irqsave(&pa_dbit_lock, flags);
+ pte = *ptep;
+ if (!pte_young(pte)) {
+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
return 0;
- set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
+ }
+ set_pte(ptep, pte_mkold(pte));
+ purge_tlb_entries(vma->vm_mm, addr);
+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
return 1;
-#endif
}
-extern spinlock_t pa_dbit_lock;
-
struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t old_pte;
+ unsigned long flags;
- spin_lock(&pa_dbit_lock);
+ spin_lock_irqsave(&pa_dbit_lock, flags);
old_pte = *ptep;
pte_clear(mm,addr,ptep);
- spin_unlock(&pa_dbit_lock);
+ purge_tlb_entries(mm, addr);
+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
return old_pte;
}
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
-#ifdef CONFIG_SMP
- unsigned long new, old;
-
- do {
- old = pte_val(*ptep);
- new = pte_val(pte_wrprotect(__pte (old)));
- } while (cmpxchg((unsigned long *) ptep, old, new) != old);
+ unsigned long flags;
+ spin_lock_irqsave(&pa_dbit_lock, flags);
+ set_pte(ptep, pte_wrprotect(*ptep));
purge_tlb_entries(mm, addr);
-#else
- pte_t old_pte = *ptep;
- set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
-#endif
+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
}
#define pte_same(A,B) (pte_val(A) == pte_val(B))
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 4ba2c93770f1..e0a82358517e 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -181,30 +181,24 @@ struct exception_data {
#if !defined(CONFIG_64BIT)
#define __put_kernel_asm64(__val,ptr) do { \
- u64 __val64 = (u64)(__val); \
- u32 hi = (__val64) >> 32; \
- u32 lo = (__val64) & 0xffffffff; \
__asm__ __volatile__ ( \
"\n1:\tstw %2,0(%1)" \
- "\n2:\tstw %3,4(%1)\n\t" \
+ "\n2:\tstw %R2,4(%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
: "=r"(__pu_err) \
- : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
+ : "r"(ptr), "r"(__val), "0"(__pu_err) \
: "r1"); \
} while (0)
#define __put_user_asm64(__val,ptr) do { \
- u64 __val64 = (u64)(__val); \
- u32 hi = (__val64) >> 32; \
- u32 lo = (__val64) & 0xffffffff; \
__asm__ __volatile__ ( \
"\n1:\tstw %2,0(%%sr3,%1)" \
- "\n2:\tstw %3,4(%%sr3,%1)\n\t" \
+ "\n2:\tstw %R2,4(%%sr3,%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
: "=r"(__pu_err) \
- : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
+ : "r"(ptr), "r"(__val), "0"(__pu_err) \
: "r1"); \
} while (0)
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 4b12890642eb..83ded26cad06 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -421,14 +421,11 @@ void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
/* Note: purge_tlb_entries can be called at startup with
no context. */
- /* Disable preemption while we play with %sr1. */
- preempt_disable();
- mtsp(mm->context, 1);
purge_tlb_start(flags);
+ mtsp(mm->context, 1);
pdtlb(addr);
pitlb(addr);
purge_tlb_end(flags);
- preempt_enable();
}
EXPORT_SYMBOL(purge_tlb_entries);
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 6795dc6c995f..568b2c61ea02 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -120,11 +120,13 @@ extern void __ashrdi3(void);
extern void __ashldi3(void);
extern void __lshrdi3(void);
extern void __muldi3(void);
+extern void __ucmpdi2(void);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
asmlinkage void * __canonicalize_funcptr_for_compare(void *);
EXPORT_SYMBOL(__canonicalize_funcptr_for_compare);
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
index 5f2e6904d14a..5651536ac733 100644
--- a/arch/parisc/lib/Makefile
+++ b/arch/parisc/lib/Makefile
@@ -2,6 +2,7 @@
# Makefile for parisc-specific library files
#
-lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o
+lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
+ ucmpdi2.o
obj-y := iomap.o
diff --git a/arch/parisc/lib/ucmpdi2.c b/arch/parisc/lib/ucmpdi2.c
new file mode 100644
index 000000000000..149c016f32c5
--- /dev/null
+++ b/arch/parisc/lib/ucmpdi2.c
@@ -0,0 +1,25 @@
+#include <linux/module.h>
+
+union ull_union {
+ unsigned long long ull;
+ struct {
+ unsigned int high;
+ unsigned int low;
+ } ui;
+};
+
+int __ucmpdi2(unsigned long long a, unsigned long long b)
+{
+ union ull_union au = {.ull = a};
+ union ull_union bu = {.ull = b};
+
+ if (au.ui.high < bu.ui.high)
+ return 0;
+ else if (au.ui.high > bu.ui.high)
+ return 2;
+ if (au.ui.low < bu.ui.low)
+ return 0;
+ else if (au.ui.low > bu.ui.low)
+ return 2;
+ return 1;
+}
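As the code above shows, __ucmpdi2() follows the libgcc return convention: 0 when the first argument is smaller, 1 when the arguments are equal, 2 when it is larger. GCC may emit calls to it when comparing 64-bit values on a 32-bit target, which is presumably why the kernel now has to provide it. A usage sketch of that convention:

switch (__ucmpdi2(a, b)) {
case 0:		/* a <  b */
	break;
case 1:		/* a == b */
	break;
case 2:		/* a >  b */
	break;
}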
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index eb8fb629f00b..bda6ba6f3cf5 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -375,19 +375,6 @@ config PACK_STACK
Say Y if you are unsure.
-config SMALL_STACK
- def_bool n
- prompt "Use 8kb for kernel stack instead of 16kb"
- depends on PACK_STACK && 64BIT && !LOCKDEP
- help
- If you say Y here and the compiler supports the -mkernel-backchain
- option the kernel will use a smaller kernel stack size. The reduced
- size is 8kb instead of 16kb. This allows to run more threads on a
- system and reduces the pressure on the memory management for higher
- order page allocations.
-
- Say N if you are unsure.
-
config CHECK_STACK
def_bool y
prompt "Detect kernel stack overflow"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 7e3ce78d4290..a7d68a467ce8 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -55,22 +55,12 @@ cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
ifeq ($(call cc-option-yn,-mkernel-backchain),y)
cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK
aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
-cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
-aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
-ifdef CONFIG_SMALL_STACK
-STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) )
-endif
endif
# new style option for packed stacks
ifeq ($(call cc-option-yn,-mpacked-stack),y)
cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK
aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
-cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
-aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
-ifdef CONFIG_SMALL_STACK
-STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) )
-endif
endif
ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y)
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index 9fd4a40c6752..bb5dd496614f 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -105,9 +105,7 @@ void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
int hypfs_dbfs_init(void)
{
dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
- if (IS_ERR(dbfs_dir))
- return PTR_ERR(dbfs_dir);
- return 0;
+ return PTR_RET(dbfs_dir);
}
void hypfs_dbfs_exit(void)
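PTR_RET() (later known as PTR_ERR_OR_ZERO()) simply folds the removed IS_ERR()/PTR_ERR() branch into one expression; the open-coded equivalent of the new return statement, with an illustrative helper name, is:

#include <linux/err.h>

static int dbfs_init_demo(struct dentry *dir)
{
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	return 0;		/* i.e. PTR_RET(dir) */
}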
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 15422933c60b..4d8604e311f3 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -61,8 +61,6 @@ extern const char _sb_findmap[];
#ifndef CONFIG_64BIT
-#define __BITOPS_ALIGN 3
-#define __BITOPS_WORDSIZE 32
#define __BITOPS_OR "or"
#define __BITOPS_AND "nr"
#define __BITOPS_XOR "xr"
@@ -81,8 +79,6 @@ extern const char _sb_findmap[];
#else /* CONFIG_64BIT */
-#define __BITOPS_ALIGN 7
-#define __BITOPS_WORDSIZE 64
#define __BITOPS_OR "ogr"
#define __BITOPS_AND "ngr"
#define __BITOPS_XOR "xgr"
@@ -101,8 +97,7 @@ extern const char _sb_findmap[];
#endif /* CONFIG_64BIT */
-#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
-#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
+#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#ifdef CONFIG_SMP
/*
@@ -114,9 +109,9 @@ static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make OR mask */
- mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}
@@ -130,9 +125,9 @@ static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make AND mask */
- mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
+ mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}
@@ -146,9 +141,9 @@ static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make XOR mask */
- mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}
@@ -163,12 +158,12 @@ test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make OR/test mask */
- mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
- __BITOPS_BARRIER();
+ barrier();
return (old & mask) != 0;
}
@@ -182,12 +177,12 @@ test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make AND/test mask */
- mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
+ mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
- __BITOPS_BARRIER();
+ barrier();
return (old ^ new) != 0;
}
@@ -201,12 +196,12 @@ test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make XOR/test mask */
- mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
- __BITOPS_BARRIER();
+ barrier();
return (old & mask) != 0;
}
#endif /* CONFIG_SMP */
@@ -218,7 +213,7 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" oc %O0(1,%R0),%1"
: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
@@ -229,7 +224,7 @@ __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
*(unsigned char *) addr |= 1 << (nr & 7);
}
@@ -246,7 +241,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" nc %O0(1,%R0),%1"
: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
@@ -257,7 +252,7 @@ __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
*(unsigned char *) addr &= ~(1 << (nr & 7));
}
@@ -273,7 +268,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" xc %O0(1,%R0),%1"
: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
@@ -284,7 +279,7 @@ __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
*(unsigned char *) addr ^= 1 << (nr & 7);
}
@@ -302,7 +297,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr;
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(unsigned char *) addr;
asm volatile(
" oc %O0(1,%R0),%1"
@@ -321,7 +316,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr;
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(unsigned char *) addr;
asm volatile(
" nc %O0(1,%R0),%1"
@@ -340,7 +335,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr;
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(unsigned char *) addr;
asm volatile(
" xc %O0(1,%R0),%1"
@@ -376,7 +371,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr
unsigned long addr;
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(volatile unsigned char *) addr;
return (ch >> (nr & 7)) & 1;
}
@@ -384,7 +379,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr
static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
return (((volatile char *) addr)
- [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0;
+ [(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0;
}
#define test_bit(nr,addr) \
@@ -693,18 +688,18 @@ static inline int find_next_bit_left(const unsigned long *addr,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
set = __flo_word(0, *p & (~0UL << bit));
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_bit_left(p, size);
@@ -736,22 +731,22 @@ static inline int find_next_zero_bit (const unsigned long * addr,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
- * __ffz_word returns __BITOPS_WORDSIZE
+ * __ffz_word returns BITS_PER_LONG
* if no zero bit is present in the word.
*/
set = __ffz_word(bit, *p >> bit);
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_zero_bit(p, size);
@@ -773,22 +768,22 @@ static inline int find_next_bit (const unsigned long * addr,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
- * __ffs_word returns __BITOPS_WORDSIZE
+ * __ffs_word returns BITS_PER_LONG
* if no one bit is present in the word.
*/
set = __ffs_word(0, *p & (~0UL << bit));
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_bit(p, size);
@@ -843,22 +838,22 @@ static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
- * s390 version of ffz returns __BITOPS_WORDSIZE
+ * s390 version of ffz returns BITS_PER_LONG
* if no zero bit is present in the word.
*/
set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_zero_bit_le(p, size);
@@ -885,22 +880,22 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
- * s390 version of ffz returns __BITOPS_WORDSIZE
+ * s390 version of ffz returns BITS_PER_LONG
* if no zero bit is present in the word.
*/
set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_bit_le(p, size);
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index e6061617a50b..f201af8be580 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -220,7 +220,8 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
extern struct ccw_device *ccw_device_probe_console(void);
-extern int ccw_device_force_console(void);
+extern void ccw_device_wait_idle(struct ccw_device *);
+extern int ccw_device_force_console(struct ccw_device *);
int ccw_device_siosl(struct ccw_device *);
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index ad2b924167d7..ffb898961c8d 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -296,8 +296,6 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1,
return 0;
}
-extern void wait_cons_dev(void);
-
extern void css_schedule_reprobe(void);
extern void reipl_ccw_dev(struct ccw_dev_id *id);
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index f8c6df6cd1f0..c1e7c646727c 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -70,6 +70,22 @@ typedef u32 compat_ulong_t;
typedef u64 compat_u64;
typedef u32 compat_uptr_t;
+typedef struct {
+ u32 mask;
+ u32 addr;
+} __aligned(8) psw_compat_t;
+
+typedef struct {
+ psw_compat_t psw;
+ u32 gprs[NUM_GPRS];
+ u32 acrs[NUM_ACRS];
+ u32 orig_gpr2;
+} s390_compat_regs;
+
+typedef struct {
+ u32 gprs_high[NUM_GPRS];
+} s390_compat_regs_high;
+
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
@@ -124,18 +140,33 @@ struct compat_flock64 {
};
struct compat_statfs {
- s32 f_type;
- s32 f_bsize;
- s32 f_blocks;
- s32 f_bfree;
- s32 f_bavail;
- s32 f_files;
- s32 f_ffree;
+ u32 f_type;
+ u32 f_bsize;
+ u32 f_blocks;
+ u32 f_bfree;
+ u32 f_bavail;
+ u32 f_files;
+ u32 f_ffree;
+ compat_fsid_t f_fsid;
+ u32 f_namelen;
+ u32 f_frsize;
+ u32 f_flags;
+ u32 f_spare[4];
+};
+
+struct compat_statfs64 {
+ u32 f_type;
+ u32 f_bsize;
+ u64 f_blocks;
+ u64 f_bfree;
+ u64 f_bavail;
+ u64 f_files;
+ u64 f_ffree;
compat_fsid_t f_fsid;
- s32 f_namelen;
- s32 f_frsize;
- s32 f_flags;
- s32 f_spare[5];
+ u32 f_namelen;
+ u32 f_frsize;
+ u32 f_flags;
+ u32 f_spare[4];
};
#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
@@ -248,8 +279,6 @@ static inline int is_compat_task(void)
return is_32bit_task();
}
-#endif
-
static inline void __user *arch_compat_alloc_user_space(long len)
{
unsigned long stack;
@@ -260,6 +289,8 @@ static inline void __user *arch_compat_alloc_user_space(long len)
return (void __user *) (stack - len);
}
+#endif
+
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1bfdf24b85a2..78f4f8711d58 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -119,6 +119,8 @@
*/
#include <asm/ptrace.h>
+#include <asm/compat.h>
+#include <asm/syscall.h>
#include <asm/user.h>
typedef s390_fp_regs elf_fpregset_t;
@@ -180,18 +182,31 @@ extern unsigned long elf_hwcap;
extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)
-#ifdef CONFIG_64BIT
+#ifndef CONFIG_COMPAT
+#define SET_PERSONALITY(ex) \
+do { \
+ set_personality(PER_LINUX | \
+ (current->personality & (~PER_MASK))); \
+ current_thread_info()->sys_call_table = \
+ (unsigned long) &sys_call_table; \
+} while (0)
+#else /* CONFIG_COMPAT */
#define SET_PERSONALITY(ex) \
do { \
if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_LINUX | \
(current->personality & ~PER_MASK)); \
- if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
+ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
set_thread_flag(TIF_31BIT); \
- else \
+ current_thread_info()->sys_call_table = \
+ (unsigned long) &sys_call_table_emu; \
+ } else { \
clear_thread_flag(TIF_31BIT); \
+ current_thread_info()->sys_call_table = \
+ (unsigned long) &sys_call_table; \
+ } \
} while (0)
-#endif /* CONFIG_64BIT */
+#endif /* CONFIG_COMPAT */
#define STACK_RND_MASK 0x7ffUL
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 05333b7f0469..6c1801235db9 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -140,6 +140,7 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
struct zpci_dev *zpci_alloc_device(void);
int zpci_create_device(struct zpci_dev *);
int zpci_enable_device(struct zpci_dev *);
+int zpci_disable_device(struct zpci_dev *);
void zpci_stop_device(struct zpci_dev *);
void zpci_free_device(struct zpci_dev *);
int zpci_scan_device(struct zpci_dev *);
diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h
index 6bbec4265b6e..1ca5d1047c71 100644
--- a/arch/s390/include/asm/pci_debug.h
+++ b/arch/s390/include/asm/pci_debug.h
@@ -7,14 +7,11 @@ extern debug_info_t *pci_debug_msg_id;
extern debug_info_t *pci_debug_err_id;
#ifdef CONFIG_PCI_DEBUG
-#define zpci_dbg(fmt, args...) \
- do { \
- if (pci_debug_msg_id->level >= 2) \
- debug_sprintf_event(pci_debug_msg_id, 2, fmt , ## args);\
- } while (0)
+#define zpci_dbg(imp, fmt, args...) \
+ debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
#else /* !CONFIG_PCI_DEBUG */
-#define zpci_dbg(fmt, args...) do { } while (0)
+#define zpci_dbg(imp, fmt, args...) do { } while (0)
#endif
#define zpci_err(text...) \
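The first argument of the reworked zpci_dbg() is the message importance, passed straight through to debug_sprintf_event() instead of being hard-wired to 2, so callers can rank messages themselves. Hypothetical call sites (the level values and variables are illustrative, not taken from the patch):

zpci_dbg(3, "add fid:%x fh:%x\n", zdev->fid, zdev->fh);	/* verbose */
zpci_dbg(1, "error cc:%d status:%d\n", cc, status);		/* important */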
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 1486a98d5dad..e6a2bdd4d705 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -1,10 +1,6 @@
#ifndef _ASM_S390_PCI_INSN_H
#define _ASM_S390_PCI_INSN_H
-#include <linux/delay.h>
-
-#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
-
/* Load/Store status codes */
#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
#define ZPCI_PCI_ST_FUNC_IN_ERR 8
@@ -82,199 +78,12 @@ struct zpci_fib {
u64 reserved7;
} __packed;
-/* Modify PCI Function Controls */
-static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
-{
- u8 cc;
-
- asm volatile (
- " .insn rxy,0xe300000000d0,%[req],%[fib]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
- : : "cc");
- *status = req >> 24 & 0xff;
- return cc;
-}
-
-static inline int mpcifc_instr(u64 req, struct zpci_fib *fib)
-{
- u8 cc, status;
-
- do {
- cc = __mpcifc(req, fib, &status);
- if (cc == 2)
- msleep(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
-
- if (cc)
- printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
- __func__, cc, status);
- return (cc) ? -EIO : 0;
-}
-
-/* Refresh PCI Translations */
-static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
-{
- register u64 __addr asm("2") = addr;
- register u64 __range asm("3") = range;
- u8 cc;
-
- asm volatile (
- " .insn rre,0xb9d30000,%[fn],%[addr]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [fn] "+d" (fn)
- : [addr] "d" (__addr), "d" (__range)
- : "cc");
- *status = fn >> 24 & 0xff;
- return cc;
-}
-
-static inline int rpcit_instr(u64 fn, u64 addr, u64 range)
-{
- u8 cc, status;
-
- do {
- cc = __rpcit(fn, addr, range, &status);
- if (cc == 2)
- udelay(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
-
- if (cc)
- printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
- __func__, cc, status, addr, range);
- return (cc) ? -EIO : 0;
-}
-
-/* Store PCI function controls */
-static inline u8 __stpcifc(u32 handle, u8 space, struct zpci_fib *fib, u8 *status)
-{
- u64 fn = (u64) handle << 32 | space << 16;
- u8 cc;
-
- asm volatile (
- " .insn rxy,0xe300000000d4,%[fn],%[fib]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [fn] "+d" (fn), [fib] "=m" (*fib)
- : : "cc");
- *status = fn >> 24 & 0xff;
- return cc;
-}
-
-/* Set Interruption Controls */
-static inline void sic_instr(u16 ctl, char *unused, u8 isc)
-{
- asm volatile (
- " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
- : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
-}
-
-/* PCI Load */
-static inline u8 __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
-{
- register u64 __req asm("2") = req;
- register u64 __offset asm("3") = offset;
- u64 __data;
- u8 cc;
-
- asm volatile (
- " .insn rre,0xb9d20000,%[data],%[req]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [data] "=d" (__data), [req] "+d" (__req)
- : "d" (__offset)
- : "cc");
- *status = __req >> 24 & 0xff;
- *data = __data;
- return cc;
-}
-
-static inline int pcilg_instr(u64 *data, u64 req, u64 offset)
-{
- u8 cc, status;
-
- do {
- cc = __pcilg(data, req, offset, &status);
- if (cc == 2)
- udelay(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
-
- if (cc) {
- printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
- __func__, cc, status, req, offset);
- /* TODO: on IO errors set data to 0xff...
- * here or in users of pcilg (le conversion)?
- */
- }
- return (cc) ? -EIO : 0;
-}
-
-/* PCI Store */
-static inline u8 __pcistg(u64 data, u64 req, u64 offset, u8 *status)
-{
- register u64 __req asm("2") = req;
- register u64 __offset asm("3") = offset;
- u8 cc;
-
- asm volatile (
- " .insn rre,0xb9d00000,%[data],%[req]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [req] "+d" (__req)
- : "d" (__offset), [data] "d" (data)
- : "cc");
- *status = __req >> 24 & 0xff;
- return cc;
-}
-
-static inline int pcistg_instr(u64 data, u64 req, u64 offset)
-{
- u8 cc, status;
-
- do {
- cc = __pcistg(data, req, offset, &status);
- if (cc == 2)
- udelay(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
-
- if (cc)
- printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
- __func__, cc, status, req, offset);
- return (cc) ? -EIO : 0;
-}
-
-/* PCI Store Block */
-static inline u8 __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
-{
- u8 cc;
-
- asm volatile (
- " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [req] "+d" (req)
- : [offset] "d" (offset), [data] "Q" (*data)
- : "cc");
- *status = req >> 24 & 0xff;
- return cc;
-}
-
-static inline int pcistb_instr(const u64 *data, u64 req, u64 offset)
-{
- u8 cc, status;
-
- do {
- cc = __pcistb(data, req, offset, &status);
- if (cc == 2)
- udelay(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
- if (cc)
- printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
- __func__, cc, status, req, offset);
- return (cc) ? -EIO : 0;
-}
+int s390pci_mod_fc(u64 req, struct zpci_fib *fib);
+int s390pci_refresh_trans(u64 fn, u64 addr, u64 range);
+int s390pci_load(u64 *data, u64 req, u64 offset);
+int s390pci_store(u64 data, u64 req, u64 offset);
+int s390pci_store_block(const u64 *data, u64 req, u64 offset);
+void set_irq_ctrl(u16 ctl, char *unused, u8 isc);
#endif
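The retry-on-busy wrappers removed above are replaced by the out-of-line declarations; their implementation is no longer part of this header, but judging from the removed code they presumably keep the same shape. A sketch under that assumption (the __mpcifc() helper and the 1 ms delay are carried over purely for illustration):

int s390pci_mod_fc(u64 req, struct zpci_fib *fib)
{
	u8 cc, status;

	do {
		cc = __mpcifc(req, fib, &status);
		if (cc == 2)
			msleep(1);	/* condition code 2 means busy: retry */
	} while (cc == 2);

	if (cc)
		printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
			    __func__, cc, status);
	return cc ? -EIO : 0;
}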
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index 5fd81f31d6c7..83a9caa6ae53 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -36,7 +36,7 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
u64 data; \
int rc; \
\
- rc = pcilg_instr(&data, req, ZPCI_OFFSET(addr)); \
+ rc = s390pci_load(&data, req, ZPCI_OFFSET(addr)); \
if (rc) \
data = -1ULL; \
return (RETTYPE) data; \
@@ -50,7 +50,7 @@ static inline void zpci_write_##VALTYPE(VALTYPE val, \
u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
u64 data = (VALTYPE) val; \
\
- pcistg_instr(data, req, ZPCI_OFFSET(addr)); \
+ s390pci_store(data, req, ZPCI_OFFSET(addr)); \
}
zpci_read(8, u64)
@@ -83,15 +83,18 @@ static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len
val = 0; /* let FW report error */
break;
}
- return pcistg_instr(val, req, offset);
+ return s390pci_store(val, req, offset);
}
static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
{
u64 data;
- u8 cc;
+ int cc;
+
+ cc = s390pci_load(&data, req, offset);
+ if (cc)
+ goto out;
- cc = pcilg_instr(&data, req, offset);
switch (len) {
case 1:
*((u8 *) dst) = (u8) data;
@@ -106,12 +109,13 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
*((u64 *) dst) = (u64) data;
break;
}
+out:
return cc;
}
static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
{
- return pcistb_instr(data, req, offset);
+ return s390pci_store_block(data, req, offset);
}
static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 3cb47cf02530..4a64c0e5428f 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -764,6 +764,8 @@ void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
unsigned long to, unsigned long length);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
+unsigned long __gmap_translate(unsigned long address, struct gmap *);
+unsigned long gmap_translate(unsigned long address, struct gmap *);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 94e749c90230..6b499870662f 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -161,7 +161,8 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
extern void show_code(struct pt_regs *regs);
extern void print_fn_code(unsigned char *code, unsigned long len);
-extern int insn_to_mnemonic(unsigned char *instruction, char buf[8]);
+extern int insn_to_mnemonic(unsigned char *instruction, char *buf,
+ unsigned int len);
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 3ee5da3bc10c..559512a455da 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -9,9 +9,7 @@
#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
-#ifndef __s390x__
-#else /* __s390x__ */
-#endif /* __s390x__ */
+
extern long psw_kernel_bits;
extern long psw_user_bits;
@@ -77,8 +75,6 @@ struct per_struct_kernel {
#define PER_CONTROL_SUSPENSION 0x00400000UL
#define PER_CONTROL_ALTERATION 0x00200000UL
-#ifdef __s390x__
-#endif /* __s390x__ */
/*
* These are defined as per linux/ptrace.h, which see.
*/
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index fe7b99759e12..cd29d2f4e4f3 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -23,6 +23,7 @@
* type here is what we want [need] for both 32 bit and 64 bit systems.
*/
extern const unsigned int sys_call_table[];
+extern const unsigned int sys_call_table_emu[];
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 9e2cfe0349c3..eb5f64d26d06 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -14,13 +14,8 @@
#define THREAD_ORDER 1
#define ASYNC_ORDER 1
#else /* CONFIG_64BIT */
-#ifndef __SMALL_STACK
#define THREAD_ORDER 2
#define ASYNC_ORDER 2
-#else
-#define THREAD_ORDER 1
-#define ASYNC_ORDER 1
-#endif
#endif /* CONFIG_64BIT */
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
@@ -41,6 +36,7 @@ struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
+ unsigned long sys_call_table; /* System call table address */
unsigned int cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
struct restart_block restart_block;
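With the new sys_call_table member, the entry code (see the __TI_sysc_table uses in entry.S/entry64.S below) loads the system call table per task instead of selecting between sys_call_table and sys_call_table_emu on every system call. A hedged sketch of how the field could be initialized for a task; where this assignment actually lives (e.g. in the compat personality setup) is an assumption and is not shown in this diff:

/* Hedged sketch: pick the 31-bit emulation table once per task.  The
 * location of this assignment is assumed; only the new field and the
 * two table symbols are introduced by this patch.
 */
struct thread_info *ti = task_thread_info(tsk);

if (test_tsk_thread_flag(tsk, TIF_31BIT))
	ti->sys_call_table = (unsigned long) sys_call_table_emu;
else
	ti->sys_call_table = (unsigned long) sys_call_table;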
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index a5ca214b34fd..3aa9f1ec5b29 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -215,12 +215,6 @@ typedef struct
unsigned long addr;
} __attribute__ ((aligned(8))) psw_t;
-typedef struct
-{
- __u32 mask;
- __u32 addr;
-} __attribute__ ((aligned(8))) psw_compat_t;
-
#ifndef __s390x__
#define PSW_MASK_PER 0x40000000UL
@@ -295,20 +289,6 @@ typedef struct
unsigned long orig_gpr2;
} s390_regs;
-typedef struct
-{
- psw_compat_t psw;
- __u32 gprs[NUM_GPRS];
- __u32 acrs[NUM_ACRS];
- __u32 orig_gpr2;
-} s390_compat_regs;
-
-typedef struct
-{
- __u32 gprs_high[NUM_GPRS];
-} s390_compat_regs_high;
-
-
/*
* Now for the user space program event recording (trace) definitions.
* The following structures are used only for the ptrace interface, don't
diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h
index 5acca0a34c20..a61d538756f2 100644
--- a/arch/s390/include/uapi/asm/statfs.h
+++ b/arch/s390/include/uapi/asm/statfs.h
@@ -7,9 +7,6 @@
#ifndef _S390_STATFS_H
#define _S390_STATFS_H
-#ifndef __s390x__
-#include <asm-generic/statfs.h>
-#else
/*
* We can't use <asm-generic/statfs.h> because in 64-bit mode
* we mix ints of different sizes in our struct statfs.
@@ -21,49 +18,33 @@ typedef __kernel_fsid_t fsid_t;
#endif
struct statfs {
- int f_type;
- int f_bsize;
- long f_blocks;
- long f_bfree;
- long f_bavail;
- long f_files;
- long f_ffree;
+ unsigned int f_type;
+ unsigned int f_bsize;
+ unsigned long f_blocks;
+ unsigned long f_bfree;
+ unsigned long f_bavail;
+ unsigned long f_files;
+ unsigned long f_ffree;
__kernel_fsid_t f_fsid;
- int f_namelen;
- int f_frsize;
- int f_flags;
- int f_spare[4];
+ unsigned int f_namelen;
+ unsigned int f_frsize;
+ unsigned int f_flags;
+ unsigned int f_spare[4];
};
struct statfs64 {
- int f_type;
- int f_bsize;
- long f_blocks;
- long f_bfree;
- long f_bavail;
- long f_files;
- long f_ffree;
+ unsigned int f_type;
+ unsigned int f_bsize;
+ unsigned long f_blocks;
+ unsigned long f_bfree;
+ unsigned long f_bavail;
+ unsigned long f_files;
+ unsigned long f_ffree;
__kernel_fsid_t f_fsid;
- int f_namelen;
- int f_frsize;
- int f_flags;
- int f_spare[4];
+ unsigned int f_namelen;
+ unsigned int f_frsize;
+ unsigned int f_flags;
+ unsigned int f_spare[4];
};
-struct compat_statfs64 {
- __u32 f_type;
- __u32 f_bsize;
- __u64 f_blocks;
- __u64 f_bfree;
- __u64 f_bavail;
- __u64 f_files;
- __u64 f_ffree;
- __kernel_fsid_t f_fsid;
- __u32 f_namelen;
- __u32 f_frsize;
- __u32 f_flags;
- __u32 f_spare[4];
-};
-
-#endif /* __s390x__ */
#endif
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 2ac311ef5c9b..1386fcaf4ef6 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -14,16 +14,25 @@ endif
CFLAGS_smp.o := -Wno-nonnull
#
+# Disable tailcall optimizations for stack / callchain walking functions
+# since this might generate broken code when accessing register 15 and
+# passing its content to other functions.
+#
+CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
+CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
+
+#
# Pass UTS_MACHINE for user_regset definition
#
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
-obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
- processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
- debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
- sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
+obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o
+obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
+obj-y += debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o
+obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
+obj-y += dumpstack.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index fface87056eb..7a82f9f70100 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -35,6 +35,7 @@ int main(void)
DEFINE(__TI_task, offsetof(struct thread_info, task));
DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
DEFINE(__TI_flags, offsetof(struct thread_info, flags));
+ DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table));
DEFINE(__TI_cpu, offsetof(struct thread_info, cpu));
DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer));
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 6de049fbe62d..c439ac9ced09 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -362,6 +362,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
/* set extra registers only for synchronous signals */
regs->gprs[4] = regs->int_code & 127;
regs->gprs[5] = regs->int_parm_long;
+ regs->gprs[6] = task_thread_info(current)->last_break;
}
/* Place signal number on stack to allow backtrace from handler. */
@@ -421,6 +422,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->gprs[2] = map_signal(sig);
regs->gprs[3] = (__force __u64) &frame->info;
regs->gprs[4] = (__force __u64) &frame->uc;
+ regs->gprs[5] = task_thread_info(current)->last_break;
return 0;
give_sigsegv:
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 3ad5e9540160..7f4a4a8c847c 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1696,14 +1696,15 @@ static struct insn *find_insn(unsigned char *code)
* insn_to_mnemonic - decode an s390 instruction
* @instruction: instruction to decode
* @buf: buffer to fill with mnemonic
+ * @len: length of buffer
*
* Decode the instruction at @instruction and store the corresponding
- * mnemonic into @buf.
+ * mnemonic into @buf of length @len.
* @buf is left unchanged if the instruction could not be decoded.
* Returns:
* %0 on success, %-ENOENT if the instruction was not found.
*/
-int insn_to_mnemonic(unsigned char *instruction, char buf[8])
+int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len)
{
struct insn *insn;
@@ -1711,10 +1712,10 @@ int insn_to_mnemonic(unsigned char *instruction, char buf[8])
if (!insn)
return -ENOENT;
if (insn->name[0] == '\0')
- snprintf(buf, 8, "%s",
+ snprintf(buf, len, "%s",
long_insn_name[(int) insn->name[1]]);
else
- snprintf(buf, 8, "%.5s", insn->name);
+ snprintf(buf, len, "%.5s", insn->name);
return 0;
}
EXPORT_SYMBOL_GPL(insn_to_mnemonic);
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
new file mode 100644
index 000000000000..03dce39d01ee
--- /dev/null
+++ b/arch/s390/kernel/dumpstack.c
@@ -0,0 +1,236 @@
+/*
+ * Stack dumping functions
+ *
+ * Copyright IBM Corp. 1999, 2013
+ */
+
+#include <linux/kallsyms.h>
+#include <linux/hardirq.h>
+#include <linux/kprobes.h>
+#include <linux/utsname.h>
+#include <linux/export.h>
+#include <linux/kdebug.h>
+#include <linux/ptrace.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <asm/processor.h>
+#include <asm/debug.h>
+#include <asm/ipl.h>
+
+#ifndef CONFIG_64BIT
+#define LONG "%08lx "
+#define FOURLONG "%08lx %08lx %08lx %08lx\n"
+static int kstack_depth_to_print = 12;
+#else /* CONFIG_64BIT */
+#define LONG "%016lx "
+#define FOURLONG "%016lx %016lx %016lx %016lx\n"
+static int kstack_depth_to_print = 20;
+#endif /* CONFIG_64BIT */
+
+/*
+ * For show_trace we have three different stacks to consider:
+ * - the panic stack which is used if the kernel stack has overflowed
+ * - the asynchronous interrupt stack (cpu related)
+ * - the synchronous kernel stack (process related)
+ * The stack trace can start at any of the three stacks and can potentially
+ * touch all of them. The order is: panic stack, async stack, sync stack.
+ */
+static unsigned long
+__show_trace(unsigned long sp, unsigned long low, unsigned long high)
+{
+ struct stack_frame *sf;
+ struct pt_regs *regs;
+
+ while (1) {
+ sp = sp & PSW_ADDR_INSN;
+ if (sp < low || sp > high - sizeof(*sf))
+ return sp;
+ sf = (struct stack_frame *) sp;
+ printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
+ print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
+ /* Follow the backchain. */
+ while (1) {
+ low = sp;
+ sp = sf->back_chain & PSW_ADDR_INSN;
+ if (!sp)
+ break;
+ if (sp <= low || sp > high - sizeof(*sf))
+ return sp;
+ sf = (struct stack_frame *) sp;
+ printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
+ print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
+ }
+ /* Zero backchain detected, check for interrupt frame. */
+ sp = (unsigned long) (sf + 1);
+ if (sp <= low || sp > high - sizeof(*regs))
+ return sp;
+ regs = (struct pt_regs *) sp;
+ printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
+ print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
+ low = sp;
+ sp = regs->gprs[15];
+ }
+}
+
+static void show_trace(struct task_struct *task, unsigned long *stack)
+{
+ register unsigned long __r15 asm ("15");
+ unsigned long sp;
+
+ sp = (unsigned long) stack;
+ if (!sp)
+ sp = task ? task->thread.ksp : __r15;
+ printk("Call Trace:\n");
+#ifdef CONFIG_CHECK_STACK
+ sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
+ S390_lowcore.panic_stack);
+#endif
+ sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
+ S390_lowcore.async_stack);
+ if (task)
+ __show_trace(sp, (unsigned long) task_stack_page(task),
+ (unsigned long) task_stack_page(task) + THREAD_SIZE);
+ else
+ __show_trace(sp, S390_lowcore.thread_info,
+ S390_lowcore.thread_info + THREAD_SIZE);
+ if (!task)
+ task = current;
+ debug_show_held_locks(task);
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+ register unsigned long *__r15 asm ("15");
+ unsigned long *stack;
+ int i;
+
+ if (!sp)
+ stack = task ? (unsigned long *) task->thread.ksp : __r15;
+ else
+ stack = sp;
+
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
+ break;
+ if ((i * sizeof(long) % 32) == 0)
+ printk("%s ", i == 0 ? "" : "\n");
+ printk(LONG, *stack++);
+ }
+ printk("\n");
+ show_trace(task, sp);
+}
+
+static void show_last_breaking_event(struct pt_regs *regs)
+{
+#ifdef CONFIG_64BIT
+ printk("Last Breaking-Event-Address:\n");
+ printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
+ print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
+#endif
+}
+
+/*
+ * The architecture-independent dump_stack generator
+ */
+void dump_stack(void)
+{
+ printk("CPU: %d %s %s %.*s\n",
+ task_thread_info(current)->cpu, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+ printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
+ current->comm, current->pid, current,
+ (void *) current->thread.ksp);
+ show_stack(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);
+
+static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
+{
+ return (regs->psw.mask & bits) / ((~bits + 1) & bits);
+}
+
+void show_registers(struct pt_regs *regs)
+{
+ char *mode;
+
+ mode = user_mode(regs) ? "User" : "Krnl";
+ printk("%s PSW : %p %p",
+ mode, (void *) regs->psw.mask,
+ (void *) regs->psw.addr);
+ print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
+ printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
+ "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
+ mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
+ mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
+ mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
+ mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
+ mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
+#ifdef CONFIG_64BIT
+ printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
+#endif
+ printk("\n%s GPRS: " FOURLONG, mode,
+ regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
+ printk(" " FOURLONG,
+ regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
+ printk(" " FOURLONG,
+ regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
+ printk(" " FOURLONG,
+ regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
+ show_code(regs);
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ printk("CPU: %d %s %s %.*s\n",
+ task_thread_info(current)->cpu, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+ printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
+ current->comm, current->pid, current,
+ (void *) current->thread.ksp);
+ show_registers(regs);
+ /* Show stack backtrace if pt_regs is from kernel mode */
+ if (!user_mode(regs))
+ show_trace(NULL, (unsigned long *) regs->gprs[15]);
+ show_last_breaking_event(regs);
+}
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(struct pt_regs *regs, const char *str)
+{
+ static int die_counter;
+
+ oops_enter();
+ lgr_info_log();
+ debug_stop_all();
+ console_verbose();
+ spin_lock_irq(&die_lock);
+ bust_spinlocks(1);
+ printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
+#ifdef CONFIG_PREEMPT
+ printk("PREEMPT ");
+#endif
+#ifdef CONFIG_SMP
+ printk("SMP ");
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ printk("DEBUG_PAGEALLOC");
+#endif
+ printk("\n");
+ notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
+ print_modules();
+ show_regs(regs);
+ bust_spinlocks(0);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ spin_unlock_irq(&die_lock);
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception: panic_on_oops");
+ oops_exit();
+ do_exit(SIGSEGV);
+}
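mask_bits() right-aligns a PSW field by dividing by the lowest set bit of the mask, computed as (~bits + 1) & bits. A small worked example with assumed values (the real PSW_MASK_* constants are not repeated here):

/* Hedged illustration of the mask_bits() arithmetic only; mask and psw
 * are made-up example values, not real PSW_MASK_* constants.
 */
unsigned long mask = 0x00c0UL;			/* 2-bit field at bits 6..7 */
unsigned long psw  = 0x0080UL;			/* example PSW mask value   */
unsigned long lsb  = (~mask + 1) & mask;	/* 0x0040                   */
unsigned long val  = (psw & mask) / lsb;	/* 0x0080 / 0x0040 = 2      */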
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 94feff7d6132..4d5e6f8a7978 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -45,6 +45,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
+STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
#define BASED(name) name-system_call(%r13)
@@ -97,10 +98,10 @@ STACK_SIZE = 1 << STACK_SHIFT
sra %r14,\shift
jnz 1f
CHECK_STACK 1<<\shift,\savearea
+ ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: l %r15,\stack # load target stack
-2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
+2: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
.macro ADD64 high,low,timer
@@ -150,7 +151,7 @@ ENTRY(__switch_to)
l %r4,__THREAD_info(%r2) # get thread_info of prev
l %r5,__THREAD_info(%r3) # get thread_info of next
lr %r15,%r5
- ahi %r15,STACK_SIZE # end of kernel stack of next
+ ahi %r15,STACK_INIT # end of kernel stack of next
st %r3,__LC_CURRENT # store task struct of next
st %r5,__LC_THREAD_INFO # store thread info of next
st %r15,__LC_KERNEL_STACK # store end of kernel stack
@@ -178,7 +179,6 @@ sysc_stm:
l %r13,__LC_SVC_NEW_PSW+4
sysc_per:
l %r15,__LC_KERNEL_STACK
- ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
sysc_vtime:
UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
@@ -188,6 +188,7 @@ sysc_vtime:
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
sysc_do_svc:
oi __TI_flags+3(%r12),_TIF_SYSCALL
+ l %r10,__TI_sysc_table(%r12) # 31 bit system call table
lh %r8,__PT_INT_CODE+2(%r11)
sla %r8,2 # shift and test for svc0
jnz sysc_nr_ok
@@ -198,7 +199,6 @@ sysc_do_svc:
lr %r8,%r1
sla %r8,2
sysc_nr_ok:
- l %r10,BASED(.Lsys_call_table) # 31 bit system call table
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
st %r2,__PT_ORIG_GPR2(%r11)
st %r7,STACK_FRAME_OVERHEAD(%r15)
@@ -359,11 +359,11 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz pgm_svcper # -> single stepped svc
0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+ ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
l %r15,__LC_KERNEL_STACK
-2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
+2: la %r11,STACK_FRAME_OVERHEAD(%r15)
stm %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
stm %r8,%r9,__PT_PSW(%r11)
@@ -485,7 +485,6 @@ io_work:
#
io_work_user:
l %r1,__LC_KERNEL_STACK
- ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -646,7 +645,6 @@ mcck_skip:
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno mcck_return
l %r1,__LC_KERNEL_STACK # switch to kernel stack
- ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r15)
@@ -674,6 +672,7 @@ mcck_panic:
sra %r14,PAGE_SHIFT
jz 0f
l %r15,__LC_PANIC_STACK
+ j mcck_skip
0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j mcck_skip
@@ -714,12 +713,10 @@ ENTRY(restart_int_handler)
*/
stack_overflow:
l %r15,__LC_PANIC_STACK # change to panic stack
- ahi %r15,-__PT_SIZE # create pt_regs
- stm %r0,%r7,__PT_R0(%r15)
- stm %r8,%r9,__PT_PSW(%r15)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
+ stm %r0,%r7,__PT_R0(%r11)
+ stm %r8,%r9,__PT_PSW(%r11)
mvc __PT_R8(32,%r11),0(%r14)
- lr %r15,%r11
- ahi %r15,-STACK_FRAME_OVERHEAD
l %r1,BASED(1f)
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
lr %r2,%r11 # pass pointer to pt_regs
@@ -799,15 +796,14 @@ cleanup_system_call:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
# set up saved register 11
l %r15,__LC_KERNEL_STACK
- ahi %r15,-__PT_SIZE
- st %r15,12(%r11) # r11 pt_regs pointer
+ la %r9,STACK_FRAME_OVERHEAD(%r15)
+ st %r9,12(%r11) # r11 pt_regs pointer
# fill pt_regs
- mvc __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC
- stm %r0,%r7,__PT_R0(%r15)
- mvc __PT_PSW(8,%r15),__LC_SVC_OLD_PSW
- mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC
+ mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC
+ stm %r0,%r7,__PT_R0(%r9)
+ mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW
+ mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
# setup saved register 15
- ahi %r15,-STACK_FRAME_OVERHEAD
st %r15,28(%r11) # r15 stack pointer
# set new psw address and exit
l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000
@@ -910,7 +906,6 @@ cleanup_idle_wait:
.Ltrace_enter: .long do_syscall_trace_enter
.Ltrace_exit: .long do_syscall_trace_exit
.Lschedule_tail: .long schedule_tail
-.Lsys_call_table: .long sys_call_table
.Lsysc_per: .long sysc_per + 0x80000000
#ifdef CONFIG_TRACE_IRQFLAGS
.Lhardirqs_on: .long trace_hardirqs_on_caller
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index c3a736a3ed44..aa0ab02e9595 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -7,6 +7,7 @@
#include <asm/cputime.h>
extern void *restart_stack;
+extern unsigned long suspend_zero_pages;
void system_call(void);
void pgm_check_handler(void);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 2e6d60c55f90..4c17eece707e 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -39,6 +39,7 @@ __PT_R15 = __PT_GPRS + 120
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
+STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_PER_TRAP )
@@ -124,10 +125,10 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
srag %r14,%r14,\shift
jnz 1f
CHECK_STACK 1<<\shift,\savearea
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: lg %r15,\stack # load target stack
-2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
+2: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
.macro UPDATE_VTIME scratch,enter_timer
@@ -177,7 +178,7 @@ ENTRY(__switch_to)
lg %r4,__THREAD_info(%r2) # get thread_info of prev
lg %r5,__THREAD_info(%r3) # get thread_info of next
lgr %r15,%r5
- aghi %r15,STACK_SIZE # end of kernel stack of next
+ aghi %r15,STACK_INIT # end of kernel stack of next
stg %r3,__LC_CURRENT # store task struct of next
stg %r5,__LC_THREAD_INFO # store thread info of next
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
@@ -203,10 +204,8 @@ sysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
- larl %r13,system_call
sysc_per:
lg %r15,__LC_KERNEL_STACK
- aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
sysc_vtime:
UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
@@ -217,6 +216,7 @@ sysc_vtime:
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
sysc_do_svc:
oi __TI_flags+7(%r12),_TIF_SYSCALL
+ lg %r10,__TI_sysc_table(%r12) # address of system call table
llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
jnz sysc_nr_ok
@@ -227,13 +227,6 @@ sysc_do_svc:
sth %r1,__PT_INT_CODE+2(%r11)
slag %r8,%r1,2
sysc_nr_ok:
- larl %r10,sys_call_table # 64 bit system call table
-#ifdef CONFIG_COMPAT
- tm __TI_flags+5(%r12),(_TIF_31BIT>>16)
- jno sysc_noemu
- larl %r10,sys_call_table_emu # 31 bit system call table
-sysc_noemu:
-#endif
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stg %r2,__PT_ORIG_GPR2(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
@@ -389,6 +382,7 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz pgm_svcper # -> single stepped svc
0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
LAST_BREAK %r14
@@ -398,8 +392,7 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
jz 2f
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
-2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
+2: la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
stmg %r8,%r9,__PT_PSW(%r11)
@@ -526,7 +519,6 @@ io_work:
#
io_work_user:
lg %r1,__LC_KERNEL_STACK
- aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -688,7 +680,6 @@ mcck_skip:
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno mcck_return
lg %r1,__LC_KERNEL_STACK # switch to kernel stack
- aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -755,14 +746,12 @@ ENTRY(restart_int_handler)
* Setup a pt_regs so that show_trace can provide a good call trace.
*/
stack_overflow:
- lg %r11,__LC_PANIC_STACK # change to panic stack
- aghi %r11,-__PT_SIZE # create pt_regs
+ lg %r15,__LC_PANIC_STACK # change to panic stack
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_R8(64,%r11),0(%r14)
stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
- lgr %r15,%r11
- aghi %r15,-STACK_FRAME_OVERHEAD
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
jg kernel_stack_overflow
@@ -846,15 +835,14 @@ cleanup_system_call:
mvc __TI_last_break(8,%r12),16(%r11)
0: # set up saved register r11
lg %r15,__LC_KERNEL_STACK
- aghi %r15,-__PT_SIZE
- stg %r15,24(%r11) # r11 pt_regs pointer
+ la %r9,STACK_FRAME_OVERHEAD(%r15)
+ stg %r9,24(%r11) # r11 pt_regs pointer
# fill pt_regs
- mvc __PT_R8(64,%r15),__LC_SAVE_AREA_SYNC
- stmg %r0,%r7,__PT_R0(%r15)
- mvc __PT_PSW(16,%r15),__LC_SVC_OLD_PSW
- mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC
+ mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
+ stmg %r0,%r7,__PT_R0(%r9)
+ mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
+ mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
# setup saved register r15
- aghi %r15,-STACK_FRAME_OVERHEAD
stg %r15,56(%r11) # r15 stack pointer
# set new psw address and exit
larl %r9,sysc_do_svc
@@ -1011,6 +999,7 @@ sys_call_table:
#ifdef CONFIG_COMPAT
#define SYSCALL(esa,esame,emu) .long emu
+ .globl sys_call_table_emu
sys_call_table_emu:
#include "syscalls.S"
#undef SYSCALL
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index b3de27700016..ac2178161ec3 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -13,6 +13,7 @@
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
+#include <linux/suspend.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
@@ -67,6 +68,35 @@ void setup_regs(void)
memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
}
+/*
+ * PM notifier callback for kdump
+ */
+static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
+ void *ptr)
+{
+ switch (action) {
+ case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ if (crashk_res.start)
+ crash_map_reserved_pages();
+ break;
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ if (crashk_res.start)
+ crash_unmap_reserved_pages();
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static int __init machine_kdump_pm_init(void)
+{
+ pm_notifier(machine_kdump_pm_cb, 0);
+ return 0;
+}
+arch_initcall(machine_kdump_pm_init);
#endif
/*
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 29268859d8ee..0f419c5765c8 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -377,11 +377,14 @@ static void __init setup_lowcore(void)
PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
lc->clock_comparator = -1ULL;
- lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
+ lc->kernel_stack = ((unsigned long) &init_thread_union)
+ + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->async_stack = (unsigned long)
- __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
+ __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
+ + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->panic_stack = (unsigned long)
- __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
+ __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
+ + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long) init_thread_union.thread_info.task;
lc->thread_info = (unsigned long) &init_thread_union;
lc->machine_flags = S390_lowcore.machine_flags;
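Together with STACK_INIT in entry.S/entry64.S above, the lowcore stack pointers now already leave room for a register save area plus a struct pt_regs at the top of each stack, which is why the entry code can drop its explicit ahi/aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE). A symbolic sketch of the invariant (stack_base is an assumed variable name for illustration):

/* Hedged sketch of the new invariant; stack_base is illustrative. */
unsigned long stack_top  = stack_base + THREAD_SIZE;
unsigned long stack_init = stack_top - STACK_FRAME_OVERHEAD
				     - sizeof(struct pt_regs);
/* __LC_KERNEL_STACK now holds stack_init, so loading it into %r15
 * directly yields a frame that can hold a pt_regs without a further
 * subtraction in the entry code. */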
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 549c9d173c0f..8bde89eafd88 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -181,8 +181,10 @@ static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc = pcpu->lowcore;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
- lc->async_stack = pcpu->async_stack + ASYNC_SIZE;
- lc->panic_stack = pcpu->panic_stack + PAGE_SIZE;
+ lc->async_stack = pcpu->async_stack + ASYNC_SIZE
+ - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
+ lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
+ - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->cpu_nr = cpu;
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE) {
@@ -253,7 +255,8 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
struct _lowcore *lc = pcpu->lowcore;
struct thread_info *ti = task_thread_info(tsk);
- lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE;
+ lc->kernel_stack = (unsigned long) task_stack_page(tsk)
+ + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->thread_info = (unsigned long) task_thread_info(tsk);
lc->current_task = (unsigned long) tsk;
lc->user_timer = ti->user_timer;
@@ -810,8 +813,10 @@ void __init smp_prepare_boot_cpu(void)
pcpu->state = CPU_STATE_CONFIGURED;
pcpu->address = boot_cpu_address;
pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
- pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
- pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
+ pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
+ + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+ pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
+ + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
S390_lowcore.percpu_offset = __per_cpu_offset[0];
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
set_cpu_present(0, true);
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index aa1494d0e380..c479d2f9605b 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -41,6 +41,7 @@ struct page_key_data {
static struct page_key_data *page_key_data;
static struct page_key_data *page_key_rp, *page_key_wp;
static unsigned long page_key_rx, page_key_wx;
+unsigned long suspend_zero_pages;
/*
* For each page in the hibernation image one additional byte is
@@ -149,6 +150,36 @@ int pfn_is_nosave(unsigned long pfn)
return 0;
}
+/*
+ * PM notifier callback for suspend
+ */
+static int suspend_pm_cb(struct notifier_block *nb, unsigned long action,
+ void *ptr)
+{
+ switch (action) {
+ case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ suspend_zero_pages = __get_free_pages(GFP_KERNEL, LC_ORDER);
+ if (!suspend_zero_pages)
+ return NOTIFY_BAD;
+ break;
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ free_pages(suspend_zero_pages, LC_ORDER);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static int __init suspend_pm_init(void)
+{
+ pm_notifier(suspend_pm_cb, 0);
+ return 0;
+}
+arch_initcall(suspend_pm_init);
+
void save_processor_state(void)
{
/* swsusp_arch_suspend() actually saves all cpu register contents.
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index d4ca4e0617b5..c487be4cfc81 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -36,8 +36,8 @@ ENTRY(swsusp_arch_suspend)
/* Store prefix register on stack */
stpx __SF_EMPTY(%r15)
- /* Save prefix register contents for lowcore */
- llgf %r4,__SF_EMPTY(%r15)
+ /* Save prefix register contents for lowcore copy */
+ llgf %r10,__SF_EMPTY(%r15)
/* Get pointer to save area */
lghi %r1,0x1000
@@ -91,7 +91,18 @@ ENTRY(swsusp_arch_suspend)
xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
spx __SF_EMPTY(%r15)
+ /* Save absolute zero pages */
+ larl %r2,suspend_zero_pages
+ lg %r2,0(%r2)
+ lghi %r4,0
+ lghi %r3,2*PAGE_SIZE
+ lghi %r5,2*PAGE_SIZE
+1: mvcle %r2,%r4,0
+ jo 1b
+
+ /* Copy lowcore to absolute zero lowcore */
lghi %r2,0
+ lgr %r4,%r10
lghi %r3,2*PAGE_SIZE
lghi %r5,2*PAGE_SIZE
1: mvcle %r2,%r4,0
@@ -248,8 +259,20 @@ restore_registers:
/* Load old stack */
lg %r15,0x2f8(%r13)
+ /* Save prefix register */
+ mvc __SF_EMPTY(4,%r15),0x318(%r13)
+
+ /* Restore absolute zero pages */
+ lghi %r2,0
+ larl %r4,suspend_zero_pages
+ lg %r4,0(%r4)
+ lghi %r3,2*PAGE_SIZE
+ lghi %r5,2*PAGE_SIZE
+1: mvcle %r2,%r4,0
+ jo 1b
+
/* Restore prefix register */
- spx 0x318(%r13)
+ spx __SF_EMPTY(%r15)
/* Activate DAT */
stosm __SF_EMPTY(%r15),0x04
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 13dd63fba367..c5762324d9ee 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -12,49 +12,16 @@
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'asm.s'.
*/
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
#include <linux/ptrace.h>
-#include <linux/timer.h>
+#include <linux/sched.h>
#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/seq_file.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/kdebug.h>
-#include <linux/kallsyms.h>
-#include <linux/reboot.h>
-#include <linux/kprobes.h>
-#include <linux/bug.h>
-#include <linux/utsname.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <linux/atomic.h>
-#include <asm/mathemu.h>
-#include <asm/cpcmd.h>
-#include <asm/lowcore.h>
-#include <asm/debug.h>
-#include <asm/ipl.h>
#include "entry.h"
int show_unhandled_signals = 1;
-#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
-
-#ifndef CONFIG_64BIT
-#define LONG "%08lx "
-#define FOURLONG "%08lx %08lx %08lx %08lx\n"
-static int kstack_depth_to_print = 12;
-#else /* CONFIG_64BIT */
-#define LONG "%016lx "
-#define FOURLONG "%016lx %016lx %016lx %016lx\n"
-static int kstack_depth_to_print = 20;
-#endif /* CONFIG_64BIT */
-
static inline void __user *get_trap_ip(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
@@ -72,215 +39,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
#endif
}
-/*
- * For show_trace we have tree different stack to consider:
- * - the panic stack which is used if the kernel stack has overflown
- * - the asynchronous interrupt stack (cpu related)
- * - the synchronous kernel stack (process related)
- * The stack trace can start at any of the three stack and can potentially
- * touch all of them. The order is: panic stack, async stack, sync stack.
- */
-static unsigned long
-__show_trace(unsigned long sp, unsigned long low, unsigned long high)
-{
- struct stack_frame *sf;
- struct pt_regs *regs;
-
- while (1) {
- sp = sp & PSW_ADDR_INSN;
- if (sp < low || sp > high - sizeof(*sf))
- return sp;
- sf = (struct stack_frame *) sp;
- printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
- print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
- /* Follow the backchain. */
- while (1) {
- low = sp;
- sp = sf->back_chain & PSW_ADDR_INSN;
- if (!sp)
- break;
- if (sp <= low || sp > high - sizeof(*sf))
- return sp;
- sf = (struct stack_frame *) sp;
- printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
- print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
- }
- /* Zero backchain detected, check for interrupt frame. */
- sp = (unsigned long) (sf + 1);
- if (sp <= low || sp > high - sizeof(*regs))
- return sp;
- regs = (struct pt_regs *) sp;
- printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
- print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
- low = sp;
- sp = regs->gprs[15];
- }
-}
-
-static void show_trace(struct task_struct *task, unsigned long *stack)
-{
- register unsigned long __r15 asm ("15");
- unsigned long sp;
-
- sp = (unsigned long) stack;
- if (!sp)
- sp = task ? task->thread.ksp : __r15;
- printk("Call Trace:\n");
-#ifdef CONFIG_CHECK_STACK
- sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
- S390_lowcore.panic_stack);
-#endif
- sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
- S390_lowcore.async_stack);
- if (task)
- __show_trace(sp, (unsigned long) task_stack_page(task),
- (unsigned long) task_stack_page(task) + THREAD_SIZE);
- else
- __show_trace(sp, S390_lowcore.thread_info,
- S390_lowcore.thread_info + THREAD_SIZE);
- if (!task)
- task = current;
- debug_show_held_locks(task);
-}
-
-void show_stack(struct task_struct *task, unsigned long *sp)
-{
- register unsigned long * __r15 asm ("15");
- unsigned long *stack;
- int i;
-
- if (!sp)
- stack = task ? (unsigned long *) task->thread.ksp : __r15;
- else
- stack = sp;
-
- for (i = 0; i < kstack_depth_to_print; i++) {
- if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
- break;
- if ((i * sizeof(long) % 32) == 0)
- printk("%s ", i == 0 ? "" : "\n");
- printk(LONG, *stack++);
- }
- printk("\n");
- show_trace(task, sp);
-}
-
-static void show_last_breaking_event(struct pt_regs *regs)
-{
-#ifdef CONFIG_64BIT
- printk("Last Breaking-Event-Address:\n");
- printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
- print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
-#endif
-}
-
-/*
- * The architecture-independent dump_stack generator
- */
-void dump_stack(void)
-{
- printk("CPU: %d %s %s %.*s\n",
- task_thread_info(current)->cpu, print_tainted(),
- init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
- printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
- current->comm, current->pid, current,
- (void *) current->thread.ksp);
- show_stack(NULL, NULL);
-}
-EXPORT_SYMBOL(dump_stack);
-
-static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
-{
- return (regs->psw.mask & bits) / ((~bits + 1) & bits);
-}
-
-void show_registers(struct pt_regs *regs)
-{
- char *mode;
-
- mode = user_mode(regs) ? "User" : "Krnl";
- printk("%s PSW : %p %p",
- mode, (void *) regs->psw.mask,
- (void *) regs->psw.addr);
- print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
- printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
- "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
- mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
- mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
- mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
- mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
- mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
-#ifdef CONFIG_64BIT
- printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
-#endif
- printk("\n%s GPRS: " FOURLONG, mode,
- regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
- printk(" " FOURLONG,
- regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
- printk(" " FOURLONG,
- regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
- printk(" " FOURLONG,
- regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
-
- show_code(regs);
-}
-
-void show_regs(struct pt_regs *regs)
-{
- printk("CPU: %d %s %s %.*s\n",
- task_thread_info(current)->cpu, print_tainted(),
- init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
- printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
- current->comm, current->pid, current,
- (void *) current->thread.ksp);
- show_registers(regs);
- /* Show stack backtrace if pt_regs is from kernel mode */
- if (!user_mode(regs))
- show_trace(NULL, (unsigned long *) regs->gprs[15]);
- show_last_breaking_event(regs);
-}
-
-static DEFINE_SPINLOCK(die_lock);
-
-void die(struct pt_regs *regs, const char *str)
-{
- static int die_counter;
-
- oops_enter();
- lgr_info_log();
- debug_stop_all();
- console_verbose();
- spin_lock_irq(&die_lock);
- bust_spinlocks(1);
- printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
-#ifdef CONFIG_PREEMPT
- printk("PREEMPT ");
-#endif
-#ifdef CONFIG_SMP
- printk("SMP ");
-#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
- printk("DEBUG_PAGEALLOC");
-#endif
- printk("\n");
- notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
- print_modules();
- show_regs(regs);
- bust_spinlocks(0);
- add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
- spin_unlock_irq(&die_lock);
- if (in_interrupt())
- panic("Fatal exception in interrupt");
- if (panic_on_oops)
- panic("Fatal exception: panic_on_oops");
- oops_exit();
- do_exit(SIGSEGV);
-}
-
static inline void report_user_fault(struct pt_regs *regs, int signr)
{
if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h
index 2b29e62351d3..53252d2d4720 100644
--- a/arch/s390/kvm/trace.h
+++ b/arch/s390/kvm/trace.h
@@ -117,7 +117,7 @@ TRACE_EVENT(kvm_s390_intercept_instruction,
__entry->instruction,
insn_to_mnemonic((unsigned char *)
&__entry->instruction,
- __entry->insn) ?
+ __entry->insn, sizeof(__entry->insn)) ?
"unknown" : __entry->insn)
);
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 479e94282910..9d84a1feefef 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -458,12 +458,10 @@ static int __init cmm_init(void)
if (rc)
goto out_pm;
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
- rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
- if (rc)
- goto out_kthread;
- return 0;
+ if (!IS_ERR(cmm_thread_ptr))
+ return 0;
-out_kthread:
+ rc = PTR_ERR(cmm_thread_ptr);
unregister_pm_notifier(&cmm_power_notifier);
out_pm:
unregister_oom_notifier(&cmm_oom_nb);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 2fb9e63b8fc4..047c3e4c59a2 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -395,8 +395,13 @@ void __kprobes do_protection_exception(struct pt_regs *regs)
int fault;
trans_exc_code = regs->int_parm_long;
- /* Protection exception is suppressing, decrement psw address. */
- regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
+ /*
+ * Protection exceptions are suppressing, decrement psw address.
+	 * The exception to this rule is aborted transactions; for these
+	 * the PSW already points to the correct location.
+ */
+ if (!(regs->int_code & 0x200))
+ regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
/*
* Check for low-address protection. This needs to be treated
* as a special case because the translation exception code
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 49ce6bb2c641..9f9c315b4c07 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -63,10 +63,18 @@ static unsigned long __init setup_zero_pages(void)
break;
case 0x2097: /* z10 */
case 0x2098: /* z10 */
- default:
+ case 0x2817: /* z196 */
+ case 0x2818: /* z196 */
order = 2;
break;
+ case 0x2827: /* zEC12 */
+ default:
+ order = 5;
+ break;
}
+ /* Limit number of empty zero pages for small memory sizes */
+ if (order > 2 && totalram_pages <= 16384)
+ order = 2;
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!empty_zero_page)
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index d21040ed5e59..80adfbf75065 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -9,31 +9,25 @@
#include <asm/pgtable.h>
#include <asm/page.h>
+static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
+{
+ asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
+ : [addr] "+a" (addr) : [skey] "d" (skey));
+ return addr;
+}
+
void storage_key_init_range(unsigned long start, unsigned long end)
{
- unsigned long boundary, function, size;
+ unsigned long boundary, size;
while (start < end) {
- if (MACHINE_HAS_EDAT2) {
- /* set storage keys for a 2GB frame */
- function = 0x22000 | PAGE_DEFAULT_KEY;
- size = 1UL << 31;
- boundary = (start + size) & ~(size - 1);
- if (boundary <= end) {
- do {
- start = pfmf(function, start);
- } while (start < boundary);
- continue;
- }
- }
if (MACHINE_HAS_EDAT1) {
/* set storage keys for a 1MB frame */
- function = 0x21000 | PAGE_DEFAULT_KEY;
size = 1UL << 20;
boundary = (start + size) & ~(size - 1);
if (boundary <= end) {
do {
- start = pfmf(function, start);
+ start = sske_frame(start, PAGE_DEFAULT_KEY);
} while (start < boundary);
continue;
}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index ae44d2a34313..bd954e96f51c 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -379,75 +379,183 @@ out_unmap:
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
-/*
- * this function is assumed to be called with mmap_sem held
- */
-unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
{
- unsigned long *table, vmaddr, segment;
- struct mm_struct *mm;
- struct gmap_pgtable *mp;
- struct gmap_rmap *rmap;
- struct vm_area_struct *vma;
- struct page *page;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
+ unsigned long *table;
- current->thread.gmap_addr = address;
- mm = gmap->mm;
- /* Walk the gmap address space page table */
table = gmap->table + ((address >> 53) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
- return -EFAULT;
+ return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 42) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
- return -EFAULT;
+ return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 31) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
- return -EFAULT;
+ return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 20) & 0x7ff);
+ return table;
+}
+
+/**
+ * __gmap_translate - translate a guest address to a user space address
+ * @address: guest address
+ * @gmap: pointer to guest mapping meta data structure
+ *
+ * Returns user space address which corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ * The mmap_sem of the mm that belongs to the address space must be held
+ * when this function gets called.
+ */
+unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *segment_ptr, vmaddr, segment;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ current->thread.gmap_addr = address;
+ segment_ptr = gmap_table_walk(address, gmap);
+ if (IS_ERR(segment_ptr))
+ return PTR_ERR(segment_ptr);
/* Convert the gmap address to an mm address. */
- segment = *table;
- if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
+ segment = *segment_ptr;
+ if (!(segment & _SEGMENT_ENTRY_INV)) {
page = pfn_to_page(segment >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index;
return mp->vmaddr | (address & ~PMD_MASK);
} else if (segment & _SEGMENT_ENTRY_RO) {
vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
- vma = find_vma(mm, vmaddr);
- if (!vma || vma->vm_start > vmaddr)
- return -EFAULT;
-
- /* Walk the parent mm page table */
- pgd = pgd_offset(mm, vmaddr);
- pud = pud_alloc(mm, pgd, vmaddr);
- if (!pud)
- return -ENOMEM;
- pmd = pmd_alloc(mm, pud, vmaddr);
- if (!pmd)
- return -ENOMEM;
- if (!pmd_present(*pmd) &&
- __pte_alloc(mm, vma, pmd, vmaddr))
- return -ENOMEM;
- /* pmd now points to a valid segment table entry. */
- rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
- if (!rmap)
- return -ENOMEM;
- /* Link gmap segment table entry location to page table. */
- page = pmd_page(*pmd);
- mp = (struct gmap_pgtable *) page->index;
- rmap->entry = table;
- spin_lock(&mm->page_table_lock);
+ return vmaddr | (address & ~PMD_MASK);
+ }
+ return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(__gmap_translate);
+
+/**
+ * gmap_translate - translate a guest address to a user space address
+ * @address: guest address
+ * @gmap: pointer to guest mapping meta data structure
+ *
+ * Returns user space address which corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ */
+unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
+{
+ unsigned long rc;
+
+ down_read(&gmap->mm->mmap_sem);
+ rc = __gmap_translate(address, gmap);
+ up_read(&gmap->mm->mmap_sem);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_translate);
+
+static int gmap_connect_pgtable(unsigned long segment,
+ unsigned long *segment_ptr,
+ struct gmap *gmap)
+{
+ unsigned long vmaddr;
+ struct vm_area_struct *vma;
+ struct gmap_pgtable *mp;
+ struct gmap_rmap *rmap;
+ struct mm_struct *mm;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ mm = gmap->mm;
+ vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
+ vma = find_vma(mm, vmaddr);
+ if (!vma || vma->vm_start > vmaddr)
+ return -EFAULT;
+ /* Walk the parent mm page table */
+ pgd = pgd_offset(mm, vmaddr);
+ pud = pud_alloc(mm, pgd, vmaddr);
+ if (!pud)
+ return -ENOMEM;
+ pmd = pmd_alloc(mm, pud, vmaddr);
+ if (!pmd)
+ return -ENOMEM;
+ if (!pmd_present(*pmd) &&
+ __pte_alloc(mm, vma, pmd, vmaddr))
+ return -ENOMEM;
+ /* pmd now points to a valid segment table entry. */
+ rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
+ if (!rmap)
+ return -ENOMEM;
+ /* Link gmap segment table entry location to page table. */
+ page = pmd_page(*pmd);
+ mp = (struct gmap_pgtable *) page->index;
+ rmap->entry = segment_ptr;
+ spin_lock(&mm->page_table_lock);
+ if (*segment_ptr == segment) {
list_add(&rmap->list, &mp->mapper);
- spin_unlock(&mm->page_table_lock);
/* Set gmap segment table entry to page table. */
- *table = pmd_val(*pmd) & PAGE_MASK;
- return vmaddr | (address & ~PMD_MASK);
+ *segment_ptr = pmd_val(*pmd) & PAGE_MASK;
+ rmap = NULL;
+ }
+ spin_unlock(&mm->page_table_lock);
+ kfree(rmap);
+ return 0;
+}
+
+static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
+{
+ struct gmap_rmap *rmap, *next;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ int flush;
+
+ flush = 0;
+ spin_lock(&mm->page_table_lock);
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
+ *rmap->entry =
+ _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+ list_del(&rmap->list);
+ kfree(rmap);
+ flush = 1;
+ }
+ spin_unlock(&mm->page_table_lock);
+ if (flush)
+ __tlb_flush_global();
+}
+
+/*
+ * this function is assumed to be called with mmap_sem held
+ */
+unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *segment_ptr, segment;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ int rc;
+
+ current->thread.gmap_addr = address;
+ segment_ptr = gmap_table_walk(address, gmap);
+ if (IS_ERR(segment_ptr))
+ return -EFAULT;
+ /* Convert the gmap address to an mm address. */
+ while (1) {
+ segment = *segment_ptr;
+ if (!(segment & _SEGMENT_ENTRY_INV)) {
+ /* Page table is present */
+ page = pfn_to_page(segment >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ return mp->vmaddr | (address & ~PMD_MASK);
+ }
+ if (!(segment & _SEGMENT_ENTRY_RO))
+ /* Nothing mapped in the gmap address space. */
+ break;
+ rc = gmap_connect_pgtable(segment, segment_ptr, gmap);
+ if (rc)
+ return rc;
}
return -EFAULT;
}
@@ -511,29 +619,6 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
}
EXPORT_SYMBOL_GPL(gmap_discard);
-void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
-{
- struct gmap_rmap *rmap, *next;
- struct gmap_pgtable *mp;
- struct page *page;
- int flush;
-
- flush = 0;
- spin_lock(&mm->page_table_lock);
- page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
- mp = (struct gmap_pgtable *) page->index;
- list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
- *rmap->entry =
- _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
- list_del(&rmap->list);
- kfree(rmap);
- flush = 1;
- }
- spin_unlock(&mm->page_table_lock);
- if (flush)
- __tlb_flush_global();
-}
-
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
unsigned long vmaddr)
{
@@ -586,8 +671,8 @@ static inline void page_table_free_pgste(unsigned long *table)
{
}
-static inline void gmap_unmap_notifier(struct mm_struct *mm,
- unsigned long *table)
+static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
+ unsigned long *table)
{
}
@@ -653,7 +738,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
unsigned int bit, mask;
if (mm_has_pgste(mm)) {
- gmap_unmap_notifier(mm, table);
+ gmap_disconnect_pgtable(mm, table);
return page_table_free_pgste(table);
}
/* Free 1K/2K page table fragment of a 4K page */
@@ -696,7 +781,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
mm = tlb->mm;
if (mm_has_pgste(mm)) {
- gmap_unmap_notifier(mm, table);
+ gmap_disconnect_pgtable(mm, table);
table = (unsigned long *) (__pa(table) | FRAG_MASK);
tlb_remove_table(tlb, table);
return;
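gmap_translate() lets callers such as KVM resolve a guest address to a user space address without establishing missing page table entries. A hedged usage sketch; guest_addr and gmap are assumed caller variables, and the IS_ERR_VALUE() check is an assumption based on the -EFAULT return documented above:

/* Hedged usage sketch for the new translation helper. */
unsigned long uaddr;

uaddr = gmap_translate(guest_addr, gmap);
if (IS_ERR_VALUE(uaddr))
	return -EFAULT;		/* no mapping for this guest address */
/* uaddr is the user space address backing guest_addr */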
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 0972e91cced2..82f165f8078c 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -747,10 +747,9 @@ void bpf_jit_compile(struct sk_filter *fp)
if (!bpf_jit_enable)
return;
- addrs = kmalloc(fp->len * sizeof(*addrs), GFP_KERNEL);
+ addrs = kcalloc(fp->len, sizeof(*addrs), GFP_KERNEL);
if (addrs == NULL)
return;
- memset(addrs, 0, fp->len * sizeof(*addrs));
memset(&jit, 0, sizeof(cjit));
memset(&cjit, 0, sizeof(cjit));
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index f0f426a113ce..086a2e37935d 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -2,5 +2,5 @@
# Makefile for the s390 PCI subsystem.
#
-obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o \
- pci_sysfs.o pci_event.o pci_debug.o
+obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o pci_sysfs.o \
+ pci_event.o pci_debug.o pci_insn.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 27b4c17855b9..e6f15b5d8b7d 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -99,9 +99,6 @@ static int __read_mostly aisb_max;
static struct kmem_cache *zdev_irq_cache;
static struct kmem_cache *zdev_fmb_cache;
-debug_info_t *pci_debug_msg_id;
-debug_info_t *pci_debug_err_id;
-
static inline int irq_to_msi_nr(unsigned int irq)
{
return irq & ZPCI_MSI_MASK;
@@ -179,7 +176,7 @@ static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
fib->aisb = (u64) bucket->aisb + aisb / 8;
fib->aisbo = aisb & ZPCI_MSI_MASK;
- rc = mpcifc_instr(req, fib);
+ rc = s390pci_mod_fc(req, fib);
pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
free_page((unsigned long) fib);
@@ -209,7 +206,7 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args
fib->iota = args->iota;
fib->fmb_addr = args->fmb_addr;
- rc = mpcifc_instr(req, fib);
+ rc = s390pci_mod_fc(req, fib);
free_page((unsigned long) fib);
return rc;
}
@@ -249,10 +246,9 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
if (zdev->fmb)
return -EINVAL;
- zdev->fmb = kmem_cache_alloc(zdev_fmb_cache, GFP_KERNEL);
+ zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
if (!zdev->fmb)
return -ENOMEM;
- memset(zdev->fmb, 0, sizeof(*zdev->fmb));
WARN_ON((u64) zdev->fmb & 0xf);
args.fmb_addr = virt_to_phys(zdev->fmb);
@@ -284,12 +280,12 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
u64 data;
int rc;
- rc = pcilg_instr(&data, req, offset);
- data = data << ((8 - len) * 8);
- data = le64_to_cpu(data);
- if (!rc)
+ rc = s390pci_load(&data, req, offset);
+ if (!rc) {
+ data = data << ((8 - len) * 8);
+ data = le64_to_cpu(data);
*val = (u32) data;
- else
+ } else
*val = 0xffffffff;
return rc;
}
@@ -302,7 +298,7 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
data = cpu_to_le64(data);
data = data >> ((8 - len) * 8);
- rc = pcistg_instr(data, req, offset);
+ rc = s390pci_store(data, req, offset);
return rc;
}
@@ -409,20 +405,28 @@ static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
{
struct zpci_dev *zdev = get_zdev_by_bus(bus);
+ int ret;
if (!zdev || devfn != ZPCI_DEVFN)
- return 0;
- return zpci_cfg_load(zdev, where, val, size);
+ ret = -ENODEV;
+ else
+ ret = zpci_cfg_load(zdev, where, val, size);
+
+ return ret;
}
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 val)
{
struct zpci_dev *zdev = get_zdev_by_bus(bus);
+ int ret;
if (!zdev || devfn != ZPCI_DEVFN)
- return 0;
- return zpci_cfg_store(zdev, where, val, size);
+ ret = -ENODEV;
+ else
+ ret = zpci_cfg_store(zdev, where, val, size);
+
+ return ret;
}
static struct pci_ops pci_root_ops = {
@@ -474,7 +478,7 @@ scan:
}
/* enable interrupts again */
- sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+ set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
/* check again to not lose initiative */
rmb();
@@ -596,19 +600,6 @@ static void zpci_map_resources(struct zpci_dev *zdev)
}
};
-static void zpci_unmap_resources(struct pci_dev *pdev)
-{
- resource_size_t len;
- int i;
-
- for (i = 0; i < PCI_BAR_COUNT; i++) {
- len = pci_resource_len(pdev, i);
- if (!len)
- continue;
- pci_iounmap(pdev, (void *) pdev->resource[i].start);
- }
-};
-
struct zpci_dev *zpci_alloc_device(void)
{
struct zpci_dev *zdev;
@@ -636,32 +627,6 @@ void zpci_free_device(struct zpci_dev *zdev)
kfree(zdev);
}
-/* Called on removal of pci_dev, leaves zpci and bus device */
-static void zpci_remove_device(struct pci_dev *pdev)
-{
- struct zpci_dev *zdev = get_zdev(pdev);
-
- dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
- zdev->state = ZPCI_FN_STATE_CONFIGURED;
- zpci_dma_exit_device(zdev);
- zpci_fmb_disable_device(zdev);
- zpci_sysfs_remove_device(&pdev->dev);
- zpci_unmap_resources(pdev);
- list_del(&zdev->entry); /* can be called from init */
- zdev->pdev = NULL;
-}
-
-static void zpci_scan_devices(void)
-{
- struct zpci_dev *zdev;
-
- mutex_lock(&zpci_list_lock);
- list_for_each_entry(zdev, &zpci_list, entry)
- if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
- zpci_scan_device(zdev);
- mutex_unlock(&zpci_list_lock);
-}
-
/*
* Too late for any s390 specific setup, since interrupts must be set up
* already which requires DMA setup too and the pci scan will access the
@@ -688,12 +653,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
return 0;
}
-void pcibios_disable_device(struct pci_dev *pdev)
-{
- zpci_remove_device(pdev);
- pdev->sysdata = NULL;
-}
-
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
return zpci_sysfs_add_device(&pdev->dev);
@@ -789,7 +748,7 @@ static int __init zpci_irq_init(void)
spin_lock_init(&bucket->lock);
/* set summary to 1 to be called every time for the ISC */
*zpci_irq_si = 1;
- sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+ set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
return 0;
out_ai:
@@ -872,7 +831,19 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
spin_unlock(&zpci_iomap_lock);
}
-static int zpci_create_device_bus(struct zpci_dev *zdev)
+int pcibios_add_device(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = get_zdev(pdev);
+
+ zdev->pdev = pdev;
+ zpci_debug_init_device(zdev);
+ zpci_fmb_enable_device(zdev);
+ zpci_map_resources(zdev);
+
+ return 0;
+}
+
+static int zpci_scan_bus(struct zpci_dev *zdev)
{
struct resource *res;
LIST_HEAD(resources);
@@ -909,8 +880,8 @@ static int zpci_create_device_bus(struct zpci_dev *zdev)
pci_add_resource(&resources, res);
}
- zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
- zdev, &resources);
+ zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
+ zdev, &resources);
if (!zdev->bus)
return -EIO;
@@ -959,6 +930,13 @@ out:
}
EXPORT_SYMBOL_GPL(zpci_enable_device);
+int zpci_disable_device(struct zpci_dev *zdev)
+{
+ zpci_dma_exit_device(zdev);
+ return clp_disable_fh(zdev);
+}
+EXPORT_SYMBOL_GPL(zpci_disable_device);
+
int zpci_create_device(struct zpci_dev *zdev)
{
int rc;
@@ -967,9 +945,16 @@ int zpci_create_device(struct zpci_dev *zdev)
if (rc)
goto out;
- rc = zpci_create_device_bus(zdev);
+ if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
+ rc = zpci_enable_device(zdev);
+ if (rc)
+ goto out_free;
+
+ zdev->state = ZPCI_FN_STATE_ONLINE;
+ }
+ rc = zpci_scan_bus(zdev);
if (rc)
- goto out_bus;
+ goto out_disable;
mutex_lock(&zpci_list_lock);
list_add_tail(&zdev->entry, &zpci_list);
@@ -977,21 +962,12 @@ int zpci_create_device(struct zpci_dev *zdev)
hotplug_ops->create_slot(zdev);
mutex_unlock(&zpci_list_lock);
- if (zdev->state == ZPCI_FN_STATE_STANDBY)
- return 0;
-
- rc = zpci_enable_device(zdev);
- if (rc)
- goto out_start;
return 0;
-out_start:
- mutex_lock(&zpci_list_lock);
- list_del(&zdev->entry);
- if (hotplug_ops)
- hotplug_ops->remove_slot(zdev);
- mutex_unlock(&zpci_list_lock);
-out_bus:
+out_disable:
+ if (zdev->state == ZPCI_FN_STATE_ONLINE)
+ zpci_disable_device(zdev);
+out_free:
zpci_free_domain(zdev);
out:
return rc;
@@ -1016,15 +992,9 @@ int zpci_scan_device(struct zpci_dev *zdev)
goto out;
}
- zpci_debug_init_device(zdev);
- zpci_fmb_enable_device(zdev);
- zpci_map_resources(zdev);
pci_bus_add_devices(zdev->bus);
- /* now that pdev was added to the bus mark it as used */
- zdev->state = ZPCI_FN_STATE_ONLINE;
return 0;
-
out:
zpci_dma_exit_device(zdev);
clp_disable_fh(zdev);
@@ -1087,13 +1057,13 @@ void zpci_deregister_hp_ops(void)
}
EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops);
-unsigned int s390_pci_probe = 1;
+unsigned int s390_pci_probe;
EXPORT_SYMBOL_GPL(s390_pci_probe);
char * __init pcibios_setup(char *str)
{
- if (!strcmp(str, "off")) {
- s390_pci_probe = 0;
+ if (!strcmp(str, "on")) {
+ s390_pci_probe = 1;
return NULL;
}
return str;
@@ -1138,7 +1108,6 @@ static int __init pci_base_init(void)
if (rc)
goto out_find;
- zpci_scan_devices();
return 0;
out_find:
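
A note on the config-space accessor changes above: the returned doubleword is now byte-swapped and truncated only when the load actually succeeded, while the failure path keeps handing back all-ones, the conventional "no device responded" value on PCI; the bus-level pci_read()/pci_write() ops likewise report -ENODEV instead of silently returning success. A rough, self-contained sketch of that error-handling shape (the names and the do_load() stub are illustrative, not the kernel API):

	#include <errno.h>
	#include <stdint.h>

	/* Stub standing in for the real hardware access; always "succeeds"
	 * and returns a fixed value, purely so the sketch compiles. */
	static int do_load(uint64_t *data, unsigned int offset, unsigned int len)
	{
		(void)offset; (void)len;
		*data = 0x12345678;
		return 0;
	}

	int cfg_read(uint32_t *val, unsigned int offset, unsigned int len)
	{
		uint64_t data;
		int rc = do_load(&data, offset, len);

		if (!rc)
			*val = (uint32_t)data;	/* use the value only if the load worked */
		else
			*val = 0xffffffff;	/* all-ones: conventional "no device" answer */

		return rc ? -ENODEV : 0;
	}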
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index f339fe2feb15..bd34359d1546 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
+#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
/*
@@ -144,6 +145,7 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured)
struct zpci_dev *zdev;
int rc;
+ zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
zdev = zpci_alloc_device();
if (IS_ERR(zdev))
return PTR_ERR(zdev);
@@ -204,8 +206,8 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
*fh = rrb->response.fh;
else {
- pr_err("Set PCI FN failed with response: %x cc: %d\n",
- rrb->response.hdr.rsp, rc);
+ zpci_dbg(0, "SPF fh:%x, cc:%d, resp:%x\n", *fh, rc,
+ rrb->response.hdr.rsp);
rc = -EIO;
}
clp_free_block(rrb);
@@ -221,6 +223,8 @@ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
if (!rc)
/* Success -> store enabled handle in zdev */
zdev->fh = fh;
+
+ zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
return rc;
}
@@ -237,9 +241,8 @@ int clp_disable_fh(struct zpci_dev *zdev)
if (!rc)
/* Success -> store disabled handle in zdev */
zdev->fh = fh;
- else
- dev_err(&zdev->pdev->dev,
- "Failed to disable fn handle: 0x%x\n", fh);
+
+ zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
return rc;
}
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index a5d07bc2a547..771b82359af4 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -11,12 +11,17 @@
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <asm/debug.h>
#include <asm/pci_dma.h>
static struct dentry *debugfs_root;
+debug_info_t *pci_debug_msg_id;
+EXPORT_SYMBOL_GPL(pci_debug_msg_id);
+debug_info_t *pci_debug_err_id;
+EXPORT_SYMBOL_GPL(pci_debug_err_id);
static char *pci_perf_names[] = {
/* hardware counters */
@@ -168,7 +173,6 @@ int __init zpci_debug_init(void)
return -EINVAL;
debug_register_view(pci_debug_msg_id, &debug_sprintf_view);
debug_set_level(pci_debug_msg_id, 3);
- zpci_dbg("Debug view initialized\n");
/* error log */
pci_debug_err_id = debug_register("pci_error", 2, 1, 16);
@@ -176,7 +180,6 @@ int __init zpci_debug_init(void)
return -EINVAL;
debug_register_view(pci_debug_err_id, &debug_hex_ascii_view);
debug_set_level(pci_debug_err_id, 6);
- zpci_err("Debug view initialized\n");
debugfs_root = debugfs_create_dir("pci", NULL);
return 0;
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index a547419907c3..f8e69d5bc0a9 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -169,8 +169,9 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
* needs to be redone!
*/
goto no_refresh;
- rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr,
- nr_pages * PAGE_SIZE);
+
+ rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
+ nr_pages * PAGE_SIZE);
no_refresh:
spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
@@ -268,8 +269,6 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
int flags = ZPCI_PTE_VALID;
dma_addr_t dma_addr;
- WARN_ON_ONCE(offset > PAGE_SIZE);
-
/* This rounds up number of pages based on size and offset */
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
@@ -291,7 +290,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
- return dma_addr + offset;
+ return dma_addr + (offset & ~PAGE_MASK);
}
out_free:
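
On the mapping fix above: the offset passed to a map_page operation may legitimately exceed PAGE_SIZE (for instance when mapping into a compound page), which is why the WARN_ON_ONCE is dropped; the bus address handed back must only add back the sub-page part of the offset, since whole pages are already covered by the IOMMU mapping that starts at dma_addr. A small sketch of the arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	/* Only the sub-page part of the offset is added back; the page-sized
	 * part was already accounted for when the translation entries were
	 * set up starting at dma_addr. */
	static uint64_t map_result(uint64_t dma_addr, unsigned long offset)
	{
		return dma_addr + (offset & ~PAGE_MASK);
	}

	int main(void)
	{
		/* An offset of 0x1234 spans into the second page; only 0x234
		 * is added to the returned address. */
		printf("%llx\n", (unsigned long long)map_result(0x80000000ULL, 0x1234));
		return 0;
	}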
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
new file mode 100644
index 000000000000..22eeb9d7ffeb
--- /dev/null
+++ b/arch/s390/pci/pci_insn.c
@@ -0,0 +1,202 @@
+/*
+ * s390 specific pci instructions
+ *
+ * Copyright IBM Corp. 2013
+ */
+
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <asm/pci_insn.h>
+#include <asm/processor.h>
+
+#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
+
+/* Modify PCI Function Controls */
+static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
+{
+ u8 cc;
+
+ asm volatile (
+ " .insn rxy,0xe300000000d0,%[req],%[fib]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
+ : : "cc");
+ *status = req >> 24 & 0xff;
+ return cc;
+}
+
+int s390pci_mod_fc(u64 req, struct zpci_fib *fib)
+{
+ u8 cc, status;
+
+ do {
+ cc = __mpcifc(req, fib, &status);
+ if (cc == 2)
+ msleep(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
+ __func__, cc, status);
+ return (cc) ? -EIO : 0;
+}
+
+/* Refresh PCI Translations */
+static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
+{
+ register u64 __addr asm("2") = addr;
+ register u64 __range asm("3") = range;
+ u8 cc;
+
+ asm volatile (
+ " .insn rre,0xb9d30000,%[fn],%[addr]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [fn] "+d" (fn)
+ : [addr] "d" (__addr), "d" (__range)
+ : "cc");
+ *status = fn >> 24 & 0xff;
+ return cc;
+}
+
+int s390pci_refresh_trans(u64 fn, u64 addr, u64 range)
+{
+ u8 cc, status;
+
+ do {
+ cc = __rpcit(fn, addr, range, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
+ __func__, cc, status, addr, range);
+ return (cc) ? -EIO : 0;
+}
+
+/* Set Interruption Controls */
+void set_irq_ctrl(u16 ctl, char *unused, u8 isc)
+{
+ asm volatile (
+ " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
+ : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
+}
+
+/* PCI Load */
+static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
+{
+ register u64 __req asm("2") = req;
+ register u64 __offset asm("3") = offset;
+ int cc = -ENXIO;
+ u64 __data;
+
+ asm volatile (
+ " .insn rre,0xb9d20000,%[data],%[req]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req)
+ : "d" (__offset)
+ : "cc");
+ *status = __req >> 24 & 0xff;
+ if (!cc)
+ *data = __data;
+
+ return cc;
+}
+
+int s390pci_load(u64 *data, u64 req, u64 offset)
+{
+ u8 status;
+ int cc;
+
+ do {
+ cc = __pcilg(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(s390pci_load);
+
+/* PCI Store */
+static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
+{
+ register u64 __req asm("2") = req;
+ register u64 __offset asm("3") = offset;
+ int cc = -ENXIO;
+
+ asm volatile (
+ " .insn rre,0xb9d00000,%[data],%[req]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [req] "+d" (__req)
+ : "d" (__offset), [data] "d" (data)
+ : "cc");
+ *status = __req >> 24 & 0xff;
+ return cc;
+}
+
+int s390pci_store(u64 data, u64 req, u64 offset)
+{
+ u8 status;
+ int cc;
+
+ do {
+ cc = __pcistg(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(s390pci_store);
+
+/* PCI Store Block */
+static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
+{
+ int cc = -ENXIO;
+
+ asm volatile (
+ " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [req] "+d" (req)
+ : [offset] "d" (offset), [data] "Q" (*data)
+ : "cc");
+ *status = req >> 24 & 0xff;
+ return cc;
+}
+
+int s390pci_store_block(const u64 *data, u64 req, u64 offset)
+{
+ u8 status;
+ int cc;
+
+ do {
+ cc = __pcistb(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(s390pci_store_block);
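
All of the new wrappers in pci_insn.c share the same shape: issue the instruction, retry while the condition code reports busy (cc == 2) with a short delay in between, and map any other non-zero condition code to -EIO after logging once. A stripped-down, userspace-style sketch of that retry loop (the insn callback and the delay helper are stand-ins, not the kernel interfaces):

	#include <errno.h>
	#include <unistd.h>

	#define INSN_BUSY_DELAY_US	1

	/* cc == 0: done, cc == 2: busy (retry), anything else: error. */
	static int issue_with_retry(int (*insn)(void *ctx), void *ctx)
	{
		int cc;

		do {
			cc = insn(ctx);
			if (cc == 2)
				usleep(INSN_BUSY_DELAY_US);	/* brief back-off before retrying */
		} while (cc == 2);

		return cc ? -EIO : 0;
	}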
diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c
index 0297931335e1..b097aed05a9b 100644
--- a/arch/s390/pci/pci_msi.c
+++ b/arch/s390/pci/pci_msi.c
@@ -18,8 +18,9 @@
/* mapping of irq numbers to msi_desc */
static struct hlist_head *msi_hash;
-static unsigned int msihash_shift = 6;
-#define msi_hashfn(nr) hash_long(nr, msihash_shift)
+static const unsigned int msi_hash_bits = 8;
+#define MSI_HASH_BUCKETS (1U << msi_hash_bits)
+#define msi_hashfn(nr) hash_long(nr, msi_hash_bits)
static DEFINE_SPINLOCK(msi_map_lock);
@@ -74,6 +75,7 @@ int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
map->irq = nr;
map->msi = msi;
zdev->msi_map[nr & ZPCI_MSI_MASK] = map;
+ INIT_HLIST_NODE(&map->msi_chain);
pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
__func__, nr, msi_hashfn(nr));
@@ -125,11 +127,11 @@ int __init zpci_msihash_init(void)
{
unsigned int i;
- msi_hash = kmalloc(256 * sizeof(*msi_hash), GFP_KERNEL);
+ msi_hash = kmalloc(MSI_HASH_BUCKETS * sizeof(*msi_hash), GFP_KERNEL);
if (!msi_hash)
return -ENOMEM;
- for (i = 0; i < (1U << msihash_shift); i++)
+ for (i = 0; i < MSI_HASH_BUCKETS; i++)
INIT_HLIST_HEAD(&msi_hash[i]);
return 0;
}
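
The pci_msi.c hunk fixes a sizing mismatch: the hash table used to be allocated with 256 entries while the hash function only produced 6-bit (64-bucket) indices. Deriving both the allocation size and the hash width from a single constant keeps them in step. Roughly, the pattern is (illustrative standalone code, not the kernel's hlist/hash_long API):

	#include <stdlib.h>

	#define MSI_HASH_BITS		8
	#define MSI_HASH_BUCKETS	(1U << MSI_HASH_BITS)

	struct bucket { void *head; };

	/* Allocation and index computation both derive from MSI_HASH_BITS,
	 * so they cannot drift apart again. */
	static struct bucket *alloc_hash(void)
	{
		return calloc(MSI_HASH_BUCKETS, sizeof(struct bucket));
	}

	static unsigned int hash_index(unsigned long nr)
	{
		/* Toy hash; the kernel uses hash_long(nr, MSI_HASH_BITS). The
		 * mask guarantees the index fits the allocated table. */
		return (unsigned int)(nr * 2654435761UL) & (MSI_HASH_BUCKETS - 1);
	}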
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 272aa4f7657e..83d89bcb44af 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -87,7 +87,7 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
if (!tb->active) {
global_flush_tlb_page(mm, vaddr);
flush_tsb_user_page(mm, vaddr);
- return;
+ goto out;
}
if (nr == 0)
@@ -98,6 +98,7 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
if (nr >= TLB_BATCH_NR)
flush_tlb_pending();
+out:
put_cpu_var(tlb_batch);
}
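
The sparc change is purely about balancing: get_cpu_var() at the top of tlb_batch_add_one() disables preemption, so the early-exit path that used to return directly skipped the matching put_cpu_var(); routing it through a common out: label restores the pairing. The same discipline applies to any acquire/release pair around an early exit, as in this standalone analogy using a mutex:

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int pending;

	/* Illustrative only: every path out of the function, including the
	 * early one, must go through the single release at "out". */
	static void add_one(int fast_path, int value)
	{
		pthread_mutex_lock(&lock);

		if (fast_path)
			goto out;	/* early exit still releases the lock */

		pending += value;
	out:
		pthread_mutex_unlock(&lock);
	}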
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 8615f7581820..35ee62fccf98 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -258,7 +258,7 @@ static efi_status_t setup_efi_vars(struct boot_params *params)
u64 store_size, remaining_size, var_size;
efi_status_t status;
- if (!sys_table->runtime->query_variable_info)
+ if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
@@ -266,7 +266,7 @@ static efi_status_t setup_efi_vars(struct boot_params *params)
while (data && data->next)
data = (struct setup_data *)(unsigned long)data->next;
- status = efi_call_phys4(sys_table->runtime->query_variable_info,
+ status = efi_call_phys4((void *)sys_table->runtime->query_variable_info,
EFI_VARIABLE_NON_VOLATILE |
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS, &store_size,
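
The eboot.c change swaps a NULL-pointer test for a system-table revision check: QueryVariableInfo() only exists in EFI 2.0+ runtime services, and on older firmware the function-pointer slot is not guaranteed to be NULL, so comparing against the 2.00 revision is the reliable gate. The revision encodes major and minor as (major << 16) | minor, so the comparison is a plain integer one, roughly:

	#include <stdint.h>

	/* Same encoding the EFI spec uses for system-table revisions
	 * (shown here for illustration). */
	#define EFI_2_00_REVISION	((2U << 16) | 0U)

	static int have_query_variable_info(uint32_t runtime_revision)
	{
		/* 2.x-only runtime services may be called only on >= 2.00. */
		return runtime_revision >= EFI_2_00_REVISION;
	}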
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 901177d75ff5..305c68b8d538 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/dmi.h>
@@ -170,6 +171,16 @@ void pcibios_fixup_bus(struct pci_bus *b)
pcibios_fixup_device_resources(dev);
}
+void pcibios_add_bus(struct pci_bus *bus)
+{
+ acpi_pci_add_bus(bus);
+}
+
+void pcibios_remove_bus(struct pci_bus *bus)
+{
+ acpi_pci_remove_bus(bus);
+}
+
/*
* Only use DMI information to set this if nothing was passed
* on the kernel command line (which was parsed earlier).
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 94e76620460f..4a9be6ddf054 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -177,7 +177,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
goto error;
i = 0;
list_for_each_entry(msidesc, &dev->msi_list, list) {
- irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0,
+ irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
(type == PCI_CAP_ID_MSIX) ?
"pcifront-msi-x" :
"pcifront-msi",
@@ -244,7 +244,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
dev_dbg(&dev->dev,
"xen: msi already bound to pirq=%d\n", pirq);
}
- irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0,
+ irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
(type == PCI_CAP_ID_MSIX) ?
"msi-x" : "msi",
DOMID_SELF);
@@ -326,7 +326,7 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
}
ret = xen_bind_pirq_msi_to_irq(dev, msidesc,
- map_irq.pirq, map_irq.index,
+ map_irq.pirq,
(type == PCI_CAP_ID_MSIX) ?
"msi-x" : "msi",
domid);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c8e1c7b95c3b..ddbd54a9b845 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -31,6 +31,7 @@
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
+#include <linux/edd.h>
#include <xen/xen.h>
#include <xen/events.h>
@@ -1306,6 +1307,55 @@ static const struct machine_ops xen_machine_ops __initconst = {
.emergency_restart = xen_emergency_restart,
};
+static void __init xen_boot_params_init_edd(void)
+{
+#if IS_ENABLED(CONFIG_EDD)
+ struct xen_platform_op op;
+ struct edd_info *edd_info;
+ u32 *mbr_signature;
+ unsigned nr;
+ int ret;
+
+ edd_info = boot_params.eddbuf;
+ mbr_signature = boot_params.edd_mbr_sig_buffer;
+
+ op.cmd = XENPF_firmware_info;
+
+ op.u.firmware_info.type = XEN_FW_DISK_INFO;
+ for (nr = 0; nr < EDDMAXNR; nr++) {
+ struct edd_info *info = edd_info + nr;
+
+ op.u.firmware_info.index = nr;
+ info->params.length = sizeof(info->params);
+ set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
+ &info->params);
+ ret = HYPERVISOR_dom0_op(&op);
+ if (ret)
+ break;
+
+#define C(x) info->x = op.u.firmware_info.u.disk_info.x
+ C(device);
+ C(version);
+ C(interface_support);
+ C(legacy_max_cylinder);
+ C(legacy_max_head);
+ C(legacy_sectors_per_track);
+#undef C
+ }
+ boot_params.eddbuf_entries = nr;
+
+ op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
+ for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
+ op.u.firmware_info.index = nr;
+ ret = HYPERVISOR_dom0_op(&op);
+ if (ret)
+ break;
+ mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
+ }
+ boot_params.edd_mbr_sig_buf_entries = nr;
+#endif
+}
+
/*
* Set up the GDT and segment registers for -fstack-protector. Until
* we do this, we have to be careful not to call any stack-protected
@@ -1508,6 +1558,8 @@ asmlinkage void __init xen_start_kernel(void)
/* Avoid searching for BIOS MP tables */
x86_init.mpparse.find_smp_config = x86_init_noop;
x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+
+ xen_boot_params_init_edd();
}
#ifdef CONFIG_PCI
/* PCI BIOS service won't work from a PV guest. */
@@ -1589,8 +1641,11 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
switch (action) {
case CPU_UP_PREPARE:
xen_vcpu_setup(cpu);
- if (xen_have_vector_callback)
+ if (xen_have_vector_callback) {
xen_init_lock_cpu(cpu);
+ if (xen_feature(XENFEAT_hvm_safe_pvclock))
+ xen_setup_timer(cpu);
+ }
break;
default:
break;
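
The EDD block above uses a small field-copy macro trick: define a one-line macro that copies a single identically named member, apply it once per field, then undefine it, which keeps the per-field lines short and hard to get out of order. In isolation the technique looks like this:

	#include <stdio.h>

	struct disk_info {
		int device;
		int version;
		int interface_support;
	};

	static void copy_info(struct disk_info *dst, const struct disk_info *src)
	{
	#define C(x) dst->x = src->x
		C(device);
		C(version);
		C(interface_support);
	#undef C
	}

	int main(void)
	{
		struct disk_info a = { 0x80, 0x30, 0x07 }, b;

		copy_info(&b, &a);
		printf("%x %x %x\n", b.device, b.version, b.interface_support);
		return 0;
	}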
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 09ea61d2e02f..0d466d7c7175 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -144,6 +144,13 @@ static int xen_smp_intr_init(unsigned int cpu)
goto fail;
per_cpu(xen_callfuncsingle_irq, cpu) = rc;
+ /*
+ * The IRQ worker on PVHVM goes through the native path and uses the
+ * IPI mechanism.
+ */
+ if (xen_hvm_domain())
+ return 0;
+
callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
cpu,
@@ -167,6 +174,9 @@ static int xen_smp_intr_init(unsigned int cpu)
if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
NULL);
+ if (xen_hvm_domain())
+ return rc;
+
if (per_cpu(xen_irq_work, cpu) >= 0)
unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
@@ -418,7 +428,7 @@ static int xen_cpu_disable(void)
static void xen_cpu_die(unsigned int cpu)
{
- while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
+ while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ/10);
}
@@ -426,7 +436,8 @@ static void xen_cpu_die(unsigned int cpu)
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
+ if (!xen_hvm_domain())
+ unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
xen_uninit_lock_cpu(cpu);
xen_teardown_timer(cpu);
}
@@ -657,11 +668,7 @@ static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
static void xen_hvm_cpu_die(unsigned int cpu)
{
- unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
+ xen_cpu_die(cpu);
native_cpu_die(cpu);
}
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index f7a080ef0354..8b54603ce816 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -364,6 +364,16 @@ void __cpuinit xen_init_lock_cpu(int cpu)
int irq;
const char *name;
+ WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n",
+ cpu, per_cpu(lock_kicker_irq, cpu));
+
+ /*
+ * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
+ * (xen: disable PV spinlocks on HVM)
+ */
+ if (xen_hvm_domain())
+ return;
+
name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
cpu,
@@ -382,11 +392,26 @@ void __cpuinit xen_init_lock_cpu(int cpu)
void xen_uninit_lock_cpu(int cpu)
{
+ /*
+ * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
+ * (xen: disable PV spinlocks on HVM)
+ */
+ if (xen_hvm_domain())
+ return;
+
unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
+ per_cpu(lock_kicker_irq, cpu) = -1;
}
void __init xen_init_spinlocks(void)
{
+ /*
+ * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
+ * (xen: disable PV spinlocks on HVM)
+ */
+ if (xen_hvm_domain())
+ return;
+
BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
pv_lock_ops.spin_is_locked = xen_spin_is_locked;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 0296a9522501..3d88bfdf9e1c 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -377,7 +377,7 @@ static const struct clock_event_device xen_vcpuop_clockevent = {
static const struct clock_event_device *xen_clockevent =
&xen_timerop_clockevent;
-static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events);
+static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events) = { .irq = -1 };
static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
@@ -401,6 +401,9 @@ void xen_setup_timer(int cpu)
struct clock_event_device *evt;
int irq;
+ evt = &per_cpu(xen_clock_events, cpu);
+ WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
+
printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
name = kasprintf(GFP_KERNEL, "timer%d", cpu);
@@ -413,7 +416,6 @@ void xen_setup_timer(int cpu)
IRQF_FORCE_RESUME,
name, NULL);
- evt = &per_cpu(xen_clock_events, cpu);
memcpy(evt, xen_clockevent, sizeof(*evt));
evt->cpumask = cpumask_of(cpu);
@@ -426,6 +428,7 @@ void xen_teardown_timer(int cpu)
BUG_ON(cpu == 0);
evt = &per_cpu(xen_clock_events, cpu);
unbind_from_irqhandler(evt->irq, NULL);
+ evt->irq = -1;
}
void xen_setup_cpu_clockevents(void)
@@ -497,7 +500,11 @@ static void xen_hvm_setup_cpu_clockevents(void)
{
int cpu = smp_processor_id();
xen_setup_runstate_info(cpu);
- xen_setup_timer(cpu);
+ /*
+ * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
+ * doing it in xen_hvm_cpu_notify (which gets called by smp_init during
+ * early bootup and also during CPU hotplug events).
+ */
xen_setup_cpu_clockevents();
}
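
The time.c hunks introduce -1 as an explicit "no IRQ bound" sentinel for the per-CPU clock event: the static initializer starts every slot at -1, teardown resets it, and setup can then warn if asked to bind a timer that is already bound, which is exactly what would happen if setup ran twice across a CPU hotplug cycle. The sentinel discipline in miniature (names here are illustrative):

	#include <assert.h>

	struct clock_event { int irq; };

	/* -1 means "no IRQ bound"; bind/unbind keep the invariant. */
	static struct clock_event evt = { .irq = -1 };

	static void setup_timer(int new_irq)
	{
		assert(evt.irq < 0 && "timer already set up");
		evt.irq = new_irq;
	}

	static void teardown_timer(void)
	{
		assert(evt.irq >= 0);
		evt.irq = -1;		/* mark the slot free for a later setup */
	}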
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index ef5356cd280a..0262210cad38 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -161,6 +161,8 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
else if (len < ds)
msg->msg_flags |= MSG_TRUNC;
+ msg->msg_namelen = 0;
+
lock_sock(sk);
if (ctx->more) {
ctx->more = 0;
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 6a6dfc062d2a..a1c4f0a55583 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -432,6 +432,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
long copied = 0;
lock_sock(sk);
+ msg->msg_namelen = 0;
for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
iovlen--, iov++) {
unsigned long seglen = iov->iov_len;
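
Both crypto hunks plug the same information leak: these AF_ALG sockets have no peer address, but their recvmsg() handlers never set msg_namelen, so the generic socket code could copy stale kernel stack bytes back to user space as an address length. The rule the fix enforces is simply that an address-less recvmsg() path must report a zero-length name, as in:

	#include <sys/socket.h>

	/* Illustrative: a recvmsg-style handler for a protocol without peer
	 * addresses should always report a zero-length source address rather
	 * than leaving msg_namelen holding whatever was there before. */
	static void report_no_peer_address(struct msghdr *msg)
	{
		msg->msg_namelen = 0;
	}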
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 6ae5e440436e..ac8688b89705 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -65,44 +65,12 @@ static struct acpi_scan_handler pci_root_handler = {
.detach = acpi_pci_root_remove,
};
-/* Lock to protect both acpi_pci_roots and acpi_pci_drivers lists */
+/* Lock to protect the acpi_pci_roots list */
static DEFINE_MUTEX(acpi_pci_root_lock);
static LIST_HEAD(acpi_pci_roots);
-static LIST_HEAD(acpi_pci_drivers);
static DEFINE_MUTEX(osc_lock);
-int acpi_pci_register_driver(struct acpi_pci_driver *driver)
-{
- int n = 0;
- struct acpi_pci_root *root;
-
- mutex_lock(&acpi_pci_root_lock);
- list_add_tail(&driver->node, &acpi_pci_drivers);
- if (driver->add)
- list_for_each_entry(root, &acpi_pci_roots, node) {
- driver->add(root);
- n++;
- }
- mutex_unlock(&acpi_pci_root_lock);
-
- return n;
-}
-EXPORT_SYMBOL(acpi_pci_register_driver);
-
-void acpi_pci_unregister_driver(struct acpi_pci_driver *driver)
-{
- struct acpi_pci_root *root;
-
- mutex_lock(&acpi_pci_root_lock);
- list_del(&driver->node);
- if (driver->remove)
- list_for_each_entry(root, &acpi_pci_roots, node)
- driver->remove(root);
- mutex_unlock(&acpi_pci_root_lock);
-}
-EXPORT_SYMBOL(acpi_pci_unregister_driver);
-
/**
* acpi_is_root_bridge - determine whether an ACPI CA node is a PCI root bridge
* @handle - the ACPI CA node in question.
@@ -413,7 +381,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
acpi_status status;
int result;
struct acpi_pci_root *root;
- struct acpi_pci_driver *driver;
u32 flags, base_flags;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
@@ -571,12 +538,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
pci_assign_unassigned_bus_resources(root->bus);
}
- mutex_lock(&acpi_pci_root_lock);
- list_for_each_entry(driver, &acpi_pci_drivers, node)
- if (driver->add)
- driver->add(root);
- mutex_unlock(&acpi_pci_root_lock);
-
/* need to after hot-added ioapic is registered */
if (system_state != SYSTEM_BOOTING)
pci_enable_bridges(root->bus);
@@ -597,16 +558,9 @@ end:
static void acpi_pci_root_remove(struct acpi_device *device)
{
struct acpi_pci_root *root = acpi_driver_data(device);
- struct acpi_pci_driver *driver;
pci_stop_root_bus(root->bus);
- mutex_lock(&acpi_pci_root_lock);
- list_for_each_entry_reverse(driver, &acpi_pci_drivers, node)
- if (driver->remove)
- driver->remove(root);
- mutex_unlock(&acpi_pci_root_lock);
-
device_set_run_wake(root->bus->bridge, false);
pci_acpi_remove_bus_pm_notifier(device);
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index cd1434eb1de8..033d1179bdb5 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -9,6 +9,9 @@
* Copyright (C) 2007-2008 Hewlett-Packard Development Company, L.P.
* Alex Chiang <achiang@hp.com>
*
+ * Copyright (C) 2013 Huawei Tech. Co., Ltd.
+ * Jiang Liu <jiang.liu@huawei.com>
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
@@ -28,10 +31,9 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/list.h>
#include <linux/pci.h>
#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
#include <linux/dmi.h>
static bool debug;
@@ -61,20 +63,12 @@ ACPI_MODULE_NAME("pci_slot");
#define SLOT_NAME_SIZE 21 /* Inspired by #define in acpiphp.h */
struct acpi_pci_slot {
- acpi_handle root_handle; /* handle of the root bridge */
struct pci_slot *pci_slot; /* corresponding pci_slot */
struct list_head list; /* node in the list of slots */
};
-static int acpi_pci_slot_add(struct acpi_pci_root *root);
-static void acpi_pci_slot_remove(struct acpi_pci_root *root);
-
static LIST_HEAD(slot_list);
static DEFINE_MUTEX(slot_list_lock);
-static struct acpi_pci_driver acpi_pci_slot_driver = {
- .add = acpi_pci_slot_add,
- .remove = acpi_pci_slot_remove,
-};
static int
check_slot(acpi_handle handle, unsigned long long *sun)
@@ -113,21 +107,8 @@ out:
return device;
}
-struct callback_args {
- acpi_walk_callback user_function; /* only for walk_p2p_bridge */
- struct pci_bus *pci_bus;
- acpi_handle root_handle;
-};
-
/*
- * register_slot
- *
- * Called once for each SxFy object in the namespace. Don't worry about
- * calling pci_create_slot multiple times for the same pci_bus:device,
- * since each subsequent call simply bumps the refcount on the pci_slot.
- *
- * The number of calls to pci_destroy_slot from unregister_slot is
- * symmetrical.
+ * Check whether the handle has an associated slot and create a PCI slot if it does.
*/
static acpi_status
register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
@@ -137,13 +118,22 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
char name[SLOT_NAME_SIZE];
struct acpi_pci_slot *slot;
struct pci_slot *pci_slot;
- struct callback_args *parent_context = context;
- struct pci_bus *pci_bus = parent_context->pci_bus;
+ struct pci_bus *pci_bus = context;
device = check_slot(handle, &sun);
if (device < 0)
return AE_OK;
+ /*
+ * There may be multiple PCI functions associated with the same slot.
+ * Check whether a PCI slot has already been created for this PCI device.
+ */
+ list_for_each_entry(slot, &slot_list, list) {
+ pci_slot = slot->pci_slot;
+ if (pci_slot->bus == pci_bus && pci_slot->number == device)
+ return AE_OK;
+ }
+
slot = kmalloc(sizeof(*slot), GFP_KERNEL);
if (!slot) {
err("%s: cannot allocate memory\n", __func__);
@@ -158,12 +148,8 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
}
- slot->root_handle = parent_context->root_handle;
slot->pci_slot = pci_slot;
- INIT_LIST_HEAD(&slot->list);
- mutex_lock(&slot_list_lock);
list_add(&slot->list, &slot_list);
- mutex_unlock(&slot_list_lock);
get_device(&pci_bus->dev);
@@ -173,131 +159,24 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
}
-/*
- * walk_p2p_bridge - discover and walk p2p bridges
- * @handle: points to an acpi_pci_root
- * @context: p2p_bridge_context pointer
- *
- * Note that when we call ourselves recursively, we pass a different
- * value of pci_bus in the child_context.
- */
-static acpi_status
-walk_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- int device, function;
- unsigned long long adr;
- acpi_status status;
- acpi_handle dummy_handle;
- acpi_walk_callback user_function;
-
- struct pci_dev *dev;
- struct pci_bus *pci_bus;
- struct callback_args child_context;
- struct callback_args *parent_context = context;
-
- pci_bus = parent_context->pci_bus;
- user_function = parent_context->user_function;
-
- status = acpi_get_handle(handle, "_ADR", &dummy_handle);
- if (ACPI_FAILURE(status))
- return AE_OK;
-
- status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
- if (ACPI_FAILURE(status))
- return AE_OK;
-
- device = (adr >> 16) & 0xffff;
- function = adr & 0xffff;
-
- dev = pci_get_slot(pci_bus, PCI_DEVFN(device, function));
- if (!dev || !dev->subordinate)
- goto out;
-
- child_context.pci_bus = dev->subordinate;
- child_context.user_function = user_function;
- child_context.root_handle = parent_context->root_handle;
-
- dbg("p2p bridge walk, pci_bus = %x\n", dev->subordinate->number);
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- user_function, NULL, &child_context, NULL);
- if (ACPI_FAILURE(status))
- goto out;
-
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- walk_p2p_bridge, NULL, &child_context, NULL);
-out:
- pci_dev_put(dev);
- return AE_OK;
-}
-
-/*
- * walk_root_bridge - generic root bridge walker
- * @root: poiner of an acpi_pci_root
- * @user_function: user callback for slot objects
- *
- * Call user_function for all objects underneath this root bridge.
- * Walk p2p bridges underneath us and call user_function on those too.
- */
-static int
-walk_root_bridge(struct acpi_pci_root *root, acpi_walk_callback user_function)
-{
- acpi_status status;
- acpi_handle handle = root->device->handle;
- struct pci_bus *pci_bus = root->bus;
- struct callback_args context;
-
- context.pci_bus = pci_bus;
- context.user_function = user_function;
- context.root_handle = handle;
-
- dbg("root bridge walk, pci_bus = %x\n", pci_bus->number);
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- user_function, NULL, &context, NULL);
- if (ACPI_FAILURE(status))
- return status;
-
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- walk_p2p_bridge, NULL, &context, NULL);
- if (ACPI_FAILURE(status))
- err("%s: walk_p2p_bridge failure - %d\n", __func__, status);
-
- return status;
-}
-
-/*
- * acpi_pci_slot_add
- * @handle: points to an acpi_pci_root
- */
-static int
-acpi_pci_slot_add(struct acpi_pci_root *root)
+void acpi_pci_slot_enumerate(struct pci_bus *bus, acpi_handle handle)
{
- acpi_status status;
-
- status = walk_root_bridge(root, register_slot);
- if (ACPI_FAILURE(status))
- err("%s: register_slot failure - %d\n", __func__, status);
-
- return status;
+ mutex_lock(&slot_list_lock);
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ register_slot, NULL, bus, NULL);
+ mutex_unlock(&slot_list_lock);
}
-/*
- * acpi_pci_slot_remove
- * @handle: points to an acpi_pci_root
- */
-static void
-acpi_pci_slot_remove(struct acpi_pci_root *root)
+void acpi_pci_slot_remove(struct pci_bus *bus)
{
struct acpi_pci_slot *slot, *tmp;
- struct pci_bus *pbus;
- acpi_handle handle = root->device->handle;
mutex_lock(&slot_list_lock);
list_for_each_entry_safe(slot, tmp, &slot_list, list) {
- if (slot->root_handle == handle) {
+ if (slot->pci_slot->bus == bus) {
list_del(&slot->list);
- pbus = slot->pci_slot->bus;
pci_destroy_slot(slot->pci_slot);
- put_device(&pbus->dev);
+ put_device(&bus->dev);
kfree(slot);
}
}
@@ -332,5 +211,4 @@ static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
void __init acpi_pci_slot_init(void)
{
dmi_check_system(acpi_pci_slot_dmi_table);
- acpi_pci_register_driver(&acpi_pci_slot_driver);
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 5e7e991717d7..f54d1985e594 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1790,7 +1790,6 @@ int __init acpi_scan_init(void)
acpi_platform_init();
acpi_csrt_init();
acpi_container_init();
- acpi_pci_slot_init();
mutex_lock(&acpi_scan_lock);
/*
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 6a67b07de494..251e57d38942 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -415,17 +415,17 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* Marvell */
{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
- { PCI_DEVICE(0x1b4b, 0x9123),
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9123),
.class = PCI_CLASS_STORAGE_SATA_AHCI,
.class_mask = 0xffffff,
.driver_data = board_ahci_yes_fbs }, /* 88se9128 */
- { PCI_DEVICE(0x1b4b, 0x9125),
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
.driver_data = board_ahci_yes_fbs }, /* 88se9125 */
- { PCI_DEVICE(0x1b4b, 0x917a),
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
- { PCI_DEVICE(0x1b4b, 0x9192),
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
- { PCI_DEVICE(0x1b4b, 0x91a3),
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
.driver_data = board_ahci_yes_fbs },
/* Promise */
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index 806c77bfd434..272a3ec35957 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -275,19 +275,18 @@ static int __init eisa_request_resources(struct eisa_root_device *root,
}
if (slot) {
- edev->res[i].name = NULL;
edev->res[i].start = SLOT_ADDRESS(root, slot)
+ (i * 0x400);
edev->res[i].end = edev->res[i].start + 0xff;
edev->res[i].flags = IORESOURCE_IO;
} else {
- edev->res[i].name = NULL;
edev->res[i].start = SLOT_ADDRESS(root, slot)
+ EISA_VENDOR_ID_OFFSET;
edev->res[i].end = edev->res[i].start + 3;
- edev->res[i].flags = IORESOURCE_BUSY;
+ edev->res[i].flags = IORESOURCE_IO | IORESOURCE_BUSY;
}
+ dev_printk(KERN_DEBUG, &edev->dev, "%pR\n", &edev->res[i]);
if (request_resource(root->res, &edev->res[i]))
goto failed;
}
@@ -314,41 +313,40 @@ static int __init eisa_probe(struct eisa_root_device *root)
{
int i, c;
struct eisa_device *edev;
+ char *enabled_str;
- printk(KERN_INFO "EISA: Probing bus %d at %s\n",
- root->bus_nr, dev_name(root->dev));
+ dev_info(root->dev, "Probing EISA bus %d\n", root->bus_nr);
/* First try to get hold of slot 0. If there is no device
* here, simply fail, unless root->force_probe is set. */
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
if (!edev) {
- printk(KERN_ERR "EISA: Couldn't allocate mainboard slot\n");
+ dev_err(root->dev, "EISA: Couldn't allocate mainboard slot\n");
return -ENOMEM;
}
- if (eisa_request_resources(root, edev, 0)) {
- printk(KERN_WARNING \
- "EISA: Cannot allocate resource for mainboard\n");
+ if (eisa_init_device(root, edev, 0)) {
kfree(edev);
if (!root->force_probe)
- return -EBUSY;
+ return -ENODEV;
goto force_probe;
}
- if (eisa_init_device(root, edev, 0)) {
- eisa_release_resources(edev);
+ if (eisa_request_resources(root, edev, 0)) {
+ dev_warn(root->dev,
+ "EISA: Cannot allocate resource for mainboard\n");
kfree(edev);
if (!root->force_probe)
- return -ENODEV;
+ return -EBUSY;
goto force_probe;
}
- printk(KERN_INFO "EISA: Mainboard %s detected.\n", edev->id.sig);
+ dev_info(&edev->dev, "EISA: Mainboard %s detected\n", edev->id.sig);
if (eisa_register_device(edev)) {
- printk(KERN_ERR "EISA: Failed to register %s\n",
- edev->id.sig);
+ dev_err(&edev->dev, "EISA: Failed to register %s\n",
+ edev->id.sig);
eisa_release_resources(edev);
kfree(edev);
}
@@ -358,55 +356,47 @@ static int __init eisa_probe(struct eisa_root_device *root)
for (c = 0, i = 1; i <= root->slots; i++) {
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
if (!edev) {
- printk(KERN_ERR "EISA: Out of memory for slot %d\n", i);
+ dev_err(root->dev, "EISA: Out of memory for slot %d\n",
+ i);
continue;
}
- if (eisa_request_resources(root, edev, i)) {
- printk(KERN_WARNING \
- "Cannot allocate resource for EISA slot %d\n",
- i);
+ if (eisa_init_device(root, edev, i)) {
kfree(edev);
continue;
}
- if (eisa_init_device(root, edev, i)) {
- eisa_release_resources(edev);
+ if (eisa_request_resources(root, edev, i)) {
+ dev_warn(root->dev,
+ "Cannot allocate resource for EISA slot %d\n",
+ i);
kfree(edev);
continue;
}
-
- printk(KERN_INFO "EISA: slot %d : %s detected",
- i, edev->id.sig);
-
- switch (edev->state) {
- case EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED:
- printk(" (forced enabled)");
- break;
-
- case EISA_CONFIG_FORCED:
- printk(" (forced disabled)");
- break;
-
- case 0:
- printk(" (disabled)");
- break;
- }
-
- printk (".\n");
+
+ if (edev->state == (EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED))
+ enabled_str = " (forced enabled)";
+ else if (edev->state == EISA_CONFIG_FORCED)
+ enabled_str = " (forced disabled)";
+ else if (edev->state == 0)
+ enabled_str = " (disabled)";
+ else
+ enabled_str = "";
+
+ dev_info(&edev->dev, "EISA: slot %d: %s detected%s\n", i,
+ edev->id.sig, enabled_str);
c++;
if (eisa_register_device(edev)) {
- printk(KERN_ERR "EISA: Failed to register %s\n",
- edev->id.sig);
+ dev_err(&edev->dev, "EISA: Failed to register %s\n",
+ edev->id.sig);
eisa_release_resources(edev);
kfree(edev);
}
}
- printk(KERN_INFO "EISA: Detected %d card%s.\n", c, c == 1 ? "" : "s");
-
+ dev_info(root->dev, "EISA: Detected %d card%s\n", c, c == 1 ? "" : "s");
return 0;
}
diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c
index 6c3fca97d346..a333bf3517de 100644
--- a/drivers/eisa/pci_eisa.c
+++ b/drivers/eisa/pci_eisa.c
@@ -25,8 +25,7 @@ static int __init pci_eisa_init(struct pci_dev *pdev)
struct resource *res, *bus_res = NULL;
if ((rc = pci_enable_device (pdev))) {
- printk (KERN_ERR "pci_eisa : Could not enable device %s\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "Could not enable device\n");
return rc;
}
@@ -59,7 +58,7 @@ static int __init pci_eisa_init(struct pci_dev *pdev)
dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root);
if (eisa_root_register (&pci_eisa_root)) {
- printk (KERN_ERR "pci_eisa : Could not register EISA root\n");
+ dev_err(&pdev->dev, "Could not register EISA root\n");
return -1;
}
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 182ce9471175..f4baa11d3644 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -1628,10 +1628,11 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
return count;
}
-static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
+static bool variable_is_present(struct efivars *efivars,
+ efi_char16_t *variable_name,
+ efi_guid_t *vendor)
{
struct efivar_entry *entry, *n;
- struct efivars *efivars = &__efivars;
unsigned long strsize1, strsize2;
bool found = false;
@@ -1703,8 +1704,8 @@ static void efivar_update_sysfs_entries(struct work_struct *work)
if (status != EFI_SUCCESS) {
break;
} else {
- if (!variable_is_present(variable_name,
- &vendor)) {
+ if (!variable_is_present(efivars,
+ variable_name, &vendor)) {
found = true;
break;
}
@@ -2008,7 +2009,8 @@ int register_efivars(struct efivars *efivars,
* we'll ever see a different variable name,
* and may end up looping here forever.
*/
- if (variable_is_present(variable_name, &vendor_guid)) {
+ if (variable_is_present(efivars, variable_name,
+ &vendor_guid)) {
dup_variable_bug(variable_name, &vendor_guid,
variable_name_size);
status = EFI_NOT_FOUND;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 9cc108d2b770..8325f580c0f1 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -642,7 +642,12 @@ static struct platform_driver pxa_gpio_driver = {
.of_match_table = of_match_ptr(pxa_gpio_dt_ids),
},
};
-module_platform_driver(pxa_gpio_driver);
+
+static int __init pxa_gpio_init(void)
+{
+ return platform_driver_register(&pxa_gpio_driver);
+}
+postcore_initcall(pxa_gpio_init);
#ifdef CONFIG_PM
static int pxa_gpio_suspend(void)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b287ca33833d..830183737b0f 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -173,7 +173,7 @@ static inline u16 get_device_id(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- return calc_devid(pdev->bus->number, pdev->devfn);
+ return PCI_DEVID(pdev->bus->number, pdev->devfn);
}
static struct iommu_dev_data *get_dev_data(struct device *dev)
@@ -649,26 +649,26 @@ retry:
case EVENT_TYPE_ILL_DEV:
printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
"address=0x%016llx flags=0x%04x]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address, flags);
dump_dte_entry(devid);
break;
case EVENT_TYPE_IO_FAULT:
printk("IO_PAGE_FAULT device=%02x:%02x.%x "
"domain=0x%04x address=0x%016llx flags=0x%04x]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
domid, address, flags);
break;
case EVENT_TYPE_DEV_TAB_ERR:
printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
"address=0x%016llx flags=0x%04x]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address, flags);
break;
case EVENT_TYPE_PAGE_TAB_ERR:
printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
"domain=0x%04x address=0x%016llx flags=0x%04x]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
domid, address, flags);
break;
case EVENT_TYPE_ILL_CMD:
@@ -682,13 +682,13 @@ retry:
case EVENT_TYPE_IOTLB_INV_TO:
printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
"address=0x%016llx]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address);
break;
case EVENT_TYPE_INV_DEV_REQ:
printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
"address=0x%016llx flags=0x%04x]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address, flags);
break;
default:
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index e3c2d74b7684..2f46881256a2 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -406,7 +406,7 @@ static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
u32 cap;
cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
- update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));
+ update_last_devid(PCI_DEVID(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));
return 0;
}
@@ -423,7 +423,7 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
p += sizeof(*h);
end += h->length;
- find_last_devid_on_pci(PCI_BUS(h->devid),
+ find_last_devid_on_pci(PCI_BUS_NUM(h->devid),
PCI_SLOT(h->devid),
PCI_FUNC(h->devid),
h->cap_ptr);
@@ -784,10 +784,10 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x"
" last device %02x:%02x.%x flags: %02x\n",
- PCI_BUS(iommu->first_device),
+ PCI_BUS_NUM(iommu->first_device),
PCI_SLOT(iommu->first_device),
PCI_FUNC(iommu->first_device),
- PCI_BUS(iommu->last_device),
+ PCI_BUS_NUM(iommu->last_device),
PCI_SLOT(iommu->last_device),
PCI_FUNC(iommu->last_device),
e->flags);
@@ -801,7 +801,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
"flags: %02x\n",
- PCI_BUS(e->devid),
+ PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags);
@@ -813,7 +813,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
DUMP_printk(" DEV_SELECT_RANGE_START\t "
"devid: %02x:%02x.%x flags: %02x\n",
- PCI_BUS(e->devid),
+ PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags);
@@ -827,11 +827,11 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
"flags: %02x devid_to: %02x:%02x.%x\n",
- PCI_BUS(e->devid),
+ PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags,
- PCI_BUS(e->ext >> 8),
+ PCI_BUS_NUM(e->ext >> 8),
PCI_SLOT(e->ext >> 8),
PCI_FUNC(e->ext >> 8));
@@ -846,11 +846,11 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
DUMP_printk(" DEV_ALIAS_RANGE\t\t "
"devid: %02x:%02x.%x flags: %02x "
"devid_to: %02x:%02x.%x\n",
- PCI_BUS(e->devid),
+ PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags,
- PCI_BUS(e->ext >> 8),
+ PCI_BUS_NUM(e->ext >> 8),
PCI_SLOT(e->ext >> 8),
PCI_FUNC(e->ext >> 8));
@@ -864,7 +864,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
"flags: %02x ext: %08x\n",
- PCI_BUS(e->devid),
+ PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags, e->ext);
@@ -877,7 +877,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
"%02x:%02x.%x flags: %02x ext: %08x\n",
- PCI_BUS(e->devid),
+ PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags, e->ext);
@@ -890,7 +890,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
case IVHD_DEV_RANGE_END:
DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
- PCI_BUS(e->devid),
+ PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid));
@@ -924,7 +924,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
var, (int)handle,
- PCI_BUS(devid),
+ PCI_BUS_NUM(devid),
PCI_SLOT(devid),
PCI_FUNC(devid));
@@ -1086,7 +1086,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
DUMP_printk("device: %02x:%02x.%01x cap: %04x "
"seg: %d flags: %01x info %04x\n",
- PCI_BUS(h->devid), PCI_SLOT(h->devid),
+ PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
PCI_FUNC(h->devid), h->cap_ptr,
h->pci_seg, h->flags, h->info);
DUMP_printk(" mmio-addr: %016llx\n",
@@ -1116,7 +1116,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
int cap_ptr = iommu->cap_ptr;
u32 range, misc, low, high;
- iommu->dev = pci_get_bus_and_slot(PCI_BUS(iommu->devid),
+ iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid),
iommu->devid & 0xff);
if (!iommu->dev)
return -ENODEV;
@@ -1128,9 +1128,9 @@ static int iommu_init_pci(struct amd_iommu *iommu)
pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
&misc);
- iommu->first_device = calc_devid(MMIO_GET_BUS(range),
+ iommu->first_device = PCI_DEVID(MMIO_GET_BUS(range),
MMIO_GET_FD(range));
- iommu->last_device = calc_devid(MMIO_GET_BUS(range),
+ iommu->last_device = PCI_DEVID(MMIO_GET_BUS(range),
MMIO_GET_LD(range));
if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
@@ -1388,8 +1388,8 @@ static int __init init_unity_map_range(struct ivmd_header *m)
DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
" range_start: %016llx range_end: %016llx flags: %x\n", s,
- PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
- PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
+ PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
+ PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
e->address_start, e->address_end, m->flags);
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index e38ab438bb34..ec36cf63e0ca 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -24,6 +24,7 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/pci.h>
/*
* Maximum number of IOMMUs supported
@@ -315,9 +316,6 @@
#define MAX_DOMAIN_ID 65536
-/* FIXME: move this macro to <linux/pci.h> */
-#define PCI_BUS(x) (((x) >> 8) & 0xff)
-
/* Protection domain flags */
#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops
@@ -703,13 +701,6 @@ extern int amd_iommu_max_glx_val;
*/
extern void iommu_flush_all_caches(struct amd_iommu *iommu);
-/* takes bus and device/function and returns the device id
- * FIXME: should that be in generic PCI code? */
-static inline u16 calc_devid(u8 bus, u8 devfn)
-{
- return (((u16)bus) << 8) | devfn;
-}
-
static inline int get_ioapic_devid(int id)
{
struct devid_map *entry;
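
The AMD IOMMU hunks retire the driver-private calc_devid()/PCI_BUS() helpers in favour of the generic PCI_DEVID()/PCI_BUS_NUM() macros, which pack and unpack the 16-bit requester ID with the bus number in the high byte and devfn in the low byte. For reference, the helpers amount to the following (shown standalone for illustration; the kernel provides them via <linux/pci.h>):

	#include <stdint.h>

	/* bus:devfn packed into a 16-bit requester/device ID. */
	#define PCI_DEVID(bus, devfn)	((((uint16_t)(bus)) << 8) | (devfn))
	#define PCI_BUS_NUM(x)		(((x) >> 8) & 0xff)

	/* devfn itself packs the slot (device) and function numbers. */
	#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
	#define PCI_FUNC(devfn)		((devfn) & 0x07)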
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index f19cd7367040..4faaf8053f26 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -610,7 +610,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
__func__, 'A' + layer, segment * isdbt_rate[m][f][i]/1000,
rate, rate);
- state->estimated_rate[i] = rate;
+ state->estimated_rate[layer] = rate;
}
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index d4de021dc844..31ce7698acb9 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -461,7 +461,7 @@ int cx25821_video_register(struct cx25821_dev *dev)
spin_lock_init(&dev->slock);
- for (i = 0; i < MAX_VID_CHANNEL_NUM - 1; ++i) {
+ for (i = 0; i < VID_CHANNEL_NUM; ++i) {
cx25821_init_controls(dev, i);
cx25821_risc_stopper(dev->pci, &dev->channels[i].vidq.stopper,
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 8647dc6f52d0..748f8f3e9ff5 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -202,20 +202,16 @@ void pci_bus_add_devices(const struct pci_bus *bus)
if (dev->is_added)
continue;
retval = pci_bus_add_device(dev);
+ if (retval)
+ dev_err(&dev->dev, "Error adding device (%d)\n",
+ retval);
}
list_for_each_entry(dev, &bus->devices, bus_list) {
BUG_ON(!dev->is_added);
-
child = dev->subordinate;
-
- if (!child)
- continue;
- pci_bus_add_devices(child);
-
- if (child->is_added)
- continue;
- child->is_added = 1;
+ if (child)
+ pci_bus_add_devices(child);
}
}
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 13e9e63a7266..9fcb87f353d4 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -52,15 +52,12 @@ config HOTPLUG_PCI_IBM
When in doubt, say N.
config HOTPLUG_PCI_ACPI
- tristate "ACPI PCI Hotplug driver"
- depends on (!ACPI_DOCK && ACPI) || (ACPI_DOCK)
+ bool "ACPI PCI Hotplug driver"
+ depends on HOTPLUG_PCI=y && ((!ACPI_DOCK && ACPI) || (ACPI_DOCK))
help
Say Y here if you have a system that supports PCI Hotplug using
ACPI.
- To compile this driver as a module, choose M here: the
- module will be called acpiphp.
-
When in doubt, say N.
config HOTPLUG_PCI_ACPI_IBM
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index b70ac00a117e..6fdd49c6f0b9 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -73,8 +73,9 @@ static inline const char *slot_name(struct slot *slot)
*/
struct acpiphp_bridge {
struct list_head list;
+ struct list_head slots;
+ struct kref ref;
acpi_handle handle;
- struct acpiphp_slot *slots;
/* Ejectable PCI-to-PCI bridge (PCI bridge and PCI function) */
struct acpiphp_func *func;
@@ -97,7 +98,7 @@ struct acpiphp_bridge {
* PCI slot information for each *physical* PCI slot
*/
struct acpiphp_slot {
- struct acpiphp_slot *next;
+ struct list_head node;
struct acpiphp_bridge *bridge; /* parent */
struct list_head funcs; /* one slot may have different
objects (i.e. for each function) */
@@ -119,7 +120,6 @@ struct acpiphp_slot {
*/
struct acpiphp_func {
struct acpiphp_slot *slot; /* parent */
- struct acpiphp_bridge *bridge; /* Ejectable PCI-to-PCI bridge */
struct list_head sibling;
struct notifier_block nb;
@@ -146,10 +146,6 @@ struct acpiphp_attention_info
#define ACPI_PCI_HOST_HID "PNP0A03"
/* ACPI _STA method value (ignore bit 4; battery present) */
-#define ACPI_STA_PRESENT (0x00000001)
-#define ACPI_STA_ENABLED (0x00000002)
-#define ACPI_STA_SHOW_IN_UI (0x00000004)
-#define ACPI_STA_FUNCTIONING (0x00000008)
#define ACPI_STA_ALL (0x0000000f)
/* bridge flags */
@@ -174,25 +170,24 @@ struct acpiphp_attention_info
/* function prototypes */
/* acpiphp_core.c */
-extern int acpiphp_register_attention(struct acpiphp_attention_info*info);
-extern int acpiphp_unregister_attention(struct acpiphp_attention_info *info);
-extern int acpiphp_register_hotplug_slot(struct acpiphp_slot *slot);
-extern void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot);
+int acpiphp_register_attention(struct acpiphp_attention_info*info);
+int acpiphp_unregister_attention(struct acpiphp_attention_info *info);
+int acpiphp_register_hotplug_slot(struct acpiphp_slot *slot);
+void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot);
/* acpiphp_glue.c */
-extern int acpiphp_glue_init (void);
-extern void acpiphp_glue_exit (void);
typedef int (*acpiphp_callback)(struct acpiphp_slot *slot, void *data);
-extern int acpiphp_enable_slot (struct acpiphp_slot *slot);
-extern int acpiphp_disable_slot (struct acpiphp_slot *slot);
-extern int acpiphp_eject_slot (struct acpiphp_slot *slot);
-extern u8 acpiphp_get_power_status (struct acpiphp_slot *slot);
-extern u8 acpiphp_get_attention_status (struct acpiphp_slot *slot);
-extern u8 acpiphp_get_latch_status (struct acpiphp_slot *slot);
-extern u8 acpiphp_get_adapter_status (struct acpiphp_slot *slot);
+int acpiphp_enable_slot(struct acpiphp_slot *slot);
+int acpiphp_disable_slot(struct acpiphp_slot *slot);
+int acpiphp_eject_slot(struct acpiphp_slot *slot);
+u8 acpiphp_get_power_status(struct acpiphp_slot *slot);
+u8 acpiphp_get_attention_status(struct acpiphp_slot *slot);
+u8 acpiphp_get_latch_status(struct acpiphp_slot *slot);
+u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot);
/* variables */
extern bool acpiphp_debug;
+extern bool acpiphp_disabled;
#endif /* _ACPIPHP_H */
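
These header changes swap the hand-rolled slot chain (struct acpiphp_slot *slots plus ->next) for a list_head and add a kref so the bridge can be reference counted. A kernel-style sketch of what slot iteration looks like after the conversion; the demo_ types are placeholders that only mirror the fields added here:

    #include <linux/kref.h>
    #include <linux/list.h>

    struct demo_slot {
        struct list_head node;      /* was: struct demo_slot *next */
        unsigned int device;
    };

    struct demo_bridge {
        struct list_head slots;     /* was: struct demo_slot *slots */
        struct kref ref;            /* bridge lifetime is now reference counted */
    };

    /*
     * Old-style walk:
     *     for (slot = bridge->slots; slot; slot = slot->next)
     * New-style walk over the same slots:
     */
    static struct demo_slot *demo_find_slot(struct demo_bridge *bridge,
                                            unsigned int device)
    {
        struct demo_slot *slot;

        list_for_each_entry(slot, &bridge->slots, node)
            if (slot->device == device)
                return slot;
        return NULL;
    }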
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index c2fd3095701f..ca8127950fcd 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -37,6 +37,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
@@ -48,6 +49,7 @@
#define SLOT_NAME_SIZE 21 /* {_SUN} */
bool acpiphp_debug;
+bool acpiphp_disabled;
/* local variables */
static struct acpiphp_attention_info *attention_info;
@@ -60,7 +62,9 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
+MODULE_PARM_DESC(disable, "disable acpiphp driver");
module_param_named(debug, acpiphp_debug, bool, 0644);
+module_param_named(disable, acpiphp_disabled, bool, 0444);
/* export the attention callback registration methods */
EXPORT_SYMBOL_GPL(acpiphp_register_attention);
@@ -351,27 +355,9 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
}
-static int __init acpiphp_init(void)
+void __init acpiphp_init(void)
{
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
-
- if (acpi_pci_disabled)
- return 0;
-
- /* read all the ACPI info from the system */
- /* initialize internal data structure etc. */
- return acpiphp_glue_init();
-}
-
-
-static void __exit acpiphp_exit(void)
-{
- if (acpi_pci_disabled)
- return;
-
- /* deallocate internal data structures etc. */
- acpiphp_glue_exit();
+ info(DRIVER_DESC " version: " DRIVER_VERSION "%s\n",
+ acpiphp_disabled ? ", disabled by user; please report a bug"
+ : "");
}
-
-module_init(acpiphp_init);
-module_exit(acpiphp_exit);
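
With module_init()/module_exit() removed, acpiphp can only be built in and acpiphp_init() is reduced to a banner, so the new disable parameter becomes the only runtime switch. For built-in code a module parameter is set from the kernel command line with the module name as prefix, so booting with acpiphp.disable=1 should set acpiphp_disabled. A minimal sketch of the same gate pattern with hypothetical demo_ names:

    #include <linux/moduleparam.h>
    #include <linux/types.h>

    static bool demo_disabled;              /* set via "demo.disable=1" on the command line */
    module_param_named(disable, demo_disabled, bool, 0444);

    static void demo_enumerate(void)
    {
        if (demo_disabled)                  /* every entry point checks the gate */
            return;
        /* ... enumeration work ... */
    }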
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 270fdbadc19c..96fed19c6d90 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -54,6 +54,7 @@
#include "acpiphp.h"
static LIST_HEAD(bridge_list);
+static DEFINE_MUTEX(bridge_mutex);
#define MY_NAME "acpiphp_glue"
@@ -61,6 +62,7 @@ static void handle_hotplug_event_bridge (acpi_handle, u32, void *);
static void acpiphp_sanitize_bus(struct pci_bus *bus);
static void acpiphp_set_hpp_values(struct pci_bus *bus);
static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
+static void free_bridge(struct kref *kref);
/* callback routine to check for the existence of a pci dock device */
static acpi_status
@@ -76,6 +78,39 @@ is_pci_dock_device(acpi_handle handle, u32 lvl, void *context, void **rv)
}
}
+static inline void get_bridge(struct acpiphp_bridge *bridge)
+{
+ kref_get(&bridge->ref);
+}
+
+static inline void put_bridge(struct acpiphp_bridge *bridge)
+{
+ kref_put(&bridge->ref, free_bridge);
+}
+
+static void free_bridge(struct kref *kref)
+{
+ struct acpiphp_bridge *bridge;
+ struct acpiphp_slot *slot, *next;
+ struct acpiphp_func *func, *tmp;
+
+ bridge = container_of(kref, struct acpiphp_bridge, ref);
+
+ list_for_each_entry_safe(slot, next, &bridge->slots, node) {
+ list_for_each_entry_safe(func, tmp, &slot->funcs, sibling) {
+ kfree(func);
+ }
+ kfree(slot);
+ }
+
+ /* Release reference acquired by acpiphp_bridge_handle_to_function() */
+ if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func)
+ put_bridge(bridge->func->slot->bridge);
+ put_device(&bridge->pci_bus->dev);
+ pci_dev_put(bridge->pci_dev);
+ kfree(bridge);
+}
+
/*
* the _DCK method can do funny things... and sometimes not
* hah-hah funny.
@@ -154,9 +189,10 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
acpi_handle tmp;
acpi_status status = AE_OK;
unsigned long long adr, sun;
- int device, function, retval;
+ int device, function, retval, found = 0;
struct pci_bus *pbus = bridge->pci_bus;
struct pci_dev *pdev;
+ u32 val;
if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
return AE_OK;
@@ -170,7 +206,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
device = (adr >> 16) & 0xffff;
function = adr & 0xffff;
- pdev = pbus->self;
+ pdev = bridge->pci_dev;
if (pdev && device_is_managed_by_native_pciehp(pdev))
return AE_OK;
@@ -178,7 +214,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
if (!newfunc)
return AE_NO_MEMORY;
- INIT_LIST_HEAD(&newfunc->sibling);
newfunc->handle = handle;
newfunc->function = function;
@@ -207,14 +242,15 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
}
/* search for objects that share the same slot */
- for (slot = bridge->slots; slot; slot = slot->next)
+ list_for_each_entry(slot, &bridge->slots, node)
if (slot->device == device) {
if (slot->sun != sun)
warn("sibling found, but _SUN doesn't match!\n");
+ found = 1;
break;
}
- if (!slot) {
+ if (!found) {
slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL);
if (!slot) {
kfree(newfunc);
@@ -227,9 +263,9 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
INIT_LIST_HEAD(&slot->funcs);
mutex_init(&slot->crit_sect);
- slot->next = bridge->slots;
- bridge->slots = slot;
-
+ mutex_lock(&bridge_mutex);
+ list_add_tail(&slot->node, &bridge->slots);
+ mutex_unlock(&bridge_mutex);
bridge->nr_slots++;
dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
@@ -247,13 +283,13 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
}
newfunc->slot = slot;
+ mutex_lock(&bridge_mutex);
list_add_tail(&newfunc->sibling, &slot->funcs);
+ mutex_unlock(&bridge_mutex);
- pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
- if (pdev) {
+ if (pci_bus_read_dev_vendor_id(pbus, PCI_DEVFN(device, function),
+ &val, 60*1000))
slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
- pci_dev_put(pdev);
- }
if (is_dock_device(handle)) {
/* we don't want to call this device's _EJ0
@@ -290,7 +326,9 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
err_exit:
bridge->nr_slots--;
- bridge->slots = slot->next;
+ mutex_lock(&bridge_mutex);
+ list_del(&slot->node);
+ mutex_unlock(&bridge_mutex);
kfree(slot);
kfree(newfunc);
@@ -315,13 +353,17 @@ static void init_bridge_misc(struct acpiphp_bridge *bridge)
acpi_status status;
/* must be added to the list prior to calling register_slot */
+ mutex_lock(&bridge_mutex);
list_add(&bridge->list, &bridge_list);
+ mutex_unlock(&bridge_mutex);
/* register all slot objects under this bridge */
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge->handle, (u32)1,
register_slot, NULL, bridge, NULL);
if (ACPI_FAILURE(status)) {
+ mutex_lock(&bridge_mutex);
list_del(&bridge->list);
+ mutex_unlock(&bridge_mutex);
return;
}
@@ -351,178 +393,46 @@ static struct acpiphp_func *acpiphp_bridge_handle_to_function(acpi_handle handle
{
struct acpiphp_bridge *bridge;
struct acpiphp_slot *slot;
- struct acpiphp_func *func;
+ struct acpiphp_func *func = NULL;
+ mutex_lock(&bridge_mutex);
list_for_each_entry(bridge, &bridge_list, list) {
- for (slot = bridge->slots; slot; slot = slot->next) {
+ list_for_each_entry(slot, &bridge->slots, node) {
list_for_each_entry(func, &slot->funcs, sibling) {
- if (func->handle == handle)
+ if (func->handle == handle) {
+ get_bridge(func->slot->bridge);
+ mutex_unlock(&bridge_mutex);
return func;
+ }
}
}
}
+ mutex_unlock(&bridge_mutex);
return NULL;
}
-static inline void config_p2p_bridge_flags(struct acpiphp_bridge *bridge)
-{
- acpi_handle dummy_handle;
- struct acpiphp_func *func;
-
- if (ACPI_SUCCESS(acpi_get_handle(bridge->handle,
- "_EJ0", &dummy_handle))) {
- bridge->flags |= BRIDGE_HAS_EJ0;
-
- dbg("found ejectable p2p bridge\n");
-
- /* make link between PCI bridge and PCI function */
- func = acpiphp_bridge_handle_to_function(bridge->handle);
- if (!func)
- return;
- bridge->func = func;
- func->bridge = bridge;
- }
-}
-
-
-/* allocate and initialize host bridge data structure */
-static void add_host_bridge(struct acpi_pci_root *root)
-{
- struct acpiphp_bridge *bridge;
- acpi_handle handle = root->device->handle;
-
- bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
- if (bridge == NULL)
- return;
-
- bridge->handle = handle;
-
- bridge->pci_bus = root->bus;
-
- init_bridge_misc(bridge);
-}
-
-
-/* allocate and initialize PCI-to-PCI bridge data structure */
-static void add_p2p_bridge(acpi_handle *handle)
-{
- struct acpiphp_bridge *bridge;
-
- bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
- if (bridge == NULL) {
- err("out of memory\n");
- return;
- }
-
- bridge->handle = handle;
- config_p2p_bridge_flags(bridge);
-
- bridge->pci_dev = acpi_get_pci_dev(handle);
- bridge->pci_bus = bridge->pci_dev->subordinate;
- if (!bridge->pci_bus) {
- err("This is not a PCI-to-PCI bridge!\n");
- goto err;
- }
-
- /*
- * Grab a ref to the subordinate PCI bus in case the bus is
- * removed via PCI core logical hotplug. The ref pins the bus
- * (which we access during module unload).
- */
- get_device(&bridge->pci_bus->dev);
-
- init_bridge_misc(bridge);
- return;
- err:
- pci_dev_put(bridge->pci_dev);
- kfree(bridge);
- return;
-}
-
-
-/* callback routine to find P2P bridges */
-static acpi_status
-find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- acpi_status status;
- struct pci_dev *dev;
-
- dev = acpi_get_pci_dev(handle);
- if (!dev || !dev->subordinate)
- goto out;
-
- /* check if this bridge has ejectable slots */
- if ((detect_ejectable_slots(handle) > 0)) {
- dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev));
- add_p2p_bridge(handle);
- }
-
- /* search P2P bridges under this p2p bridge */
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- find_p2p_bridge, NULL, NULL, NULL);
- if (ACPI_FAILURE(status))
- warn("find_p2p_bridge failed (error code = 0x%x)\n", status);
-
- out:
- pci_dev_put(dev);
- return AE_OK;
-}
-
-
-/* find hot-pluggable slots, and then find P2P bridge */
-static int add_bridge(struct acpi_pci_root *root)
-{
- acpi_status status;
- unsigned long long tmp;
- acpi_handle dummy_handle;
- acpi_handle handle = root->device->handle;
-
- /* if the bridge doesn't have _STA, we assume it is always there */
- status = acpi_get_handle(handle, "_STA", &dummy_handle);
- if (ACPI_SUCCESS(status)) {
- status = acpi_evaluate_integer(handle, "_STA", NULL, &tmp);
- if (ACPI_FAILURE(status)) {
- dbg("%s: _STA evaluation failure\n", __func__);
- return 0;
- }
- if ((tmp & ACPI_STA_FUNCTIONING) == 0)
- /* don't register this object */
- return 0;
- }
-
- /* check if this bridge has ejectable slots */
- if (detect_ejectable_slots(handle) > 0) {
- dbg("found PCI host-bus bridge with hot-pluggable slots\n");
- add_host_bridge(root);
- }
-
- /* search P2P bridges under this host bridge */
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- find_p2p_bridge, NULL, NULL, NULL);
-
- if (ACPI_FAILURE(status))
- warn("find_p2p_bridge failed (error code = 0x%x)\n", status);
-
- return 0;
-}
-
static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
{
struct acpiphp_bridge *bridge;
+ mutex_lock(&bridge_mutex);
list_for_each_entry(bridge, &bridge_list, list)
- if (bridge->handle == handle)
+ if (bridge->handle == handle) {
+ get_bridge(bridge);
+ mutex_unlock(&bridge_mutex);
return bridge;
+ }
+ mutex_unlock(&bridge_mutex);
return NULL;
}
static void cleanup_bridge(struct acpiphp_bridge *bridge)
{
- struct acpiphp_slot *slot, *next;
- struct acpiphp_func *func, *tmp;
+ struct acpiphp_slot *slot;
+ struct acpiphp_func *func;
acpi_status status;
acpi_handle handle = bridge->handle;
@@ -543,10 +453,8 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
err("failed to install interrupt notify handler\n");
}
- slot = bridge->slots;
- while (slot) {
- next = slot->next;
- list_for_each_entry_safe(func, tmp, &slot->funcs, sibling) {
+ list_for_each_entry(slot, &bridge->slots, node) {
+ list_for_each_entry(func, &slot->funcs, sibling) {
if (is_dock_device(func->handle)) {
unregister_hotplug_dock_device(func->handle);
unregister_dock_notifier(&func->nb);
@@ -558,63 +466,13 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
if (ACPI_FAILURE(status))
err("failed to remove notify handler\n");
}
- list_del(&func->sibling);
- kfree(func);
}
acpiphp_unregister_hotplug_slot(slot);
- list_del(&slot->funcs);
- kfree(slot);
- slot = next;
}
- /*
- * Only P2P bridges have a pci_dev
- */
- if (bridge->pci_dev)
- put_device(&bridge->pci_bus->dev);
-
- pci_dev_put(bridge->pci_dev);
+ mutex_lock(&bridge_mutex);
list_del(&bridge->list);
- kfree(bridge);
-}
-
-static acpi_status
-cleanup_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- struct acpiphp_bridge *bridge;
-
- /* cleanup p2p bridges under this P2P bridge
- in a depth-first manner */
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- cleanup_p2p_bridge, NULL, NULL, NULL);
-
- bridge = acpiphp_handle_to_bridge(handle);
- if (bridge)
- cleanup_bridge(bridge);
-
- return AE_OK;
-}
-
-static void remove_bridge(struct acpi_pci_root *root)
-{
- struct acpiphp_bridge *bridge;
- acpi_handle handle = root->device->handle;
-
- /* cleanup p2p bridges under this host bridge
- in a depth-first manner */
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
- (u32)1, cleanup_p2p_bridge, NULL, NULL, NULL);
-
- /*
- * On root bridges with hotplug slots directly underneath (ie,
- * no p2p bridge between), we call cleanup_bridge().
- *
- * The else clause cleans up root bridges that either had no
- * hotplug slots at all, or had a p2p bridge underneath.
- */
- bridge = acpiphp_handle_to_bridge(handle);
- if (bridge)
- cleanup_bridge(bridge);
+ mutex_unlock(&bridge_mutex);
}
static int power_on_slot(struct acpiphp_slot *slot)
@@ -798,6 +656,7 @@ static void check_hotplug_bridge(struct acpiphp_slot *slot, struct pci_dev *dev)
}
}
}
+
/**
* enable_device - enable, configure a slot
* @slot: slot to be enabled
@@ -810,9 +669,7 @@ static int __ref enable_device(struct acpiphp_slot *slot)
struct pci_dev *dev;
struct pci_bus *bus = slot->bridge->pci_bus;
struct acpiphp_func *func;
- int retval = 0;
int num, max, pass;
- acpi_status status;
if (slot->flags & SLOT_ENABLED)
goto err_exit;
@@ -867,23 +724,11 @@ static int __ref enable_device(struct acpiphp_slot *slot)
slot->flags &= (~SLOT_ENABLED);
continue;
}
-
- if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE &&
- dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) {
- pci_dev_put(dev);
- continue;
- }
-
- status = find_p2p_bridge(func->handle, (u32)1, bus, NULL);
- if (ACPI_FAILURE(status))
- warn("find_p2p_bridge failed (error code = 0x%x)\n",
- status);
- pci_dev_put(dev);
}
err_exit:
- return retval;
+ return 0;
}
/* return first device in slot, acquiring a reference on it */
@@ -912,23 +757,6 @@ static int disable_device(struct acpiphp_slot *slot)
{
struct acpiphp_func *func;
struct pci_dev *pdev;
- struct pci_bus *bus = slot->bridge->pci_bus;
-
- /* The slot will be enabled when func 0 is added, so check
- func 0 before disable the slot. */
- pdev = pci_get_slot(bus, PCI_DEVFN(slot->device, 0));
- if (!pdev)
- goto err_exit;
- pci_dev_put(pdev);
-
- list_for_each_entry(func, &slot->funcs, sibling) {
- if (func->bridge) {
- /* cleanup p2p bridges under this P2P bridge */
- cleanup_p2p_bridge(func->bridge->handle,
- (u32)1, NULL, NULL);
- func->bridge = NULL;
- }
- }
/*
* enable_device() enumerates all functions in this device via
@@ -947,7 +775,6 @@ static int disable_device(struct acpiphp_slot *slot)
slot->flags &= (~SLOT_ENABLED);
-err_exit:
return 0;
}
@@ -1037,7 +864,7 @@ static int acpiphp_check_bridge(struct acpiphp_bridge *bridge)
enabled = disabled = 0;
- for (slot = bridge->slots; slot; slot = slot->next) {
+ list_for_each_entry(slot, &bridge->slots, node) {
unsigned int status = get_slot_status(slot);
if (slot->flags & SLOT_ENABLED) {
if (status == ACPI_STA_ALL)
@@ -1082,11 +909,11 @@ static void acpiphp_set_hpp_values(struct pci_bus *bus)
*/
static void acpiphp_sanitize_bus(struct pci_bus *bus)
{
- struct pci_dev *dev;
+ struct pci_dev *dev, *tmp;
int i;
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;
- list_for_each_entry(dev, &bus->devices, bus_list) {
+ list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
for (i=0; i<PCI_BRIDGE_RESOURCES; i++) {
struct resource *res = &dev->resource[i];
if ((res->flags & type_mask) && !res->start &&
@@ -1118,6 +945,7 @@ check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
dbg("%s: re-enumerating slots under %s\n",
__func__, objname);
acpiphp_check_bridge(bridge);
+ put_bridge(bridge);
}
return AE_OK ;
}
@@ -1195,6 +1023,7 @@ static void _handle_hotplug_event_bridge(struct work_struct *work)
acpi_scan_lock_release();
kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
+ put_bridge(bridge);
}
/**
@@ -1208,6 +1037,8 @@ static void _handle_hotplug_event_bridge(struct work_struct *work)
static void handle_hotplug_event_bridge(acpi_handle handle, u32 type,
void *context)
{
+ struct acpiphp_bridge *bridge = context;
+
/*
* Currently the code adds all hotplug events to the kacpid_wq
* queue when it should add hotplug events to the kacpi_hotplug_wq.
@@ -1216,6 +1047,7 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type,
* For now just re-add this work to the kacpi_hotplug_wq so we
* don't deadlock on hotplug actions.
*/
+ get_bridge(bridge);
alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge);
}
@@ -1270,6 +1102,7 @@ static void _handle_hotplug_event_func(struct work_struct *work)
acpi_scan_lock_release();
kfree(hp_work); /* allocated in handle_hotplug_event_func */
+ put_bridge(func->slot->bridge);
}
/**
@@ -1283,6 +1116,8 @@ static void _handle_hotplug_event_func(struct work_struct *work)
static void handle_hotplug_event_func(acpi_handle handle, u32 type,
void *context)
{
+ struct acpiphp_func *func = context;
+
/*
* Currently the code adds all hotplug events to the kacpid_wq
* queue when it should add hotplug events to the kacpi_hotplug_wq.
@@ -1291,33 +1126,69 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type,
* For now just re-add this work to the kacpi_hotplug_wq so we
* don't deadlock on hotplug actions.
*/
+ get_bridge(func->slot->bridge);
alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_func);
}
-static struct acpi_pci_driver acpi_pci_hp_driver = {
- .add = add_bridge,
- .remove = remove_bridge,
-};
-
-/**
- * acpiphp_glue_init - initializes all PCI hotplug - ACPI glue data structures
+/*
+ * Create hotplug slots for the PCI bus.
+ * It should always return 0 to avoid skipping following notifiers.
*/
-int __init acpiphp_glue_init(void)
+void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle)
{
- acpi_pci_register_driver(&acpi_pci_hp_driver);
+ acpi_handle dummy_handle;
+ struct acpiphp_bridge *bridge;
- return 0;
-}
+ if (acpiphp_disabled)
+ return;
+ if (detect_ejectable_slots(handle) <= 0)
+ return;
-/**
- * acpiphp_glue_exit - terminates all PCI hotplug - ACPI glue data structures
- *
- * This function frees all data allocated in acpiphp_glue_init().
- */
-void acpiphp_glue_exit(void)
+ bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
+ if (bridge == NULL) {
+ err("out of memory\n");
+ return;
+ }
+
+ INIT_LIST_HEAD(&bridge->slots);
+ kref_init(&bridge->ref);
+ bridge->handle = handle;
+ bridge->pci_dev = pci_dev_get(bus->self);
+ bridge->pci_bus = bus;
+
+ /*
+ * Grab a ref to the subordinate PCI bus in case the bus is
+ * removed via PCI core logical hotplug. The ref pins the bus
+ * (which we access during module unload).
+ */
+ get_device(&bus->dev);
+
+ if (!pci_is_root_bus(bridge->pci_bus) &&
+ ACPI_SUCCESS(acpi_get_handle(bridge->handle,
+ "_EJ0", &dummy_handle))) {
+ dbg("found ejectable p2p bridge\n");
+ bridge->flags |= BRIDGE_HAS_EJ0;
+ bridge->func = acpiphp_bridge_handle_to_function(handle);
+ }
+
+ init_bridge_misc(bridge);
+}
+
+/* Destroy hotplug slots associated with the PCI bus */
+void acpiphp_remove_slots(struct pci_bus *bus)
{
- acpi_pci_unregister_driver(&acpi_pci_hp_driver);
+ struct acpiphp_bridge *bridge, *tmp;
+
+ if (acpiphp_disabled)
+ return;
+
+ list_for_each_entry_safe(bridge, tmp, &bridge_list, list)
+ if (bridge->pci_bus == bus) {
+ cleanup_bridge(bridge);
+ put_bridge(bridge);
+ break;
+ }
}
/**
@@ -1396,7 +1267,7 @@ u8 acpiphp_get_latch_status(struct acpiphp_slot *slot)
sta = get_slot_status(slot);
- return (sta & ACPI_STA_SHOW_IN_UI) ? 0 : 1;
+ return (sta & ACPI_STA_DEVICE_UI) ? 0 : 1;
}
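
The thread running through this file is a reference-counting discipline: any path that hands the bridge to asynchronous work (the hotplug event handlers queueing onto the hotplug workqueue) or returns it from under bridge_mutex first calls get_bridge(), and the matching put_bridge() at the end lets free_bridge() run only once the last user is done. A compact kernel-style sketch of that pattern; the demo_ names are placeholders, not the driver's API:

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct demo_bridge {
        struct kref ref;
        struct work_struct work;
    };

    static void demo_free(struct kref *kref)
    {
        struct demo_bridge *b = container_of(kref, struct demo_bridge, ref);

        kfree(b);                           /* last reference gone: safe to free */
    }

    static void demo_event_work(struct work_struct *work)
    {
        struct demo_bridge *b = container_of(work, struct demo_bridge, work);

        /* ... handle the hotplug event ... */
        kref_put(&b->ref, demo_free);       /* drop the ref taken when queueing */
    }

    static void demo_event_notify(struct demo_bridge *b)
    {
        kref_get(&b->ref);                  /* pin the bridge across the async work */
        schedule_work(&b->work);
    }

The same pairing explains the put_bridge() calls added at the tail of _handle_hotplug_event_bridge() and _handle_hotplug_event_func() above.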
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index 9fff878cf026..1356211431d0 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -75,28 +75,36 @@ static inline const char *slot_name(struct slot *slot)
return hotplug_slot_name(slot->hotplug_slot);
}
-extern int cpci_hp_register_controller(struct cpci_hp_controller *controller);
-extern int cpci_hp_unregister_controller(struct cpci_hp_controller *controller);
-extern int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last);
-extern int cpci_hp_unregister_bus(struct pci_bus *bus);
-extern int cpci_hp_start(void);
-extern int cpci_hp_stop(void);
+int cpci_hp_register_controller(struct cpci_hp_controller *controller);
+int cpci_hp_unregister_controller(struct cpci_hp_controller *controller);
+int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last);
+int cpci_hp_unregister_bus(struct pci_bus *bus);
+int cpci_hp_start(void);
+int cpci_hp_stop(void);
/*
* Internal function prototypes, these functions should not be used by
* board/chassis drivers.
*/
-extern u8 cpci_get_attention_status(struct slot *slot);
-extern u8 cpci_get_latch_status(struct slot *slot);
-extern u8 cpci_get_adapter_status(struct slot *slot);
-extern u16 cpci_get_hs_csr(struct slot * slot);
-extern int cpci_set_attention_status(struct slot *slot, int status);
-extern int cpci_check_and_clear_ins(struct slot * slot);
-extern int cpci_check_ext(struct slot * slot);
-extern int cpci_clear_ext(struct slot * slot);
-extern int cpci_led_on(struct slot * slot);
-extern int cpci_led_off(struct slot * slot);
-extern int cpci_configure_slot(struct slot *slot);
-extern int cpci_unconfigure_slot(struct slot *slot);
+u8 cpci_get_attention_status(struct slot *slot);
+u8 cpci_get_latch_status(struct slot *slot);
+u8 cpci_get_adapter_status(struct slot *slot);
+u16 cpci_get_hs_csr(struct slot * slot);
+int cpci_set_attention_status(struct slot *slot, int status);
+int cpci_check_and_clear_ins(struct slot * slot);
+int cpci_check_ext(struct slot * slot);
+int cpci_clear_ext(struct slot * slot);
+int cpci_led_on(struct slot * slot);
+int cpci_led_off(struct slot * slot);
+int cpci_configure_slot(struct slot *slot);
+int cpci_unconfigure_slot(struct slot *slot);
+
+#ifdef CONFIG_HOTPLUG_PCI_CPCI
+int cpci_hotplug_init(int debug);
+void cpci_hotplug_exit(void);
+#else
+static inline int cpci_hotplug_init(int debug) { return 0; }
+static inline void cpci_hotplug_exit(void) { }
+#endif
#endif /* _CPCI_HOTPLUG_H */
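
Moving the cpci_hotplug_init()/cpci_hotplug_exit() declarations into this header relies on the usual idiom for optional code: a real prototype when the option is enabled, a static inline stub otherwise, so callers such as pci_hotplug_core.c compile the same either way. Sketched generically with a hypothetical CONFIG_DEMO_FEATURE:

    /* in a header shared by callers and the optional implementation */
    #ifdef CONFIG_DEMO_FEATURE
    int demo_init(int debug);               /* provided by the optional object file */
    void demo_exit(void);
    #else
    static inline int demo_init(int debug) { return 0; }   /* compiles away */
    static inline void demo_exit(void) { }
    #endif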
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index d8ffc7366801..516b87738b6e 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -404,50 +404,44 @@ struct resource_lists {
/* debugfs functions for the hotplug controller info */
-extern void cpqhp_initialize_debugfs(void);
-extern void cpqhp_shutdown_debugfs(void);
-extern void cpqhp_create_debugfs_files(struct controller *ctrl);
-extern void cpqhp_remove_debugfs_files(struct controller *ctrl);
+void cpqhp_initialize_debugfs(void);
+void cpqhp_shutdown_debugfs(void);
+void cpqhp_create_debugfs_files(struct controller *ctrl);
+void cpqhp_remove_debugfs_files(struct controller *ctrl);
/* controller functions */
-extern void cpqhp_pushbutton_thread(unsigned long event_pointer);
-extern irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
-extern int cpqhp_find_available_resources(struct controller *ctrl,
- void __iomem *rom_start);
-extern int cpqhp_event_start_thread(void);
-extern void cpqhp_event_stop_thread(void);
-extern struct pci_func *cpqhp_slot_create(unsigned char busnumber);
-extern struct pci_func *cpqhp_slot_find(unsigned char bus, unsigned char device,
- unsigned char index);
-extern int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_hardware_test(struct controller *ctrl, int test_num);
+void cpqhp_pushbutton_thread(unsigned long event_pointer);
+irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
+int cpqhp_find_available_resources(struct controller *ctrl,
+ void __iomem *rom_start);
+int cpqhp_event_start_thread(void);
+void cpqhp_event_stop_thread(void);
+struct pci_func *cpqhp_slot_create(unsigned char busnumber);
+struct pci_func *cpqhp_slot_find(unsigned char bus, unsigned char device,
+ unsigned char index);
+int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func);
+int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func);
+int cpqhp_hardware_test(struct controller *ctrl, int test_num);
/* resource functions */
-extern int cpqhp_resource_sort_and_combine (struct pci_resource **head);
+int cpqhp_resource_sort_and_combine (struct pci_resource **head);
/* pci functions */
-extern int cpqhp_set_irq(u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
-extern int cpqhp_get_bus_dev(struct controller *ctrl, u8 *bus_num, u8 *dev_num,
- u8 slot);
-extern int cpqhp_save_config(struct controller *ctrl, int busnumber,
- int is_hot_plug);
-extern int cpqhp_save_base_addr_length(struct controller *ctrl,
- struct pci_func *func);
-extern int cpqhp_save_used_resources(struct controller *ctrl,
- struct pci_func *func);
-extern int cpqhp_configure_board(struct controller *ctrl,
- struct pci_func *func);
-extern int cpqhp_save_slot_config(struct controller *ctrl,
- struct pci_func *new_slot);
-extern int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func);
-extern void cpqhp_destroy_board_resources(struct pci_func *func);
-extern int cpqhp_return_board_resources (struct pci_func *func,
- struct resource_lists *resources);
-extern void cpqhp_destroy_resource_list(struct resource_lists *resources);
-extern int cpqhp_configure_device(struct controller *ctrl,
- struct pci_func *func);
-extern int cpqhp_unconfigure_device(struct pci_func *func);
+int cpqhp_set_irq(u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
+int cpqhp_get_bus_dev(struct controller *ctrl, u8 *bus_num, u8 *dev_num,
+ u8 slot);
+int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug);
+int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func *func);
+int cpqhp_save_used_resources(struct controller *ctrl, struct pci_func *func);
+int cpqhp_configure_board(struct controller *ctrl, struct pci_func *func);
+int cpqhp_save_slot_config(struct controller *ctrl, struct pci_func *new_slot);
+int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func);
+void cpqhp_destroy_board_resources(struct pci_func *func);
+int cpqhp_return_board_resources(struct pci_func *func,
+ struct resource_lists *resources);
+void cpqhp_destroy_resource_list(struct resource_lists *resources);
+int cpqhp_configure_device(struct controller *ctrl, struct pci_func *func);
+int cpqhp_unconfigure_device(struct pci_func *func);
/* Global variables */
extern int cpqhp_debug;
diff --git a/drivers/pci/hotplug/cpqphp_nvram.h b/drivers/pci/hotplug/cpqphp_nvram.h
index e89c0702119d..34e4e54fcf15 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.h
+++ b/drivers/pci/hotplug/cpqphp_nvram.h
@@ -30,26 +30,26 @@
#ifndef CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM
-static inline void compaq_nvram_init (void __iomem *rom_start)
+static inline void compaq_nvram_init(void __iomem *rom_start)
{
return;
}
-static inline int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
+static inline int compaq_nvram_load(void __iomem *rom_start, struct controller *ctrl)
{
return 0;
}
-static inline int compaq_nvram_store (void __iomem *rom_start)
+static inline int compaq_nvram_store(void __iomem *rom_start)
{
return 0;
}
#else
-extern void compaq_nvram_init (void __iomem *rom_start);
-extern int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl);
-extern int compaq_nvram_store (void __iomem *rom_start);
+void compaq_nvram_init(void __iomem *rom_start);
+int compaq_nvram_load(void __iomem *rom_start, struct controller *ctrl);
+int compaq_nvram_store(void __iomem *rom_start);
#endif
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h
index a8d391a4957d..8c5b25871d02 100644
--- a/drivers/pci/hotplug/ibmphp.h
+++ b/drivers/pci/hotplug/ibmphp.h
@@ -275,17 +275,17 @@ extern struct list_head ibmphp_slot_head;
* FUNCTION PROTOTYPES *
***********************************************************/
-extern void ibmphp_free_ebda_hpc_queue (void);
-extern int ibmphp_access_ebda (void);
-extern struct slot *ibmphp_get_slot_from_physical_num (u8);
-extern int ibmphp_get_total_hp_slots (void);
-extern void ibmphp_free_ibm_slot (struct slot *);
-extern void ibmphp_free_bus_info_queue (void);
-extern void ibmphp_free_ebda_pci_rsrc_queue (void);
-extern struct bus_info *ibmphp_find_same_bus_num (u32);
-extern int ibmphp_get_bus_index (u8);
-extern u16 ibmphp_get_total_controllers (void);
-extern int ibmphp_register_pci (void);
+void ibmphp_free_ebda_hpc_queue(void);
+int ibmphp_access_ebda(void);
+struct slot *ibmphp_get_slot_from_physical_num(u8);
+int ibmphp_get_total_hp_slots(void);
+void ibmphp_free_ibm_slot(struct slot *);
+void ibmphp_free_bus_info_queue(void);
+void ibmphp_free_ebda_pci_rsrc_queue(void);
+struct bus_info *ibmphp_find_same_bus_num(u32);
+int ibmphp_get_bus_index(u8);
+u16 ibmphp_get_total_controllers(void);
+int ibmphp_register_pci(void);
/* passed parameters */
#define MEM 0
@@ -381,24 +381,24 @@ struct res_needed {
/* functions */
-extern int ibmphp_rsrc_init (void);
-extern int ibmphp_add_resource (struct resource_node *);
-extern int ibmphp_remove_resource (struct resource_node *);
-extern int ibmphp_find_resource (struct bus_node *, u32, struct resource_node **, int);
-extern int ibmphp_check_resource (struct resource_node *, u8);
-extern int ibmphp_remove_bus (struct bus_node *, u8);
-extern void ibmphp_free_resources (void);
-extern int ibmphp_add_pfmem_from_mem (struct resource_node *);
-extern struct bus_node *ibmphp_find_res_bus (u8);
-extern void ibmphp_print_test (void); /* for debugging purposes */
+int ibmphp_rsrc_init(void);
+int ibmphp_add_resource(struct resource_node *);
+int ibmphp_remove_resource(struct resource_node *);
+int ibmphp_find_resource(struct bus_node *, u32, struct resource_node **, int);
+int ibmphp_check_resource(struct resource_node *, u8);
+int ibmphp_remove_bus(struct bus_node *, u8);
+void ibmphp_free_resources(void);
+int ibmphp_add_pfmem_from_mem(struct resource_node *);
+struct bus_node *ibmphp_find_res_bus(u8);
+void ibmphp_print_test(void); /* for debugging purposes */
-extern void ibmphp_hpc_initvars (void);
-extern int ibmphp_hpc_readslot (struct slot *, u8, u8 *);
-extern int ibmphp_hpc_writeslot (struct slot *, u8);
-extern void ibmphp_lock_operations (void);
-extern void ibmphp_unlock_operations (void);
-extern int ibmphp_hpc_start_poll_thread (void);
-extern void ibmphp_hpc_stop_poll_thread (void);
+void ibmphp_hpc_initvars(void);
+int ibmphp_hpc_readslot(struct slot *, u8, u8 *);
+int ibmphp_hpc_writeslot(struct slot *, u8);
+void ibmphp_lock_operations(void);
+void ibmphp_unlock_operations(void);
+int ibmphp_hpc_start_poll_thread(void);
+void ibmphp_hpc_stop_poll_thread(void);
//----------------------------------------------------------------------------
@@ -749,11 +749,11 @@ struct controller {
/* Functions */
-extern int ibmphp_init_devno (struct slot **); /* This function is called from EBDA, so we need it not be static */
-extern int ibmphp_do_disable_slot (struct slot *slot_cur);
-extern int ibmphp_update_slot_info (struct slot *); /* This function is called from HPC, so we need it to not be be static */
-extern int ibmphp_configure_card (struct pci_func *, u8);
-extern int ibmphp_unconfigure_card (struct slot **, int);
+int ibmphp_init_devno(struct slot **); /* This function is called from EBDA, so we need it not be static */
+int ibmphp_do_disable_slot(struct slot *slot_cur);
+int ibmphp_update_slot_info(struct slot *); /* This function is called from HPC, so we need it to not be be static */
+int ibmphp_configure_card(struct pci_func *, u8);
+int ibmphp_unconfigure_card(struct slot **, int);
extern struct hotplug_slot_ops ibmphp_hotplug_slot_ops;
#endif //__IBMPHP_H
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 202f4a969eb5..ec20f74c8981 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -41,6 +41,7 @@
#include <linux/pci_hotplug.h>
#include <asm/uaccess.h>
#include "../pci.h"
+#include "cpci_hotplug.h"
#define MY_NAME "pci_hotplug"
@@ -63,14 +64,6 @@ static bool debug;
static LIST_HEAD(pci_hotplug_slot_list);
static DEFINE_MUTEX(pci_hp_mutex);
-#ifdef CONFIG_HOTPLUG_PCI_CPCI
-extern int cpci_hotplug_init(int debug);
-extern void cpci_hotplug_exit(void);
-#else
-static inline int cpci_hotplug_init(int debug) { return 0; }
-static inline void cpci_hotplug_exit(void) { }
-#endif
-
/* Weee, fun with macros... */
#define GET_STATUS(name,type) \
static int get_##name (struct hotplug_slot *slot, type *value) \
@@ -524,13 +517,11 @@ int pci_hp_deregister(struct hotplug_slot *hotplug)
*
* Returns 0 if successful, anything else for an error.
*/
-int __must_check pci_hp_change_slot_info(struct hotplug_slot *hotplug,
- struct hotplug_slot_info *info)
+int pci_hp_change_slot_info(struct hotplug_slot *hotplug,
+ struct hotplug_slot_info *info)
{
- struct pci_slot *slot;
if (!hotplug || !info)
return -ENODEV;
- slot = hotplug->pci_slot;
memcpy(hotplug->info, info, sizeof(struct hotplug_slot_info));
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 2c113de94323..7fb326983ed6 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -127,15 +127,15 @@ struct controller {
#define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_NCCS)
#define PSN(ctrl) ((ctrl)->slot_cap >> 19)
-extern int pciehp_sysfs_enable_slot(struct slot *slot);
-extern int pciehp_sysfs_disable_slot(struct slot *slot);
-extern u8 pciehp_handle_attention_button(struct slot *p_slot);
-extern u8 pciehp_handle_switch_change(struct slot *p_slot);
-extern u8 pciehp_handle_presence_change(struct slot *p_slot);
-extern u8 pciehp_handle_power_fault(struct slot *p_slot);
-extern int pciehp_configure_device(struct slot *p_slot);
-extern int pciehp_unconfigure_device(struct slot *p_slot);
-extern void pciehp_queue_pushbutton_work(struct work_struct *work);
+int pciehp_sysfs_enable_slot(struct slot *slot);
+int pciehp_sysfs_disable_slot(struct slot *slot);
+u8 pciehp_handle_attention_button(struct slot *p_slot);
+u8 pciehp_handle_switch_change(struct slot *p_slot);
+u8 pciehp_handle_presence_change(struct slot *p_slot);
+u8 pciehp_handle_power_fault(struct slot *p_slot);
+int pciehp_configure_device(struct slot *p_slot);
+int pciehp_unconfigure_device(struct slot *p_slot);
+void pciehp_queue_pushbutton_work(struct work_struct *work);
struct controller *pcie_init(struct pcie_device *dev);
int pcie_init_notification(struct controller *ctrl);
int pciehp_enable_slot(struct slot *p_slot);
@@ -166,8 +166,8 @@ static inline const char *slot_name(struct slot *slot)
#include <acpi/acpi_bus.h>
#include <linux/pci-acpi.h>
-extern void __init pciehp_acpi_slot_detection_init(void);
-extern int pciehp_acpi_slot_detection_check(struct pci_dev *dev);
+void __init pciehp_acpi_slot_detection_init(void);
+int pciehp_acpi_slot_detection_check(struct pci_dev *dev);
static inline void pciehp_firmware_init(void)
{
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 24d709b7388c..ead7c534095e 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -90,7 +90,7 @@ static int __init dummy_probe(struct pcie_device *dev)
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
return -ENOMEM;
- slot->number = slot_cap >> 19;
+ slot->number = (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19;
list_for_each_entry(tmp, &dummy_slots, list) {
if (tmp->number == slot->number)
dup_slot_id++;
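
The dummy_probe() change masks the Slot Capabilities value with PCI_EXP_SLTCAP_PSN before shifting, which makes the extraction of the Physical Slot Number field (bits 31:19) explicit and keeps it correct even if the value lands in a type wider than 32 bits. A standalone illustration, assuming the usual mask value 0xfff80000 for that field:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_SLTCAP_PSN 0xfff80000      /* Physical Slot Number, bits 31:19 */

    static unsigned int slot_number(uint32_t slot_cap)
    {
        return (slot_cap & DEMO_SLTCAP_PSN) >> 19;
    }

    int main(void)
    {
        uint32_t slot_cap = 0x00400060;     /* PSN = 8, plus some low capability bits */

        printf("physical slot number: %u\n", slot_number(slot_cap));
        return 0;
    }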
diff --git a/drivers/pci/hotplug/rpadlpar.h b/drivers/pci/hotplug/rpadlpar.h
index 4a0a59b82eae..81df93931ad0 100644
--- a/drivers/pci/hotplug/rpadlpar.h
+++ b/drivers/pci/hotplug/rpadlpar.h
@@ -15,10 +15,10 @@
#ifndef _RPADLPAR_IO_H_
#define _RPADLPAR_IO_H_
-extern int dlpar_sysfs_init(void);
-extern void dlpar_sysfs_exit(void);
+int dlpar_sysfs_init(void);
+void dlpar_sysfs_exit(void);
-extern int dlpar_add_slot(char *drc_name);
-extern int dlpar_remove_slot(char *drc_name);
+int dlpar_add_slot(char *drc_name);
+int dlpar_remove_slot(char *drc_name);
#endif
diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h
index df5677440a08..3135856e5e1c 100644
--- a/drivers/pci/hotplug/rpaphp.h
+++ b/drivers/pci/hotplug/rpaphp.h
@@ -86,18 +86,18 @@ extern struct list_head rpaphp_slot_head;
/* function prototypes */
/* rpaphp_pci.c */
-extern int rpaphp_enable_slot(struct slot *slot);
-extern int rpaphp_get_sensor_state(struct slot *slot, int *state);
+int rpaphp_enable_slot(struct slot *slot);
+int rpaphp_get_sensor_state(struct slot *slot, int *state);
/* rpaphp_core.c */
-extern int rpaphp_add_slot(struct device_node *dn);
-extern int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
+int rpaphp_add_slot(struct device_node *dn);
+int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
char **drc_name, char **drc_type, int *drc_power_domain);
/* rpaphp_slot.c */
-extern void dealloc_slot_struct(struct slot *slot);
-extern struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_name, int power_domain);
-extern int rpaphp_register_slot(struct slot *slot);
-extern int rpaphp_deregister_slot(struct slot *slot);
+void dealloc_slot_struct(struct slot *slot);
+struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_name, int power_domain);
+int rpaphp_register_slot(struct slot *slot);
+int rpaphp_deregister_slot(struct slot *slot);
#endif /* _PPC64PHP_H */
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 7db249a25016..46a7b738f61f 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -16,6 +16,7 @@
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/init.h>
+#include <asm/pci_debug.h>
#include <asm/sclp.h>
#define SLOT_NAME_SIZE 10
@@ -49,6 +50,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
return -EIO;
rc = sclp_pci_configure(slot->zdev->fid);
+ zpci_dbg(3, "conf fid:%x, rc:%d\n", slot->zdev->fid, rc);
if (!rc) {
slot->zdev->state = ZPCI_FN_STATE_CONFIGURED;
/* automatically scan the device after is was configured */
@@ -66,16 +68,16 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
if (!zpci_fn_configured(slot->zdev->state))
return -EIO;
+ rc = zpci_disable_device(slot->zdev);
+ if (rc)
+ return rc;
/* TODO: we rely on the user to unbind/remove the device, is that plausible
* or do we need to trigger that here?
*/
rc = sclp_pci_deconfigure(slot->zdev->fid);
- if (!rc) {
- /* Fixme: better call List-PCI to find the disabled FH
- for the FID since the FH should be opaque... */
- slot->zdev->fh &= 0x7fffffff;
+ zpci_dbg(3, "deconf fid:%x, rc:%d\n", slot->zdev->fid, rc);
+ if (!rc)
slot->zdev->state = ZPCI_FN_STATE_STANDBY;
- }
return rc;
}
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index b849f995075a..e260f207a90e 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -168,19 +168,19 @@ struct controller {
#define WRONG_BUS_FREQUENCY 0x0000000D
#define POWER_FAILURE 0x0000000E
-extern int __must_check shpchp_create_ctrl_files(struct controller *ctrl);
-extern void shpchp_remove_ctrl_files(struct controller *ctrl);
-extern int shpchp_sysfs_enable_slot(struct slot *slot);
-extern int shpchp_sysfs_disable_slot(struct slot *slot);
-extern u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl);
-extern u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl);
-extern u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl);
-extern u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl);
-extern int shpchp_configure_device(struct slot *p_slot);
-extern int shpchp_unconfigure_device(struct slot *p_slot);
-extern void cleanup_slots(struct controller *ctrl);
-extern void shpchp_queue_pushbutton_work(struct work_struct *work);
-extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev);
+int __must_check shpchp_create_ctrl_files(struct controller *ctrl);
+void shpchp_remove_ctrl_files(struct controller *ctrl);
+int shpchp_sysfs_enable_slot(struct slot *slot);
+int shpchp_sysfs_disable_slot(struct slot *slot);
+u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl);
+u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl);
+u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl);
+u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl);
+int shpchp_configure_device(struct slot *p_slot);
+int shpchp_unconfigure_device(struct slot *p_slot);
+void cleanup_slots(struct controller *ctrl);
+void shpchp_queue_pushbutton_work(struct work_struct *work);
+int shpc_init( struct controller *ctrl, struct pci_dev *pdev);
static inline const char *slot_name(struct slot *slot)
{
diff --git a/drivers/pci/hotplug/shpchp_sysfs.c b/drivers/pci/hotplug/shpchp_sysfs.c
index eeb23ceae4a8..e8c31fe20566 100644
--- a/drivers/pci/hotplug/shpchp_sysfs.c
+++ b/drivers/pci/hotplug/shpchp_sysfs.c
@@ -85,7 +85,7 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha
}
static DEVICE_ATTR (ctrl, S_IRUGO, show_ctrl, NULL);
-int __must_check shpchp_create_ctrl_files (struct controller *ctrl)
+int shpchp_create_ctrl_files (struct controller *ctrl)
{
return device_create_file (&ctrl->pci_dev->dev, &dev_attr_ctrl);
}
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 00cc78c7aa04..d40bed726769 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -22,10 +22,12 @@
#include <linux/slab.h>
#include "pci.h"
-#include "msi.h"
static int pci_msi_enable = 1;
+#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
+
+
/* Arch hooks */
#ifndef arch_msi_check_device
@@ -111,32 +113,26 @@ void default_restore_msi_irqs(struct pci_dev *dev, int irq)
}
#endif
-static void msi_set_enable(struct pci_dev *dev, int pos, int enable)
+static void msi_set_enable(struct pci_dev *dev, int enable)
{
u16 control;
- BUG_ON(!pos);
-
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
control &= ~PCI_MSI_FLAGS_ENABLE;
if (enable)
control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
static void msix_set_enable(struct pci_dev *dev, int enable)
{
- int pos;
u16 control;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
- control &= ~PCI_MSIX_FLAGS_ENABLE;
- if (enable)
- control |= PCI_MSIX_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
- }
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ if (enable)
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
}
static inline __attribute_const__ u32 msi_mask(unsigned x)
@@ -247,18 +243,18 @@ void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
} else {
struct pci_dev *dev = entry->dev;
- int pos = entry->msi_attrib.pos;
+ int pos = dev->msi_cap;
u16 data;
- pci_read_config_dword(dev, msi_lower_address_reg(pos),
- &msg->address_lo);
+ pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
+ &msg->address_lo);
if (entry->msi_attrib.is_64) {
- pci_read_config_dword(dev, msi_upper_address_reg(pos),
- &msg->address_hi);
- pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
+ pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
+ &msg->address_hi);
+ pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data);
} else {
msg->address_hi = 0;
- pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
+ pci_read_config_word(dev, pos + PCI_MSI_DATA_32, &data);
}
msg->data = data;
}
@@ -302,24 +298,24 @@ void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
} else {
struct pci_dev *dev = entry->dev;
- int pos = entry->msi_attrib.pos;
+ int pos = dev->msi_cap;
u16 msgctl;
- pci_read_config_word(dev, msi_control_reg(pos), &msgctl);
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
msgctl &= ~PCI_MSI_FLAGS_QSIZE;
msgctl |= entry->msi_attrib.multiple << 4;
- pci_write_config_word(dev, msi_control_reg(pos), msgctl);
+ pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);
- pci_write_config_dword(dev, msi_lower_address_reg(pos),
- msg->address_lo);
+ pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
+ msg->address_lo);
if (entry->msi_attrib.is_64) {
- pci_write_config_dword(dev, msi_upper_address_reg(pos),
- msg->address_hi);
- pci_write_config_word(dev, msi_data_reg(pos, 1),
- msg->data);
+ pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
+ msg->address_hi);
+ pci_write_config_word(dev, pos + PCI_MSI_DATA_64,
+ msg->data);
} else {
- pci_write_config_word(dev, msi_data_reg(pos, 0),
- msg->data);
+ pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
+ msg->data);
}
}
entry->msg = *msg;
@@ -391,7 +387,6 @@ static void pci_intx_for_msi(struct pci_dev *dev, int enable)
static void __pci_restore_msi_state(struct pci_dev *dev)
{
- int pos;
u16 control;
struct msi_desc *entry;
@@ -399,22 +394,20 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
return;
entry = irq_get_msi_desc(dev->irq);
- pos = entry->msi_attrib.pos;
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, pos, 0);
+ msi_set_enable(dev, 0);
arch_restore_msi_irqs(dev, dev->irq);
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
control &= ~PCI_MSI_FLAGS_QSIZE;
control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
static void __pci_restore_msix_state(struct pci_dev *dev)
{
- int pos;
struct msi_desc *entry;
u16 control;
@@ -422,13 +415,12 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
return;
BUG_ON(list_empty(&dev->msi_list));
entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
- pos = entry->msi_attrib.pos;
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
/* route the table */
pci_intx_for_msi(dev, 0);
control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
list_for_each_entry(entry, &dev->msi_list, list) {
arch_restore_msi_irqs(dev, entry->irq);
@@ -436,7 +428,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
}
control &= ~PCI_MSIX_FLAGS_MASKALL;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
}
void pci_restore_msi_state(struct pci_dev *dev)
@@ -484,12 +476,12 @@ static struct msi_attribute mode_attribute =
__ATTR(mode, S_IRUGO, show_msi_mode, NULL);
-struct attribute *msi_irq_default_attrs[] = {
+static struct attribute *msi_irq_default_attrs[] = {
&mode_attribute.attr,
NULL
};
-void msi_kobj_release(struct kobject *kobj)
+static void msi_kobj_release(struct kobject *kobj)
{
struct msi_desc *entry = to_msi_desc(kobj);
@@ -552,27 +544,27 @@ out_unroll:
static int msi_capability_init(struct pci_dev *dev, int nvec)
{
struct msi_desc *entry;
- int pos, ret;
+ int ret;
u16 control;
unsigned mask;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- msi_set_enable(dev, pos, 0); /* Disable MSI during set up */
+ msi_set_enable(dev, 0); /* Disable MSI during set up */
- pci_read_config_word(dev, msi_control_reg(pos), &control);
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
/* MSI Entry Initialization */
entry = alloc_msi_entry(dev);
if (!entry)
return -ENOMEM;
entry->msi_attrib.is_msix = 0;
- entry->msi_attrib.is_64 = is_64bit_address(control);
+ entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
entry->msi_attrib.entry_nr = 0;
- entry->msi_attrib.maskbit = is_mask_bit_support(control);
+ entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT);
entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
- entry->msi_attrib.pos = pos;
+ entry->msi_attrib.pos = dev->msi_cap;
- entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
+ entry->mask_pos = dev->msi_cap + (control & PCI_MSI_FLAGS_64BIT) ?
+ PCI_MSI_MASK_64 : PCI_MSI_MASK_32;
/* All MSIs are unmasked by default, Mask them all */
if (entry->msi_attrib.maskbit)
pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
@@ -598,31 +590,30 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
/* Set MSI enabled bits */
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, pos, 1);
+ msi_set_enable(dev, 1);
dev->msi_enabled = 1;
dev->irq = entry->irq;
return 0;
}
-static void __iomem *msix_map_region(struct pci_dev *dev, unsigned pos,
- unsigned nr_entries)
+static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
{
resource_size_t phys_addr;
u32 table_offset;
u8 bir;
- pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
- bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
- table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
+ pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
+ &table_offset);
+ bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
+ table_offset &= PCI_MSIX_TABLE_OFFSET;
phys_addr = pci_resource_start(dev, bir) + table_offset;
return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}
-static int msix_setup_entries(struct pci_dev *dev, unsigned pos,
- void __iomem *base, struct msix_entry *entries,
- int nvec)
+static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
+ struct msix_entry *entries, int nvec)
{
struct msi_desc *entry;
int i;
@@ -642,7 +633,7 @@ static int msix_setup_entries(struct pci_dev *dev, unsigned pos,
entry->msi_attrib.is_64 = 1;
entry->msi_attrib.entry_nr = entries[i].entry;
entry->msi_attrib.default_irq = dev->irq;
- entry->msi_attrib.pos = pos;
+ entry->msi_attrib.pos = dev->msix_cap;
entry->mask_base = base;
list_add_tail(&entry->list, &dev->msi_list);
@@ -652,7 +643,7 @@ static int msix_setup_entries(struct pci_dev *dev, unsigned pos,
}
static void msix_program_entries(struct pci_dev *dev,
- struct msix_entry *entries)
+ struct msix_entry *entries)
{
struct msi_desc *entry;
int i = 0;
@@ -682,23 +673,22 @@ static void msix_program_entries(struct pci_dev *dev,
static int msix_capability_init(struct pci_dev *dev,
struct msix_entry *entries, int nvec)
{
- int pos, ret;
+ int ret;
u16 control;
void __iomem *base;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
/* Ensure MSI-X is disabled while it is set up */
control &= ~PCI_MSIX_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
/* Request & Map MSI-X table region */
- base = msix_map_region(dev, pos, multi_msix_capable(control));
+ base = msix_map_region(dev, msix_table_size(control));
if (!base)
return -ENOMEM;
- ret = msix_setup_entries(dev, pos, base, entries, nvec);
+ ret = msix_setup_entries(dev, base, entries, nvec);
if (ret)
return ret;
@@ -712,7 +702,7 @@ static int msix_capability_init(struct pci_dev *dev,
* interrupts coming in before they're fully set up.
*/
control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
msix_program_entries(dev, entries);
@@ -727,7 +717,7 @@ static int msix_capability_init(struct pci_dev *dev,
dev->msix_enabled = 1;
control &= ~PCI_MSIX_FLAGS_MASKALL;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
return 0;
@@ -795,9 +785,6 @@ static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
if (ret)
return ret;
- if (!pci_find_capability(dev, type))
- return -EINVAL;
-
return 0;
}
@@ -816,13 +803,13 @@ static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
*/
int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
{
- int status, pos, maxvec;
+ int status, maxvec;
u16 msgctl;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (!pos)
+ if (!dev->msi_cap)
return -EINVAL;
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
maxvec = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
if (nvec > maxvec)
return maxvec;
@@ -847,14 +834,13 @@ EXPORT_SYMBOL(pci_enable_msi_block);
int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec)
{
- int ret, pos, nvec;
+ int ret, nvec;
u16 msgctl;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (!pos)
+ if (!dev->msi_cap)
return -EINVAL;
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
if (maxvec)
@@ -876,21 +862,19 @@ void pci_msi_shutdown(struct pci_dev *dev)
struct msi_desc *desc;
u32 mask;
u16 ctrl;
- unsigned pos;
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
BUG_ON(list_empty(&dev->msi_list));
desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
- pos = desc->msi_attrib.pos;
- msi_set_enable(dev, pos, 0);
+ msi_set_enable(dev, 0);
pci_intx_for_msi(dev, 1);
dev->msi_enabled = 0;
/* Return the device with MSI unmasked as initial states */
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
mask = msi_capable_mask(ctrl);
/* Keep cached state to be restored */
__msi_mask_irq(desc, mask, ~mask);
@@ -917,15 +901,13 @@ EXPORT_SYMBOL(pci_disable_msi);
*/
int pci_msix_table_size(struct pci_dev *dev)
{
- int pos;
u16 control;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (!pos)
+ if (!dev->msix_cap)
return 0;
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- return multi_msix_capable(control);
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
+ return msix_table_size(control);
}
/**
@@ -948,7 +930,7 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
int status, nr_entries;
int i, j;
- if (!entries)
+ if (!entries || !dev->msix_cap)
return -EINVAL;
status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
@@ -1048,15 +1030,17 @@ EXPORT_SYMBOL(pci_msi_enabled);
void pci_msi_init_pci_dev(struct pci_dev *dev)
{
- int pos;
INIT_LIST_HEAD(&dev->msi_list);
/* Disable the msi hardware to avoid screaming interrupts
* during boot. This is the power on reset default so
* usually this should be a noop.
*/
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (pos)
- msi_set_enable(dev, pos, 0);
- msix_set_enable(dev, 0);
+ dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (dev->msi_cap)
+ msi_set_enable(dev, 0);
+
+ dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ if (dev->msix_cap)
+ msix_set_enable(dev, 0);
}
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
deleted file mode 100644
index 65c42f80f23e..000000000000
--- a/drivers/pci/msi.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2003-2004 Intel
- * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
- */
-
-#ifndef MSI_H
-#define MSI_H
-
-#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
-#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO)
-#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI)
-#define msi_data_reg(base, is64bit) \
- (base + ((is64bit == 1) ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32))
-#define msi_mask_reg(base, is64bit) \
- (base + ((is64bit == 1) ? PCI_MSI_MASK_64 : PCI_MSI_MASK_32))
-#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
-#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
-
-#define msix_table_offset_reg(base) (base + PCI_MSIX_TABLE)
-#define msix_pba_offset_reg(base) (base + PCI_MSIX_PBA)
-#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
-#define multi_msix_capable(control) msix_table_size((control))
-
-#endif /* MSI_H */
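/*
 * Illustrative sketch, not part of the patch: with the MSI/MSI-X capability
 * offsets cached in struct pci_dev by pci_msi_init_pci_dev(), callers read
 * the MSI-X table size straight from config space instead of going through
 * the removed msi.h helpers.  This mirrors the rewritten
 * pci_msix_table_size() above; the function name here is made up.
 */
#include <linux/pci.h>

static int example_msix_vec_count(struct pci_dev *dev)
{
	u16 control;

	if (!dev->msix_cap)	/* offset cached when the device was scanned */
		return 0;

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
	return (control & PCI_MSIX_FLAGS_QSIZE) + 1;	/* old msix_table_size() */
}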
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 5147c210df52..e4b1fb2c0f5d 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -288,6 +288,32 @@ static struct pci_platform_pm_ops acpi_pci_platform_pm = {
.run_wake = acpi_pci_run_wake,
};
+void acpi_pci_add_bus(struct pci_bus *bus)
+{
+ acpi_handle handle = NULL;
+
+ if (bus->bridge)
+ handle = ACPI_HANDLE(bus->bridge);
+ if (acpi_pci_disabled || handle == NULL)
+ return;
+
+ acpi_pci_slot_enumerate(bus, handle);
+ acpiphp_enumerate_slots(bus, handle);
+}
+
+void acpi_pci_remove_bus(struct pci_bus *bus)
+{
+ /*
+ * bus->bridge->acpi_node.handle has already been reset to NULL
+ * when acpi_pci_remove_bus() is called, so don't check ACPI handle.
+ */
+ if (acpi_pci_disabled)
+ return;
+
+ acpiphp_remove_slots(bus);
+ acpi_pci_slot_remove(bus);
+}
+
/* ACPI bus type */
static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
{
@@ -362,7 +388,11 @@ static int __init acpi_pci_init(void)
ret = register_acpi_bus_type(&acpi_pci_bus);
if (ret)
return 0;
+
pci_set_platform_pm(&acpi_pci_platform_pm);
+ acpi_pci_slot_init();
+ acpiphp_init();
+
return 0;
}
arch_initcall(acpi_pci_init);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 9c6e9bb674ec..5b4a9d9cd200 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -897,7 +897,7 @@ int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
if (pci_resource_len(pdev, resno) == 0)
return 0;
- nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ nr = vma_pages(vma);
start = vma->vm_pgoff;
size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b099e0025d2b..a899d8bb190d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -646,15 +646,11 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
error = platform_pci_set_power_state(dev, state);
if (!error)
pci_update_current_state(dev, state);
- /* Fall back to PCI_D0 if native PM is not supported */
- if (!dev->pm_cap)
- dev->current_state = PCI_D0;
- } else {
+ } else
error = -ENODEV;
- /* Fall back to PCI_D0 if native PM is not supported */
- if (!dev->pm_cap)
- dev->current_state = PCI_D0;
- }
+
+ if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
+ dev->current_state = PCI_D0;
return error;
}
@@ -1575,7 +1571,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
{
u16 pmcsr;
- if (!dev->pm_cap)
+ if (!dev->pme_support)
return;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
@@ -1924,6 +1920,7 @@ void pci_pm_init(struct pci_dev *dev)
dev->wakeup_prepared = false;
dev->pm_cap = 0;
+ dev->pme_support = 0;
/* find PCI PM capability in list */
pm = pci_find_capability(dev, PCI_CAP_ID_PM);
@@ -1975,8 +1972,6 @@ void pci_pm_init(struct pci_dev *dev)
device_set_wakeup_capable(&dev->dev, true);
/* Disable the PME# generation functionality */
pci_pme_active(dev, false);
- } else {
- dev->pme_support = 0;
}
}
@@ -2619,7 +2614,7 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars)
pci_release_region(pdev, i);
}
-int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
+static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
const char *res_name, int excl)
{
int i;
@@ -3699,7 +3694,7 @@ static DEFINE_SPINLOCK(resource_alignment_lock);
* RETURNS: Resource alignment if it is specified.
* Zero if it is not specified.
*/
-resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
+static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
int seg, bus, slot, func, align_order, count;
resource_size_t align = 0;
@@ -3812,7 +3807,7 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
}
}
-ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
+static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
@@ -3823,7 +3818,7 @@ ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
return count;
}
-ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
+static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
size_t count;
spin_lock(&resource_alignment_lock);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 7346ee68f47d..68678ed76b0d 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -8,26 +8,25 @@
/* Functions internal to the PCI core code */
-extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
-extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
+int pci_create_sysfs_dev_files(struct pci_dev *pdev);
+void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
#if !defined(CONFIG_DMI) && !defined(CONFIG_ACPI)
static inline void pci_create_firmware_label_files(struct pci_dev *pdev)
{ return; }
static inline void pci_remove_firmware_label_files(struct pci_dev *pdev)
{ return; }
#else
-extern void pci_create_firmware_label_files(struct pci_dev *pdev);
-extern void pci_remove_firmware_label_files(struct pci_dev *pdev);
+void pci_create_firmware_label_files(struct pci_dev *pdev);
+void pci_remove_firmware_label_files(struct pci_dev *pdev);
#endif
-extern void pci_cleanup_rom(struct pci_dev *dev);
+void pci_cleanup_rom(struct pci_dev *dev);
#ifdef HAVE_PCI_MMAP
enum pci_mmap_api {
PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */
PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */
};
-extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
- struct vm_area_struct *vmai,
- enum pci_mmap_api mmap_api);
+int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai,
+ enum pci_mmap_api mmap_api);
#endif
int pci_probe_reset_function(struct pci_dev *dev);
@@ -60,17 +59,17 @@ struct pci_platform_pm_ops {
int (*run_wake)(struct pci_dev *dev, bool enable);
};
-extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
-extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
-extern void pci_power_up(struct pci_dev *dev);
-extern void pci_disable_enabled_device(struct pci_dev *dev);
-extern int pci_finish_runtime_suspend(struct pci_dev *dev);
-extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
-extern void pci_wakeup_bus(struct pci_bus *bus);
-extern void pci_config_pm_runtime_get(struct pci_dev *dev);
-extern void pci_config_pm_runtime_put(struct pci_dev *dev);
-extern void pci_pm_init(struct pci_dev *dev);
-extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
+int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
+void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
+void pci_power_up(struct pci_dev *dev);
+void pci_disable_enabled_device(struct pci_dev *dev);
+int pci_finish_runtime_suspend(struct pci_dev *dev);
+int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
+void pci_wakeup_bus(struct pci_bus *bus);
+void pci_config_pm_runtime_get(struct pci_dev *dev);
+void pci_config_pm_runtime_put(struct pci_dev *dev);
+void pci_pm_init(struct pci_dev *dev);
+void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);
static inline void pci_wakeup_event(struct pci_dev *dev)
@@ -96,7 +95,7 @@ struct pci_vpd {
struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
};
-extern int pci_vpd_pci22_init(struct pci_dev *dev);
+int pci_vpd_pci22_init(struct pci_dev *dev);
static inline void pci_vpd_release(struct pci_dev *dev)
{
if (dev->vpd)
@@ -105,9 +104,9 @@ static inline void pci_vpd_release(struct pci_dev *dev)
/* PCI /proc functions */
#ifdef CONFIG_PROC_FS
-extern int pci_proc_attach_device(struct pci_dev *dev);
-extern int pci_proc_detach_device(struct pci_dev *dev);
-extern int pci_proc_detach_bus(struct pci_bus *bus);
+int pci_proc_attach_device(struct pci_dev *dev);
+int pci_proc_detach_device(struct pci_dev *dev);
+int pci_proc_detach_bus(struct pci_bus *bus);
#else
static inline int pci_proc_attach_device(struct pci_dev *dev) { return 0; }
static inline int pci_proc_detach_device(struct pci_dev *dev) { return 0; }
@@ -118,8 +117,8 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; }
int pci_hp_add_bridge(struct pci_dev *dev);
#ifdef HAVE_PCI_LEGACY
-extern void pci_create_legacy_files(struct pci_bus *bus);
-extern void pci_remove_legacy_files(struct pci_bus *bus);
+void pci_create_legacy_files(struct pci_bus *bus);
+void pci_remove_legacy_files(struct pci_bus *bus);
#else
static inline void pci_create_legacy_files(struct pci_bus *bus) { return; }
static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; }
@@ -134,7 +133,7 @@ extern unsigned int pci_pm_d3_delay;
#ifdef CONFIG_PCI_MSI
void pci_no_msi(void);
-extern void pci_msi_init_pci_dev(struct pci_dev *dev);
+void pci_msi_init_pci_dev(struct pci_dev *dev);
#else
static inline void pci_no_msi(void) { }
static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
@@ -198,12 +197,11 @@ enum pci_bar_type {
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
int crs_timeout);
-extern int pci_setup_device(struct pci_dev *dev);
-extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- struct resource *res, unsigned int reg);
-extern int pci_resource_bar(struct pci_dev *dev, int resno,
- enum pci_bar_type *type);
-extern void pci_configure_ari(struct pci_dev *dev);
+int pci_setup_device(struct pci_dev *dev);
+int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ struct resource *res, unsigned int reg);
+int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
+void pci_configure_ari(struct pci_dev *dev);
/**
* pci_ari_enabled - query ARI forwarding status
@@ -217,7 +215,7 @@ static inline int pci_ari_enabled(struct pci_bus *bus)
}
void pci_reassigndev_resource_alignment(struct pci_dev *dev);
-extern void pci_disable_bridge_window(struct pci_dev *dev);
+void pci_disable_bridge_window(struct pci_dev *dev);
/* Single Root I/O Virtualization */
struct pci_sriov {
@@ -241,7 +239,7 @@ struct pci_sriov {
};
#ifdef CONFIG_PCI_ATS
-extern void pci_restore_ats_state(struct pci_dev *dev);
+void pci_restore_ats_state(struct pci_dev *dev);
#else
static inline void pci_restore_ats_state(struct pci_dev *dev)
{
@@ -249,14 +247,13 @@ static inline void pci_restore_ats_state(struct pci_dev *dev)
#endif /* CONFIG_PCI_ATS */
#ifdef CONFIG_PCI_IOV
-extern int pci_iov_init(struct pci_dev *dev);
-extern void pci_iov_release(struct pci_dev *dev);
-extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
- enum pci_bar_type *type);
-extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
- int resno);
-extern void pci_restore_iov_state(struct pci_dev *dev);
-extern int pci_iov_bus_range(struct pci_bus *bus);
+int pci_iov_init(struct pci_dev *dev);
+void pci_iov_release(struct pci_dev *dev);
+int pci_iov_resource_bar(struct pci_dev *dev, int resno,
+ enum pci_bar_type *type);
+resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
+void pci_restore_iov_state(struct pci_dev *dev);
+int pci_iov_bus_range(struct pci_bus *bus);
#else
static inline int pci_iov_init(struct pci_dev *dev)
@@ -282,10 +279,10 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
#endif /* CONFIG_PCI_IOV */
-extern unsigned long pci_cardbus_resource_alignment(struct resource *);
+unsigned long pci_cardbus_resource_alignment(struct resource *);
static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
- struct resource *res)
+ struct resource *res)
{
#ifdef CONFIG_PCI_IOV
int resno = res - dev->resource;
@@ -298,7 +295,7 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
return resource_alignment(res);
}
-extern void pci_enable_acs(struct pci_dev *dev);
+void pci_enable_acs(struct pci_dev *dev);
struct pci_dev_reset_methods {
u16 vendor;
@@ -307,7 +304,7 @@ struct pci_dev_reset_methods {
};
#ifdef CONFIG_PCI_QUIRKS
-extern int pci_dev_specific_reset(struct pci_dev *dev, int probe);
+int pci_dev_specific_reset(struct pci_dev *dev, int probe);
#else
static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
{
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index fde4a32a0295..569f82fc9e22 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -82,4 +82,4 @@ endchoice
config PCIE_PME
def_bool y
- depends on PCIEPORTBUS && PM_RUNTIME && ACPI
+ depends on PCIEPORTBUS && PM_RUNTIME
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 4e24cb8a94ae..587e7e853107 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -212,8 +212,8 @@ out:
return ops->read(bus, devfn, where, size, val);
}
-int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size,
- u32 val)
+static int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 val)
{
u32 *sim;
struct aer_error *err;
@@ -334,13 +334,13 @@ static int aer_inject(struct aer_error_inj *einj)
return -ENODEV;
rpdev = pcie_find_root_port(dev);
if (!rpdev) {
- ret = -ENOTTY;
+ ret = -ENODEV;
goto out_put;
}
pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (!pos_cap_err) {
- ret = -ENOTTY;
+ ret = -EPERM;
goto out_put;
}
pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
@@ -350,7 +350,7 @@ static int aer_inject(struct aer_error_inj *einj)
rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
if (!rp_pos_cap_err) {
- ret = -ENOTTY;
+ ret = -EPERM;
goto out_put;
}
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 22f840f4dda1..d12c77cd6991 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -110,15 +110,15 @@ static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
}
extern struct bus_type pcie_port_bus_type;
-extern void aer_do_secondary_bus_reset(struct pci_dev *dev);
-extern int aer_init(struct pcie_device *dev);
-extern void aer_isr(struct work_struct *work);
-extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
-extern void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info);
-extern irqreturn_t aer_irq(int irq, void *context);
+void aer_do_secondary_bus_reset(struct pci_dev *dev);
+int aer_init(struct pcie_device *dev);
+void aer_isr(struct work_struct *work);
+void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info);
+irqreturn_t aer_irq(int irq, void *context);
#ifdef CONFIG_ACPI_APEI
-extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
+int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
#else
static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
{
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 564d97f94b6c..8ec8b4f48560 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -89,8 +89,6 @@ static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
return -ENOSPC;
}
-#define PCI_BUS(x) (((x) >> 8) & 0xff)
-
/**
* is_error_source - check whether the device is source of reported error
* @dev: pointer to pci_dev to be checked
@@ -106,7 +104,7 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
* When bus id is equal to 0, it might be a bad id
* reported by root port.
*/
- if (!nosourceid && (PCI_BUS(e_info->id) != 0)) {
+ if (!nosourceid && (PCI_BUS_NUM(e_info->id) != 0)) {
/* Device ID match? */
if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
return true;
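/*
 * Illustrative sketch, not part of the patch: PCI_BUS_NUM() from
 * <linux/pci.h> replaces the file-local PCI_BUS() macro removed above when
 * splitting a 16-bit requester ID into its bus-number byte.  The helper
 * below is hypothetical and only shows the decoding.
 */
#include <linux/pci.h>
#include <linux/printk.h>

static void example_decode_requester_id(u16 id)
{
	u8 bus   = PCI_BUS_NUM(id);	/* (id >> 8) & 0xff */
	u8 devfn = id & 0xff;		/* device/function byte */

	pr_debug("requester %02x:%02x.%u\n",
		 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
}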
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 9ca0dc9ffd84..795db1f9d50c 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -19,8 +19,6 @@
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pcieport_if.h>
-#include <linux/acpi.h>
-#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include "../pci.h"
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index eea2ca2375e6..d2eb80aab569 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -21,18 +21,18 @@
#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
extern struct bus_type pcie_port_bus_type;
-extern int pcie_port_device_register(struct pci_dev *dev);
+int pcie_port_device_register(struct pci_dev *dev);
#ifdef CONFIG_PM
-extern int pcie_port_device_suspend(struct device *dev);
-extern int pcie_port_device_resume(struct device *dev);
+int pcie_port_device_suspend(struct device *dev);
+int pcie_port_device_resume(struct device *dev);
#endif
-extern void pcie_port_device_remove(struct pci_dev *dev);
-extern int __must_check pcie_port_bus_register(void);
-extern void pcie_port_bus_unregister(void);
+void pcie_port_device_remove(struct pci_dev *dev);
+int __must_check pcie_port_bus_register(void);
+void pcie_port_bus_unregister(void);
struct pci_dev;
-extern void pcie_clear_root_pme_status(struct pci_dev *dev);
+void pcie_clear_root_pme_status(struct pci_dev *dev);
#ifdef CONFIG_HOTPLUG_PCI_PCIE
extern bool pciehp_msi_disabled;
@@ -59,7 +59,7 @@ static inline bool pcie_pme_no_msi(void)
return pcie_pme_msi_disabled;
}
-extern void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable);
+void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable);
#else /* !CONFIG_PCIE_PME */
static inline void pcie_pme_disable_msi(void) {}
static inline bool pcie_pme_no_msi(void) { return false; }
@@ -67,7 +67,7 @@ static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
#endif /* !CONFIG_PCIE_PME */
#ifdef CONFIG_ACPI
-extern int pcie_port_acpi_setup(struct pci_dev *port, int *mask);
+int pcie_port_acpi_setup(struct pci_dev *port, int *mask);
static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
{
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
index a86b56e5f2f2..b4d2894ee3fc 100644
--- a/drivers/pci/pcie/portdrv_acpi.c
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -17,6 +17,7 @@
#include "aer/aerdrv.h"
#include "../pci.h"
+#include "portdrv.h"
/**
* pcie_port_acpi_setup - Request the BIOS to release control of PCIe services.
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index ed4d09498337..696caed5fdf5 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -259,11 +259,9 @@ static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
enum pci_channel_state error)
{
struct aer_broadcast_data data = {error, PCI_ERS_RESULT_CAN_RECOVER};
- int ret;
-
- /* can not fail */
- ret = device_for_each_child(&dev->dev, &data, error_detected_iter);
+ /* get true return value from &data */
+ device_for_each_child(&dev->dev, &data, error_detected_iter);
return data.result;
}
@@ -295,10 +293,9 @@ static int mmio_enabled_iter(struct device *device, void *data)
static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev)
{
pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
- int retval;
/* get true return value from &status */
- retval = device_for_each_child(&dev->dev, &status, mmio_enabled_iter);
+ device_for_each_child(&dev->dev, &status, mmio_enabled_iter);
return status;
}
@@ -330,7 +327,6 @@ static int slot_reset_iter(struct device *device, void *data)
static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
{
pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
- int retval;
/* If fatal, restore cfg space for possible link reset at upstream */
if (dev->error_state == pci_channel_io_frozen) {
@@ -341,8 +337,7 @@ static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
}
/* get true return value from &status */
- retval = device_for_each_child(&dev->dev, &status, slot_reset_iter);
-
+ device_for_each_child(&dev->dev, &status, slot_reset_iter);
return status;
}
@@ -368,9 +363,7 @@ static int resume_iter(struct device *device, void *data)
static void pcie_portdrv_err_resume(struct pci_dev *dev)
{
- int retval;
- /* nothing to do with error value, if it ever happens */
- retval = device_for_each_child(&dev->dev, NULL, resume_iter);
+ device_for_each_child(&dev->dev, NULL, resume_iter);
}
/*
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index b494066ef32f..43ece5d41d36 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -673,6 +673,8 @@ add_dev:
ret = device_register(&child->dev);
WARN_ON(ret < 0);
+ pcibios_add_bus(child);
+
/* Create legacy_io and legacy_mem files for this bus */
pci_create_legacy_files(child);
@@ -1627,8 +1629,7 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
if (!bus->is_added) {
dev_dbg(&bus->dev, "fixups for bus\n");
pcibios_fixup_bus(bus);
- if (pci_is_root_bus(bus))
- bus->is_added = 1;
+ bus->is_added = 1;
}
for (pass=0; pass < 2; pass++)
@@ -1661,6 +1662,14 @@ int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
return 0;
}
+void __weak pcibios_add_bus(struct pci_bus *bus)
+{
+}
+
+void __weak pcibios_remove_bus(struct pci_bus *bus)
+{
+}
+
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
@@ -1715,6 +1724,8 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
if (error)
goto class_dev_reg_err;
+ pcibios_add_bus(b);
+
/* Create legacy_io and legacy_mem files for this bus */
pci_create_legacy_files(b);
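/*
 * Illustrative sketch, not part of the patch: pcibios_add_bus() and
 * pcibios_remove_bus() are __weak hooks, so an architecture may override
 * them to tie bus creation/removal to firmware-specific slot setup.  The
 * x86/ACPI side of this series forwards them to the new acpi_pci_add_bus()
 * and acpi_pci_remove_bus() helpers; roughly (assumed wiring):
 */
#include <linux/pci.h>
#include <linux/pci-acpi.h>

void pcibios_add_bus(struct pci_bus *bus)
{
	acpi_pci_add_bus(bus);		/* enumerate ACPI and hotplug slots */
}

void pcibios_remove_bus(struct pci_bus *bus)
{
	acpi_pci_remove_bus(bus);	/* tear the slot objects back down */
}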
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 0369fb6fc1da..7d68aeebf56b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -324,29 +324,30 @@ static void quirk_cs5536_vsa(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
-static void quirk_io_region(struct pci_dev *dev, unsigned region,
- unsigned size, int nr, const char *name)
+static void quirk_io_region(struct pci_dev *dev, int port,
+ unsigned size, int nr, const char *name)
{
- region &= ~(size-1);
- if (region) {
- struct pci_bus_region bus_region;
- struct resource *res = dev->resource + nr;
+ u16 region;
+ struct pci_bus_region bus_region;
+ struct resource *res = dev->resource + nr;
- res->name = pci_name(dev);
- res->start = region;
- res->end = region + size - 1;
- res->flags = IORESOURCE_IO;
+ pci_read_config_word(dev, port, &region);
+ region &= ~(size - 1);
- /* Convert from PCI bus to resource space. */
- bus_region.start = res->start;
- bus_region.end = res->end;
- pcibios_bus_to_resource(dev, res, &bus_region);
+ if (!region)
+ return;
- if (pci_claim_resource(dev, nr) == 0)
- dev_info(&dev->dev, "quirk: %pR claimed by %s\n",
- res, name);
- }
-}
+ res->name = pci_name(dev);
+ res->flags = IORESOURCE_IO;
+
+ /* Convert from PCI bus to resource space */
+ bus_region.start = region;
+ bus_region.end = region + size - 1;
+ pcibios_bus_to_resource(dev, res, &bus_region);
+
+ if (!pci_claim_resource(dev, nr))
+ dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name);
+}
/*
* ATI Northbridge setups MCE the processor if you even
@@ -374,12 +375,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
*/
static void quirk_ali7101_acpi(struct pci_dev *dev)
{
- u16 region;
-
- pci_read_config_word(dev, 0xE0, &region);
- quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
- pci_read_config_word(dev, 0xE2, &region);
- quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
+ quirk_io_region(dev, 0xE0, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
+ quirk_io_region(dev, 0xE2, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi);
@@ -442,12 +439,10 @@ static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int
*/
static void quirk_piix4_acpi(struct pci_dev *dev)
{
- u32 region, res_a;
+ u32 res_a;
- pci_read_config_dword(dev, 0x40, &region);
- quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
- pci_read_config_dword(dev, 0x90, &region);
- quirk_io_region(dev, region, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");
+ quirk_io_region(dev, 0x40, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
+ quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");
/* Device resource A has enables for some of the other ones */
pci_read_config_dword(dev, 0x5c, &res_a);
@@ -491,7 +486,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, qui
*/
static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
{
- u32 region;
u8 enable;
/*
@@ -503,22 +497,14 @@ static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
*/
pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
- if (enable & ICH4_ACPI_EN) {
- pci_read_config_dword(dev, ICH_PMBASE, &region);
- region &= PCI_BASE_ADDRESS_IO_MASK;
- if (region >= PCIBIOS_MIN_IO)
- quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES,
- "ICH4 ACPI/GPIO/TCO");
- }
+ if (enable & ICH4_ACPI_EN)
+ quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
+ "ICH4 ACPI/GPIO/TCO");
pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable);
- if (enable & ICH4_GPIO_EN) {
- pci_read_config_dword(dev, ICH4_GPIOBASE, &region);
- region &= PCI_BASE_ADDRESS_IO_MASK;
- if (region >= PCIBIOS_MIN_IO)
- quirk_io_region(dev, region, 64,
- PCI_BRIDGE_RESOURCES + 1, "ICH4 GPIO");
- }
+ if (enable & ICH4_GPIO_EN)
+ quirk_io_region(dev, ICH4_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
+ "ICH4 GPIO");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi);
@@ -533,26 +519,17 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, qui
static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
{
- u32 region;
u8 enable;
pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
- if (enable & ICH6_ACPI_EN) {
- pci_read_config_dword(dev, ICH_PMBASE, &region);
- region &= PCI_BASE_ADDRESS_IO_MASK;
- if (region >= PCIBIOS_MIN_IO)
- quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES,
- "ICH6 ACPI/GPIO/TCO");
- }
+ if (enable & ICH6_ACPI_EN)
+ quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
+ "ICH6 ACPI/GPIO/TCO");
pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
- if (enable & ICH6_GPIO_EN) {
- pci_read_config_dword(dev, ICH6_GPIOBASE, &region);
- region &= PCI_BASE_ADDRESS_IO_MASK;
- if (region >= PCIBIOS_MIN_IO)
- quirk_io_region(dev, region, 64,
- PCI_BRIDGE_RESOURCES + 1, "ICH6 GPIO");
- }
+ if (enable & ICH6_GPIO_EN)
+ quirk_io_region(dev, ICH6_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
+ "ICH6 GPIO");
}
static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize)
@@ -650,13 +627,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, qui
*/
static void quirk_vt82c586_acpi(struct pci_dev *dev)
{
- u32 region;
-
- if (dev->revision & 0x10) {
- pci_read_config_dword(dev, 0x48, &region);
- region &= PCI_BASE_ADDRESS_IO_MASK;
- quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES, "vt82c586 ACPI");
- }
+ if (dev->revision & 0x10)
+ quirk_io_region(dev, 0x48, 256, PCI_BRIDGE_RESOURCES,
+ "vt82c586 ACPI");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi);
@@ -668,18 +641,12 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt
*/
static void quirk_vt82c686_acpi(struct pci_dev *dev)
{
- u16 hm;
- u32 smb;
-
quirk_vt82c586_acpi(dev);
- pci_read_config_word(dev, 0x70, &hm);
- hm &= PCI_BASE_ADDRESS_IO_MASK;
- quirk_io_region(dev, hm, 128, PCI_BRIDGE_RESOURCES + 1, "vt82c686 HW-mon");
+ quirk_io_region(dev, 0x70, 128, PCI_BRIDGE_RESOURCES+1,
+ "vt82c686 HW-mon");
- pci_read_config_dword(dev, 0x90, &smb);
- smb &= PCI_BASE_ADDRESS_IO_MASK;
- quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 2, "vt82c686 SMB");
+ quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+2, "vt82c686 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi);
@@ -690,15 +657,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt
*/
static void quirk_vt8235_acpi(struct pci_dev *dev)
{
- u16 pm, smb;
-
- pci_read_config_word(dev, 0x88, &pm);
- pm &= PCI_BASE_ADDRESS_IO_MASK;
- quirk_io_region(dev, pm, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
-
- pci_read_config_word(dev, 0xd0, &smb);
- smb &= PCI_BASE_ADDRESS_IO_MASK;
- quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 1, "vt8235 SMB");
+ quirk_io_region(dev, 0x88, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
+ quirk_io_region(dev, 0xd0, 16, PCI_BRIDGE_RESOURCES+1, "vt8235 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
@@ -2594,6 +2554,14 @@ static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
pci_dev_put(p);
}
+static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
+{
+ /* AR816X/AR817X/E210X MSI is fixed at HW level from revision 0x18 */
+ if (dev->revision < 0x18) {
+ dev_info(&dev->dev, "set MSI_INTX_DISABLE_BUG flag\n");
+ dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
+ }
+}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5780,
quirk_msi_intx_disable_bug);
@@ -2643,6 +2611,16 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083,
quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1090,
+ quirk_msi_intx_disable_qca_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1091,
+ quirk_msi_intx_disable_qca_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a0,
+ quirk_msi_intx_disable_qca_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1,
+ quirk_msi_intx_disable_qca_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
+ quirk_msi_intx_disable_qca_bug);
#endif /* CONFIG_PCI_MSI */
/* Allow manual resource allocation for PCI hotplug bridges
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index cc875e6ed159..8fc54b7327bc 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -50,10 +50,8 @@ void pci_remove_bus(struct pci_bus *bus)
list_del(&bus->node);
pci_bus_release_busn_res(bus);
up_write(&pci_bus_sem);
- if (!bus->is_added)
- return;
-
pci_remove_legacy_files(bus);
+ pcibios_remove_bus(bus);
device_unregister(&bus->dev);
}
EXPORT_SYMBOL(pci_remove_bus);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 7e8739e25b9e..16abaaa1f83c 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1044,7 +1044,7 @@ handle_done:
;
}
-void __ref __pci_bus_size_bridges(struct pci_bus *bus,
+static void __ref __pci_bus_size_bridges(struct pci_bus *bus,
struct list_head *realloc_head)
{
struct pci_dev *dev;
@@ -1545,6 +1545,8 @@ again:
enable_all:
retval = pci_reenable_device(bridge);
+ if (retval)
+ dev_err(&bridge->dev, "Error reenabling bridge (%d)\n", retval);
pci_set_master(bridge);
pci_enable_bridges(parent);
}
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 81b88bda7930..07f2eddc09ce 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -261,7 +261,6 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
{
struct resource *res = dev->resource + resno;
resource_size_t align, size;
- struct pci_bus *bus;
int ret;
align = pci_resource_alignment(dev, res);
@@ -271,7 +270,6 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
return -EINVAL;
}
- bus = dev->bus;
size = resource_size(res);
ret = _pci_assign_resource(dev, resno, size, align);
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index ac6412fb8d6f..c1e9284a677b 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -377,14 +377,17 @@ void pci_hp_create_module_link(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
struct kobject *kobj = NULL;
- int no_warn;
+ int ret;
if (!slot || !slot->ops)
return;
kobj = kset_find_obj(module_kset, slot->ops->mod_name);
if (!kobj)
return;
- no_warn = sysfs_create_link(&pci_slot->kobj, kobj, "module");
+ ret = sysfs_create_link(&pci_slot->kobj, kobj, "module");
+ if (ret)
+ dev_err(&pci_slot->bus->dev, "Error creating sysfs link (%d)\n",
+ ret);
kobject_put(kobj);
}
EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index f1b7fdc58a5f..82758cbb220b 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -246,7 +246,7 @@ static struct dentry *dasd_debugfs_setup(const char *name,
static int dasd_state_known_to_basic(struct dasd_device *device)
{
struct dasd_block *block = device->block;
- int rc;
+ int rc = 0;
/* Allocate and register gendisk structure. */
if (block) {
@@ -273,7 +273,8 @@ static int dasd_state_known_to_basic(struct dasd_device *device)
DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
device->state = DASD_STATE_BASIC;
- return 0;
+
+ return rc;
}
/*
@@ -282,6 +283,7 @@ static int dasd_state_known_to_basic(struct dasd_device *device)
static int dasd_state_basic_to_known(struct dasd_device *device)
{
int rc;
+
if (device->block) {
dasd_profile_exit(&device->block->profile);
if (device->block->debugfs_dentry)
@@ -332,8 +334,10 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
if (block->base->discipline->do_analysis != NULL)
rc = block->base->discipline->do_analysis(block);
if (rc) {
- if (rc != -EAGAIN)
+ if (rc != -EAGAIN) {
device->state = DASD_STATE_UNFMT;
+ goto out;
+ }
return rc;
}
dasd_setup_queue(block);
@@ -341,11 +345,16 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
block->blocks << block->s2b_shift);
device->state = DASD_STATE_READY;
rc = dasd_scan_partitions(block);
- if (rc)
+ if (rc) {
device->state = DASD_STATE_BASIC;
+ return rc;
+ }
} else {
device->state = DASD_STATE_READY;
}
+out:
+ if (device->discipline->basic_to_ready)
+ rc = device->discipline->basic_to_ready(device);
return rc;
}
@@ -368,6 +377,11 @@ static int dasd_state_ready_to_basic(struct dasd_device *device)
{
int rc;
+ if (device->discipline->ready_to_basic) {
+ rc = device->discipline->ready_to_basic(device);
+ if (rc)
+ return rc;
+ }
device->state = DASD_STATE_BASIC;
if (device->block) {
struct dasd_block *block = device->block;
@@ -402,16 +416,10 @@ static int dasd_state_unfmt_to_basic(struct dasd_device *device)
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
- int rc;
struct gendisk *disk;
struct disk_part_iter piter;
struct hd_struct *part;
- if (device->discipline->ready_to_online) {
- rc = device->discipline->ready_to_online(device);
- if (rc)
- return rc;
- }
device->state = DASD_STATE_ONLINE;
if (device->block) {
dasd_schedule_block_bh(device->block);
@@ -444,6 +452,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
if (rc)
return rc;
}
+
device->state = DASD_STATE_READY;
if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
disk = device->block->bdev->bd_disk;
@@ -2223,6 +2232,77 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
return rc;
}
+static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
+{
+ struct dasd_ccw_req *cqr;
+
+ list_for_each_entry(cqr, ccw_queue, blocklist) {
+ if (cqr->callback_data != DASD_SLEEPON_END_TAG)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
+{
+ struct dasd_device *device;
+ int rc;
+ struct dasd_ccw_req *cqr, *n;
+
+retry:
+ list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
+ device = cqr->startdev;
+ if (cqr->status != DASD_CQR_FILLED) /*could be failed*/
+ continue;
+
+ if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+ !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+ cqr->status = DASD_CQR_FAILED;
+ cqr->intrc = -EPERM;
+ continue;
+ }
+ /*Non-temporary stop condition will trigger fail fast*/
+ if (device->stopped & ~DASD_STOPPED_PENDING &&
+ test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+ !dasd_eer_enabled(device)) {
+ cqr->status = DASD_CQR_FAILED;
+ cqr->intrc = -EAGAIN;
+ continue;
+ }
+
+ /*Don't try to start requests if device is stopped*/
+ if (interruptible) {
+ rc = wait_event_interruptible(
+ generic_waitq, !device->stopped);
+ if (rc == -ERESTARTSYS) {
+ cqr->status = DASD_CQR_FAILED;
+ cqr->intrc = rc;
+ continue;
+ }
+ } else
+ wait_event(generic_waitq, !(device->stopped));
+
+ if (!cqr->callback)
+ cqr->callback = dasd_wakeup_cb;
+ cqr->callback_data = DASD_SLEEPON_START_TAG;
+ dasd_add_request_tail(cqr);
+ }
+
+ wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));
+
+ rc = 0;
+ list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
+ if (__dasd_sleep_on_erp(cqr))
+ rc = 1;
+ }
+ if (rc)
+ goto retry;
+
+
+ return 0;
+}
+
/*
* Queue a request to the tail of the device ccw_queue and wait for
* it's completion.
@@ -2233,6 +2313,15 @@ int dasd_sleep_on(struct dasd_ccw_req *cqr)
}
/*
+ * Start requests from a ccw_queue and wait for their completion.
+ */
+int dasd_sleep_on_queue(struct list_head *ccw_queue)
+{
+ return _dasd_sleep_on_queue(ccw_queue, 0);
+}
+EXPORT_SYMBOL(dasd_sleep_on_queue);
+
+/*
* Queue a request to the tail of the device ccw_queue and wait
* interruptible for it's completion.
*/
@@ -2663,6 +2752,26 @@ static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
}
/*
+ * Requeue a request back to the block request queue
+ * only works for block requests
+ */
+static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
+{
+ struct dasd_block *block = cqr->block;
+ struct request *req;
+ unsigned long flags;
+
+ if (!block)
+ return -EINVAL;
+ spin_lock_irqsave(&block->queue_lock, flags);
+ req = (struct request *) cqr->callback_data;
+ blk_requeue_request(block->request_queue, req);
+ spin_unlock_irqrestore(&block->queue_lock, flags);
+
+ return 0;
+}
+
+/*
* Go through all request on the dasd_block request queue, cancel them
* on the respective dasd_device, and return them to the generic
* block layer.
@@ -3380,10 +3489,11 @@ EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
+ struct dasd_device *device = dasd_device_from_cdev(cdev);
+ struct list_head freeze_queue;
struct dasd_ccw_req *cqr, *n;
+ struct dasd_ccw_req *refers;
int rc;
- struct list_head freeze_queue;
- struct dasd_device *device = dasd_device_from_cdev(cdev);
if (IS_ERR(device))
return PTR_ERR(device);
@@ -3396,7 +3506,8 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
/* disallow new I/O */
dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
- /* clear active requests */
+
+ /* clear active requests and requeue them to block layer if possible */
INIT_LIST_HEAD(&freeze_queue);
spin_lock_irq(get_ccwdev_lock(cdev));
rc = 0;
@@ -3416,7 +3527,6 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
}
list_move_tail(&cqr->devlist, &freeze_queue);
}
-
spin_unlock_irq(get_ccwdev_lock(cdev));
list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
@@ -3424,12 +3534,38 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
(cqr->status != DASD_CQR_CLEAR_PENDING));
if (cqr->status == DASD_CQR_CLEARED)
cqr->status = DASD_CQR_QUEUED;
+
+ /* requeue requests to blocklayer will only work for
+ block device requests */
+ if (_dasd_requeue_request(cqr))
+ continue;
+
+ /* remove requests from device and block queue */
+ list_del_init(&cqr->devlist);
+ while (cqr->refers != NULL) {
+ refers = cqr->refers;
+ /* remove the request from the block queue */
+ list_del(&cqr->blocklist);
+ /* free the finished erp request */
+ dasd_free_erp_request(cqr, cqr->memdev);
+ cqr = refers;
+ }
+ if (cqr->block)
+ list_del_init(&cqr->blocklist);
+ cqr->block->base->discipline->free_cp(
+ cqr, (struct request *) cqr->callback_data);
}
- /* move freeze_queue to start of the ccw_queue */
- spin_lock_irq(get_ccwdev_lock(cdev));
- list_splice_tail(&freeze_queue, &device->ccw_queue);
- spin_unlock_irq(get_ccwdev_lock(cdev));
+ /*
+ * if requests remain then they are internal request
+ * and go back to the device queue
+ */
+ if (!list_empty(&freeze_queue)) {
+ /* move freeze_queue to start of the ccw_queue */
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ list_splice_tail(&freeze_queue, &device->ccw_queue);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+ }
dasd_put_device(device);
return rc;
}
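/*
 * Illustrative sketch, not part of the patch: dasd_sleep_on_queue() lets a
 * caller start a whole list of prepared ccw requests and block until all of
 * them have completed, instead of sleeping on each request in turn.  The
 * usage pattern below mirrors dasd_eckd_format_device() further down; the
 * function name is made up, and a real caller must still inspect each
 * cqr->status and free the requests afterwards.
 */
#include <linux/list.h>
/* struct dasd_ccw_req and dasd_sleep_on_queue() come from dasd_int.h */

static int example_run_requests(struct dasd_ccw_req *first,
				struct dasd_ccw_req *second)
{
	LIST_HEAD(queue);

	list_add_tail(&first->blocklist, &queue);
	list_add_tail(&second->blocklist, &queue);

	return dasd_sleep_on_queue(&queue);	/* waits for both requests */
}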
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index c196827c228f..a71bb8aaca1d 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -410,8 +410,7 @@ dasd_add_busid(const char *bus_id, int features)
struct dasd_devmap *devmap, *new, *tmp;
int hash;
- new = (struct dasd_devmap *)
- kzalloc(sizeof(struct dasd_devmap), GFP_KERNEL);
+ new = kzalloc(sizeof(struct dasd_devmap), GFP_KERNEL);
if (!new)
return ERR_PTR(-ENOMEM);
spin_lock(&dasd_devmap_lock);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 6999fd919e94..6a44b27623ed 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2022,7 +2022,7 @@ static int dasd_eckd_do_analysis(struct dasd_block *block)
return dasd_eckd_end_analysis(block);
}
-static int dasd_eckd_ready_to_online(struct dasd_device *device)
+static int dasd_eckd_basic_to_ready(struct dasd_device *device)
{
return dasd_alias_add_device(device);
};
@@ -2031,6 +2031,11 @@ static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
cancel_work_sync(&device->reload_device);
cancel_work_sync(&device->kick_validate);
+ return 0;
+};
+
+static int dasd_eckd_ready_to_basic(struct dasd_device *device)
+{
return dasd_alias_remove_device(device);
};
@@ -2050,45 +2055,34 @@ dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
}
static struct dasd_ccw_req *
-dasd_eckd_format_device(struct dasd_device * device,
- struct format_data_t * fdata)
+dasd_eckd_build_format(struct dasd_device *base,
+ struct format_data_t *fdata)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *base_priv;
+ struct dasd_eckd_private *start_priv;
+ struct dasd_device *startdev;
struct dasd_ccw_req *fcp;
struct eckd_count *ect;
+ struct ch_t address;
struct ccw1 *ccw;
void *data;
int rpt;
- struct ch_t address;
int cplength, datasize;
- int i;
+ int i, j;
int intensity = 0;
int r0_perm;
+ int nr_tracks;
- private = (struct dasd_eckd_private *) device->private;
- rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
- set_ch_t(&address,
- fdata->start_unit / private->rdc_data.trk_per_cyl,
- fdata->start_unit % private->rdc_data.trk_per_cyl);
+ startdev = dasd_alias_get_start_dev(base);
+ if (!startdev)
+ startdev = base;
- /* Sanity checks. */
- if (fdata->start_unit >=
- (private->real_cyl * private->rdc_data.trk_per_cyl)) {
- dev_warn(&device->cdev->dev, "Start track number %d used in "
- "formatting is too big\n", fdata->start_unit);
- return ERR_PTR(-EINVAL);
- }
- if (fdata->start_unit > fdata->stop_unit) {
- dev_warn(&device->cdev->dev, "Start track %d used in "
- "formatting exceeds end track\n", fdata->start_unit);
- return ERR_PTR(-EINVAL);
- }
- if (dasd_check_blocksize(fdata->blksize) != 0) {
- dev_warn(&device->cdev->dev,
- "The DASD cannot be formatted with block size %d\n",
- fdata->blksize);
- return ERR_PTR(-EINVAL);
- }
+ start_priv = (struct dasd_eckd_private *) startdev->private;
+ base_priv = (struct dasd_eckd_private *) base->private;
+
+ rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
+
+ nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
/*
* fdata->intensity is a bit string that tells us what to do:
@@ -2106,149 +2100,282 @@ dasd_eckd_format_device(struct dasd_device * device,
r0_perm = 1;
intensity = fdata->intensity;
}
+
switch (intensity) {
case 0x00: /* Normal format */
case 0x08: /* Normal format, use cdl. */
- cplength = 2 + rpt;
- datasize = sizeof(struct DE_eckd_data) +
+ cplength = 2 + (rpt*nr_tracks);
+ datasize = sizeof(struct PFX_eckd_data) +
sizeof(struct LO_eckd_data) +
- rpt * sizeof(struct eckd_count);
+ rpt * nr_tracks * sizeof(struct eckd_count);
break;
case 0x01: /* Write record zero and format track. */
case 0x09: /* Write record zero and format track, use cdl. */
- cplength = 3 + rpt;
- datasize = sizeof(struct DE_eckd_data) +
+ cplength = 2 + rpt * nr_tracks;
+ datasize = sizeof(struct PFX_eckd_data) +
sizeof(struct LO_eckd_data) +
sizeof(struct eckd_count) +
- rpt * sizeof(struct eckd_count);
+ rpt * nr_tracks * sizeof(struct eckd_count);
break;
case 0x04: /* Invalidate track. */
case 0x0c: /* Invalidate track, use cdl. */
cplength = 3;
- datasize = sizeof(struct DE_eckd_data) +
+ datasize = sizeof(struct PFX_eckd_data) +
sizeof(struct LO_eckd_data) +
sizeof(struct eckd_count);
break;
default:
- dev_warn(&device->cdev->dev, "An I/O control call used "
- "incorrect flags 0x%x\n", fdata->intensity);
+ dev_warn(&startdev->cdev->dev,
+ "An I/O control call used incorrect flags 0x%x\n",
+ fdata->intensity);
return ERR_PTR(-EINVAL);
}
/* Allocate the format ccw request. */
- fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
+ fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
+ datasize, startdev);
if (IS_ERR(fcp))
return fcp;
+ start_priv->count++;
data = fcp->data;
ccw = fcp->cpaddr;
switch (intensity & ~0x08) {
case 0x00: /* Normal format. */
- define_extent(ccw++, (struct DE_eckd_data *) data,
- fdata->start_unit, fdata->start_unit,
- DASD_ECKD_CCW_WRITE_CKD, device);
+ prefix(ccw++, (struct PFX_eckd_data *) data,
+ fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_WRITE_CKD, base, startdev);
/* grant subsystem permission to format R0 */
if (r0_perm)
- ((struct DE_eckd_data *)data)->ga_extended |= 0x04;
- data += sizeof(struct DE_eckd_data);
+ ((struct PFX_eckd_data *)data)
+ ->define_extent.ga_extended |= 0x04;
+ data += sizeof(struct PFX_eckd_data);
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, (struct LO_eckd_data *) data,
- fdata->start_unit, 0, rpt,
- DASD_ECKD_CCW_WRITE_CKD, device,
+ fdata->start_unit, 0, rpt*nr_tracks,
+ DASD_ECKD_CCW_WRITE_CKD, base,
fdata->blksize);
data += sizeof(struct LO_eckd_data);
break;
case 0x01: /* Write record zero + format track. */
- define_extent(ccw++, (struct DE_eckd_data *) data,
- fdata->start_unit, fdata->start_unit,
- DASD_ECKD_CCW_WRITE_RECORD_ZERO,
- device);
- data += sizeof(struct DE_eckd_data);
+ prefix(ccw++, (struct PFX_eckd_data *) data,
+ fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_WRITE_RECORD_ZERO,
+ base, startdev);
+ data += sizeof(struct PFX_eckd_data);
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, (struct LO_eckd_data *) data,
- fdata->start_unit, 0, rpt + 1,
- DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
- device->block->bp_block);
+ fdata->start_unit, 0, rpt * nr_tracks + 1,
+ DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
+ base->block->bp_block);
data += sizeof(struct LO_eckd_data);
break;
case 0x04: /* Invalidate track. */
- define_extent(ccw++, (struct DE_eckd_data *) data,
- fdata->start_unit, fdata->start_unit,
- DASD_ECKD_CCW_WRITE_CKD, device);
- data += sizeof(struct DE_eckd_data);
+ prefix(ccw++, (struct PFX_eckd_data *) data,
+ fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_WRITE_CKD, base, startdev);
+ data += sizeof(struct PFX_eckd_data);
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, (struct LO_eckd_data *) data,
fdata->start_unit, 0, 1,
- DASD_ECKD_CCW_WRITE_CKD, device, 8);
+ DASD_ECKD_CCW_WRITE_CKD, base, 8);
data += sizeof(struct LO_eckd_data);
break;
}
- if (intensity & 0x01) { /* write record zero */
- ect = (struct eckd_count *) data;
- data += sizeof(struct eckd_count);
- ect->cyl = address.cyl;
- ect->head = address.head;
- ect->record = 0;
- ect->kl = 0;
- ect->dl = 8;
- ccw[-1].flags |= CCW_FLAG_CC;
- ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
- ccw->flags = CCW_FLAG_SLI;
- ccw->count = 8;
- ccw->cda = (__u32)(addr_t) ect;
- ccw++;
- }
- if ((intensity & ~0x08) & 0x04) { /* erase track */
- ect = (struct eckd_count *) data;
- data += sizeof(struct eckd_count);
- ect->cyl = address.cyl;
- ect->head = address.head;
- ect->record = 1;
- ect->kl = 0;
- ect->dl = 0;
- ccw[-1].flags |= CCW_FLAG_CC;
- ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
- ccw->flags = CCW_FLAG_SLI;
- ccw->count = 8;
- ccw->cda = (__u32)(addr_t) ect;
- } else { /* write remaining records */
- for (i = 0; i < rpt; i++) {
+
+ for (j = 0; j < nr_tracks; j++) {
+ /* calculate cylinder and head for the current track */
+ set_ch_t(&address,
+ (fdata->start_unit + j) /
+ base_priv->rdc_data.trk_per_cyl,
+ (fdata->start_unit + j) %
+ base_priv->rdc_data.trk_per_cyl);
+ if (intensity & 0x01) { /* write record zero */
ect = (struct eckd_count *) data;
data += sizeof(struct eckd_count);
ect->cyl = address.cyl;
ect->head = address.head;
- ect->record = i + 1;
+ ect->record = 0;
ect->kl = 0;
- ect->dl = fdata->blksize;
- /* Check for special tracks 0-1 when formatting CDL */
- if ((intensity & 0x08) &&
- fdata->start_unit == 0) {
- if (i < 3) {
- ect->kl = 4;
- ect->dl = sizes_trk0[i] - 4;
- }
- }
- if ((intensity & 0x08) &&
- fdata->start_unit == 1) {
- ect->kl = 44;
- ect->dl = LABEL_SIZE - 44;
- }
+ ect->dl = 8;
ccw[-1].flags |= CCW_FLAG_CC;
- ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
+ ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
ccw->cda = (__u32)(addr_t) ect;
ccw++;
}
+ if ((intensity & ~0x08) & 0x04) { /* erase track */
+ ect = (struct eckd_count *) data;
+ data += sizeof(struct eckd_count);
+ ect->cyl = address.cyl;
+ ect->head = address.head;
+ ect->record = 1;
+ ect->kl = 0;
+ ect->dl = 0;
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
+ ccw->flags = CCW_FLAG_SLI;
+ ccw->count = 8;
+ ccw->cda = (__u32)(addr_t) ect;
+ } else { /* write remaining records */
+ for (i = 0; i < rpt; i++) {
+ ect = (struct eckd_count *) data;
+ data += sizeof(struct eckd_count);
+ ect->cyl = address.cyl;
+ ect->head = address.head;
+ ect->record = i + 1;
+ ect->kl = 0;
+ ect->dl = fdata->blksize;
+ /*
+ * Check for special tracks 0-1
+ * when formatting CDL
+ */
+ if ((intensity & 0x08) &&
+ fdata->start_unit == 0) {
+ if (i < 3) {
+ ect->kl = 4;
+ ect->dl = sizes_trk0[i] - 4;
+ }
+ }
+ if ((intensity & 0x08) &&
+ fdata->start_unit == 1) {
+ ect->kl = 44;
+ ect->dl = LABEL_SIZE - 44;
+ }
+ ccw[-1].flags |= CCW_FLAG_CC;
+ if (i != 0 || j == 0)
+ ccw->cmd_code =
+ DASD_ECKD_CCW_WRITE_CKD;
+ else
+ ccw->cmd_code =
+ DASD_ECKD_CCW_WRITE_CKD_MT;
+ ccw->flags = CCW_FLAG_SLI;
+ ccw->count = 8;
+ ccw->cda = (__u32)(addr_t) ect;
+ ccw++;
+ }
+ }
}
- fcp->startdev = device;
- fcp->memdev = device;
+
+ fcp->startdev = startdev;
+ fcp->memdev = startdev;
fcp->retries = 256;
+ fcp->expires = startdev->default_expires * HZ;
fcp->buildclk = get_tod_clock();
fcp->status = DASD_CQR_FILLED;
+
return fcp;
}
+static int
+dasd_eckd_format_device(struct dasd_device *base,
+ struct format_data_t *fdata)
+{
+ struct dasd_ccw_req *cqr, *n;
+ struct dasd_block *block;
+ struct dasd_eckd_private *private;
+ struct list_head format_queue;
+ struct dasd_device *device;
+ int old_stop, format_step;
+ int step, rc = 0;
+
+ block = base->block;
+ private = (struct dasd_eckd_private *) base->private;
+
+ /* Sanity checks. */
+ if (fdata->start_unit >=
+ (private->real_cyl * private->rdc_data.trk_per_cyl)) {
+ dev_warn(&base->cdev->dev,
+ "Start track number %u used in formatting is too big\n",
+ fdata->start_unit);
+ return -EINVAL;
+ }
+ if (fdata->stop_unit >=
+ (private->real_cyl * private->rdc_data.trk_per_cyl)) {
+ dev_warn(&base->cdev->dev,
+ "Stop track number %u used in formatting is too big\n",
+ fdata->stop_unit);
+ return -EINVAL;
+ }
+ if (fdata->start_unit > fdata->stop_unit) {
+ dev_warn(&base->cdev->dev,
+ "Start track %u used in formatting exceeds end track\n",
+ fdata->start_unit);
+ return -EINVAL;
+ }
+ if (dasd_check_blocksize(fdata->blksize) != 0) {
+ dev_warn(&base->cdev->dev,
+ "The DASD cannot be formatted with block size %u\n",
+ fdata->blksize);
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&format_queue);
+ old_stop = fdata->stop_unit;
+
+ while (fdata->start_unit <= 1) {
+ fdata->stop_unit = fdata->start_unit;
+ cqr = dasd_eckd_build_format(base, fdata);
+ list_add(&cqr->blocklist, &format_queue);
+
+ fdata->stop_unit = old_stop;
+ fdata->start_unit++;
+
+ if (fdata->start_unit > fdata->stop_unit)
+ goto sleep;
+ }
+
+retry:
+ format_step = 255 / recs_per_track(&private->rdc_data, 0,
+ fdata->blksize);
+ while (fdata->start_unit <= old_stop) {
+ step = fdata->stop_unit - fdata->start_unit + 1;
+ if (step > format_step)
+ fdata->stop_unit = fdata->start_unit + format_step - 1;
+
+ cqr = dasd_eckd_build_format(base, fdata);
+ if (IS_ERR(cqr)) {
+ if (PTR_ERR(cqr) == -ENOMEM) {
+ /*
+ * not enough memory available
+ * go to out and start requests
+ * retry after first requests were finished
+ */
+ fdata->stop_unit = old_stop;
+ goto sleep;
+ } else
+ return PTR_ERR(cqr);
+ }
+ list_add(&cqr->blocklist, &format_queue);
+
+ fdata->start_unit = fdata->stop_unit + 1;
+ fdata->stop_unit = old_stop;
+ }
+
+sleep:
+ dasd_sleep_on_queue(&format_queue);
+
+ list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
+ device = cqr->startdev;
+ private = (struct dasd_eckd_private *) device->private;
+ if (cqr->status == DASD_CQR_FAILED)
+ rc = -EIO;
+ list_del_init(&cqr->blocklist);
+ dasd_sfree_request(cqr, device);
+ private->count--;
+ }
+
+ /*
+ * in case of ENOMEM we need to retry after
+ * first requests are finished
+ */
+ if (fdata->start_unit <= fdata->stop_unit)
+ goto retry;
+
+ return rc;
+}
+
static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
{
cqr->status = DASD_CQR_FILLED;
@@ -4305,8 +4432,9 @@ static struct dasd_discipline dasd_eckd_discipline = {
.uncheck_device = dasd_eckd_uncheck_device,
.do_analysis = dasd_eckd_do_analysis,
.verify_path = dasd_eckd_verify_path,
- .ready_to_online = dasd_eckd_ready_to_online,
+ .basic_to_ready = dasd_eckd_basic_to_ready,
.online_to_ready = dasd_eckd_online_to_ready,
+ .ready_to_basic = dasd_eckd_ready_to_basic,
.fill_geometry = dasd_eckd_fill_geometry,
.start_IO = dasd_start_IO,
.term_IO = dasd_term_IO,
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 899e3f5a56e5..0785bd9bd5b6 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -300,10 +300,11 @@ struct dasd_discipline {
* Last things to do when a device is set online, and first things
* when it is set offline.
*/
- int (*ready_to_online) (struct dasd_device *);
+ int (*basic_to_ready) (struct dasd_device *);
int (*online_to_ready) (struct dasd_device *);
+ int (*ready_to_basic) (struct dasd_device *);
	/*
* Device operation functions. build_cp creates a ccw chain for
* a block device request, start_io starts the request and
* term_IO cancels it (e.g. in case of a timeout). format_device
@@ -317,8 +318,8 @@ struct dasd_discipline {
int (*start_IO) (struct dasd_ccw_req *);
int (*term_IO) (struct dasd_ccw_req *);
void (*handle_terminated_request) (struct dasd_ccw_req *);
- struct dasd_ccw_req *(*format_device) (struct dasd_device *,
- struct format_data_t *);
+ int (*format_device) (struct dasd_device *,
+ struct format_data_t *);
int (*free_cp) (struct dasd_ccw_req *, struct request *);
/*
@@ -672,6 +673,7 @@ int dasd_term_IO(struct dasd_ccw_req *);
void dasd_schedule_device_bh(struct dasd_device *);
void dasd_schedule_block_bh(struct dasd_block *);
int dasd_sleep_on(struct dasd_ccw_req *);
+int dasd_sleep_on_queue(struct list_head *);
int dasd_sleep_on_immediatly(struct dasd_ccw_req *);
int dasd_sleep_on_interruptible(struct dasd_ccw_req *);
void dasd_device_set_timer(struct dasd_device *, int);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 03c0e0444553..8be1b51e9311 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -143,12 +143,12 @@ static int dasd_ioctl_resume(struct dasd_block *block)
/*
* performs formatting of _device_ according to _fdata_
* Note: The discipline's format_function is assumed to deliver formatting
- * commands to format a single unit of the device. In terms of the ECKD
- * devices this means CCWs are generated to format a single track.
+ * commands to format multiple units of the device. In terms of the ECKD
+ * devices this means CCWs are generated to format multiple tracks.
*/
-static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
+static int
+dasd_format(struct dasd_block *block, struct format_data_t *fdata)
{
- struct dasd_ccw_req *cqr;
struct dasd_device *base;
int rc;
@@ -157,8 +157,8 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
return -EPERM;
if (base->state != DASD_STATE_BASIC) {
- pr_warning("%s: The DASD cannot be formatted while it is "
- "enabled\n", dev_name(&base->cdev->dev));
+ pr_warn("%s: The DASD cannot be formatted while it is enabled\n",
+ dev_name(&base->cdev->dev));
return -EBUSY;
}
@@ -178,21 +178,10 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
bdput(bdev);
}
- while (fdata->start_unit <= fdata->stop_unit) {
- cqr = base->discipline->format_device(base, fdata);
- if (IS_ERR(cqr))
- return PTR_ERR(cqr);
- rc = dasd_sleep_on_interruptible(cqr);
- dasd_sfree_request(cqr, cqr->memdev);
- if (rc) {
- if (rc != -ERESTARTSYS)
- pr_err("%s: Formatting unit %d failed with "
- "rc=%d\n", dev_name(&base->cdev->dev),
- fdata->start_unit, rc);
- return rc;
- }
- fdata->start_unit++;
- }
+ rc = base->discipline->format_device(base, fdata);
+ if (rc)
+ return rc;
+
return 0;
}
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index e9b9c8392832..b303cab76a7f 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -465,7 +465,7 @@ static int __init scm_blk_init(void)
scm_major = ret;
ret = scm_alloc_rqs(nr_requests);
if (ret)
- goto out_unreg;
+ goto out_free;
scm_debug = debug_register("scm_log", 16, 1, 16);
if (!scm_debug) {
@@ -486,7 +486,6 @@ out_dbf:
debug_unregister(scm_debug);
out_free:
scm_free_rqs();
-out_unreg:
unregister_blkdev(scm_major, "scm");
out:
return ret;
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index f4bb61b0cea1..c0d102e3a48b 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -223,6 +223,8 @@ void scm_cluster_request_irq(struct scm_request *scmrq)
bool scm_cluster_size_valid(void)
{
- return write_cluster_size == 0 || write_cluster_size == 32 ||
- write_cluster_size == 64 || write_cluster_size == 128;
+ if (write_cluster_size == 1 || write_cluster_size > 128)
+ return false;
+
+ return !(write_cluster_size & (write_cluster_size - 1));
}
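The rewritten scm_cluster_size_valid() above replaces the hard-coded list (0, 32, 64, 128) with the usual bit trick: a value v is zero or a power of two exactly when (v & (v - 1)) == 0, which together with the explicit checks for 1 and for values above 128 accepts 0 and any power of two up to 128. A small stand-alone sketch of that idiom, outside the kernel and using a hypothetical is_valid_cluster_size() helper purely for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical restatement of the check used by scm_cluster_size_valid():
 * accept 0 or any power of two up to 128, reject everything else. */
static bool is_valid_cluster_size(unsigned int v)
{
	if (v == 1 || v > 128)
		return false;
	/* v & (v - 1) clears the lowest set bit; the result is 0 only
	 * for 0 and for exact powers of two. */
	return (v & (v - 1)) == 0;
}

int main(void)
{
	unsigned int tests[] = { 0, 1, 2, 32, 48, 64, 128, 256 };
	unsigned int i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%3u -> %s\n", tests[i],
		       is_valid_cluster_size(tests[i]) ? "valid" : "invalid");
	return 0;
}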
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 7b00fa634d40..eb5d22795c47 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -502,7 +502,7 @@ static void raw3215_make_room(struct raw3215_info *raw, unsigned int length)
raw3215_try_io(raw);
raw->flags &= ~RAW3215_FLUSHING;
#ifdef CONFIG_TN3215_CONSOLE
- wait_cons_dev();
+ ccw_device_wait_idle(raw->cdev);
#endif
/* Enough room freed up ? */
if (RAW3215_BUFFER_SIZE - raw->count >= length)
@@ -858,7 +858,7 @@ static void con3215_flush(void)
raw = raw3215[0]; /* console 3215 is the first one */
if (raw->port.flags & ASYNC_SUSPENDED)
/* The console is still frozen for suspend. */
- if (ccw_device_force_console())
+ if (ccw_device_force_console(raw->cdev))
/* Forcing didn't work, no panic message .. */
return;
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index f4ff515db251..0da3ae3cd63b 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -174,8 +174,7 @@ static void mon_free_mem(struct mon_private *monpriv)
int i;
for (i = 0; i < MON_MSGLIM; i++)
- if (monpriv->msg_array[i])
- kfree(monpriv->msg_array[i]);
+ kfree(monpriv->msg_array[i]);
kfree(monpriv);
}
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 4c9030a5b9f2..24a08e8f19e1 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -796,7 +796,7 @@ struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
do {
__raw3270_reset_device(rp);
while (!raw3270_state_final(rp)) {
- wait_cons_dev();
+ ccw_device_wait_idle(rp->cdev);
barrier();
}
} while (rp->state != RAW3270_STATE_READY);
@@ -810,7 +810,7 @@ raw3270_wait_cons_dev(struct raw3270 *rp)
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
- wait_cons_dev();
+ ccw_device_wait_idle(rp->cdev);
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
}
@@ -1274,7 +1274,7 @@ void raw3270_pm_unfreeze(struct raw3270_view *view)
rp = view->dev;
if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
- ccw_device_force_console();
+ ccw_device_force_console(rp->cdev);
#endif
}
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index cd798386b622..178836ec252b 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -561,6 +561,8 @@ static void __init sclp_add_standby_memory(void)
add_memory_merged(0);
}
+#define MEM_SCT_SIZE (1UL << SECTION_SIZE_BITS)
+
static void __init insert_increment(u16 rn, int standby, int assigned)
{
struct memory_increment *incr, *new_incr;
@@ -573,7 +575,7 @@ static void __init insert_increment(u16 rn, int standby, int assigned)
new_incr->rn = rn;
new_incr->standby = standby;
if (!standby)
- new_incr->usecount = 1;
+ new_incr->usecount = rzm > MEM_SCT_SIZE ? rzm/MEM_SCT_SIZE : 1;
last_rn = 0;
prev = &sclp_mem_list;
list_for_each_entry(incr, &sclp_mem_list, list) {
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 1d61a01576d2..22820610022c 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -127,7 +127,7 @@ static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
}
if (mode == TO_USER) {
if (copy_to_user((__force __user void*) dest + offs, buf,
- PAGE_SIZE))
+ count - offs))
return -EFAULT;
} else
memcpy(dest + offs, buf, count - offs);
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 50ad5fdd815d..21fabc6d5a9c 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -377,6 +377,26 @@ static void chp_release(struct device *dev)
}
/**
+ * chp_update_desc - update channel-path description
+ * @chp - channel-path
+ *
+ * Update the channel-path description of the specified channel-path.
+ * Return zero on success, non-zero otherwise.
+ */
+int chp_update_desc(struct channel_path *chp)
+{
+ int rc;
+
+ rc = chsc_determine_base_channel_path_desc(chp->chpid, &chp->desc);
+ if (rc)
+ return rc;
+
+ rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
+
+ return rc;
+}
+
+/**
* chp_new - register a new channel-path
* @chpid - channel-path ID
*
@@ -403,7 +423,7 @@ int chp_new(struct chp_id chpid)
mutex_init(&chp->lock);
/* Obtain channel path description and fill it in. */
- ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+ ret = chp_update_desc(chp);
if (ret)
goto out_free;
if ((chp->desc.flags & 0x80) == 0) {
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index e1399dbee834..9284b785a06f 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -44,6 +44,7 @@ struct channel_path {
struct mutex lock; /* Serialize access to below members. */
int state;
struct channel_path_desc desc;
+ struct channel_path_desc_fmt1 desc_fmt1;
/* Channel-measurement related stuff: */
int cmg;
int shared;
@@ -62,6 +63,7 @@ int chp_is_registered(struct chp_id chpid);
void *chp_get_chp_desc(struct chp_id chpid);
void chp_remove_cmg_attr(struct channel_path *chp);
int chp_add_cmg_attr(struct channel_path *chp);
+int chp_update_desc(struct channel_path *chp);
int chp_new(struct chp_id chpid);
void chp_cfg_schedule(struct chp_id chpid, int configure);
void chp_cfg_cancel_deconfigure(struct chp_id chpid);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index e16c553f6556..8ea7d9b2c671 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -376,7 +376,7 @@ static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
continue;
}
mutex_lock(&chp->lock);
- chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+ chp_update_desc(chp);
mutex_unlock(&chp->lock);
}
}
@@ -631,8 +631,8 @@ int chsc_chp_vary(struct chp_id chpid, int on)
* Redo PathVerification on the devices the chpid connects to
*/
if (on) {
- /* Try to update the channel path descritor. */
- chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+ /* Try to update the channel path description. */
+ chp_update_desc(chp);
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
__s390_vary_chpid_on, &chpid);
} else
@@ -825,9 +825,10 @@ int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
{
struct chsc_response_struct *chsc_resp;
struct chsc_scpd *scpd_area;
+ unsigned long flags;
int ret;
- spin_lock_irq(&chsc_page_lock);
+ spin_lock_irqsave(&chsc_page_lock, flags);
scpd_area = chsc_page;
ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
if (ret)
@@ -835,7 +836,7 @@ int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
chsc_resp = (void *)&scpd_area->response;
memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
- spin_unlock_irq(&chsc_page_lock);
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 986ef6a92a41..935d80b4e9ce 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -471,15 +471,6 @@ int cio_disable_subchannel(struct subchannel *sch)
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);
-int cio_create_sch_lock(struct subchannel *sch)
-{
- sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
- if (!sch->lock)
- return -ENOMEM;
- spin_lock_init(sch->lock);
- return 0;
-}
-
static int cio_check_devno_blacklisted(struct subchannel *sch)
{
if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
@@ -536,32 +527,19 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
sprintf(dbf_txt, "valsch%x", schid.sch_no);
CIO_TRACE_EVENT(4, dbf_txt);
- /* Nuke all fields. */
- memset(sch, 0, sizeof(struct subchannel));
-
- sch->schid = schid;
- if (cio_is_console(schid)) {
- sch->lock = cio_get_console_lock();
- } else {
- err = cio_create_sch_lock(sch);
- if (err)
- goto out;
- }
- mutex_init(&sch->reg_mutex);
-
/*
* The first subchannel that is not-operational (ccode==3)
- * indicates that there aren't any more devices available.
+ * indicates that there aren't any more devices available.
* If stsch gets an exception, it means the current subchannel set
- * is not valid.
+ * is not valid.
*/
- ccode = stsch_err (schid, &sch->schib);
+ ccode = stsch_err(schid, &sch->schib);
if (ccode) {
err = (ccode == 3) ? -ENXIO : ccode;
goto out;
}
- /* Copy subchannel type from path management control word. */
sch->st = sch->schib.pmcw.st;
+ sch->schid = schid;
switch (sch->st) {
case SUBCHANNEL_TYPE_IO:
@@ -578,11 +556,7 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
sch->schid.ssid, sch->schid.sch_no, sch->st);
- return 0;
out:
- if (!cio_is_console(schid))
- kfree(sch->lock);
- sch->lock = NULL;
return err;
}
@@ -650,15 +624,13 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
}
#ifdef CONFIG_CCW_CONSOLE
-static struct subchannel console_subchannel;
-static struct io_subchannel_private console_priv;
-static int console_subchannel_in_use;
+static struct subchannel *console_sch;
/*
* Use cio_tsch to update the subchannel status and call the interrupt handler
- * if status had been pending. Called with the console_subchannel lock.
+ * if status had been pending. Called with the subchannel's lock held.
*/
-static void cio_tsch(struct subchannel *sch)
+void cio_tsch(struct subchannel *sch)
{
struct irb *irb;
int irq_context;
@@ -675,6 +647,7 @@ static void cio_tsch(struct subchannel *sch)
local_bh_disable();
irq_enter();
}
+ kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
else
@@ -685,135 +658,90 @@ static void cio_tsch(struct subchannel *sch)
}
}
-void *cio_get_console_priv(void)
-{
- return &console_priv;
-}
-
-/*
- * busy wait for the next interrupt on the console
- */
-void wait_cons_dev(void)
+static int cio_test_for_console(struct subchannel_id schid, void *data)
{
- if (!console_subchannel_in_use)
- return;
-
- while (1) {
- cio_tsch(&console_subchannel);
- if (console_subchannel.schib.scsw.cmd.actl == 0)
- break;
- udelay_simple(100);
- }
-}
+ struct schib schib;
-static int
-cio_test_for_console(struct subchannel_id schid, void *data)
-{
- if (stsch_err(schid, &console_subchannel.schib) != 0)
+ if (stsch_err(schid, &schib) != 0)
return -ENXIO;
- if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) &&
- console_subchannel.schib.pmcw.dnv &&
- (console_subchannel.schib.pmcw.dev == console_devno)) {
+ if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
+ (schib.pmcw.dev == console_devno)) {
console_irq = schid.sch_no;
return 1; /* found */
}
return 0;
}
-
-static int
-cio_get_console_sch_no(void)
+static int cio_get_console_sch_no(void)
{
struct subchannel_id schid;
-
+ struct schib schib;
+
init_subchannel_id(&schid);
if (console_irq != -1) {
/* VM provided us with the irq number of the console. */
schid.sch_no = console_irq;
- if (stsch_err(schid, &console_subchannel.schib) != 0 ||
- (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
- !console_subchannel.schib.pmcw.dnv)
+ if (stsch_err(schid, &schib) != 0 ||
+ (schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv)
return -1;
- console_devno = console_subchannel.schib.pmcw.dev;
+ console_devno = schib.pmcw.dev;
} else if (console_devno != -1) {
/* At least the console device number is known. */
for_each_subchannel(cio_test_for_console, NULL);
- if (console_irq == -1)
- return -1;
- } else {
- /* unlike in 2.4, we cannot autoprobe here, since
- * the channel subsystem is not fully initialized.
- * With some luck, the HWC console can take over */
- return -1;
}
return console_irq;
}
-struct subchannel *
-cio_probe_console(void)
+struct subchannel *cio_probe_console(void)
{
- int sch_no, ret;
struct subchannel_id schid;
+ struct subchannel *sch;
+ int sch_no, ret;
- if (xchg(&console_subchannel_in_use, 1) != 0)
- return ERR_PTR(-EBUSY);
sch_no = cio_get_console_sch_no();
if (sch_no == -1) {
- console_subchannel_in_use = 0;
pr_warning("No CCW console was found\n");
return ERR_PTR(-ENODEV);
}
- memset(&console_subchannel, 0, sizeof(struct subchannel));
init_subchannel_id(&schid);
schid.sch_no = sch_no;
- ret = cio_validate_subchannel(&console_subchannel, schid);
- if (ret) {
- console_subchannel_in_use = 0;
- return ERR_PTR(-ENODEV);
- }
+ sch = css_alloc_subchannel(schid);
+ if (IS_ERR(sch))
+ return sch;
- /*
- * enable console I/O-interrupt subclass
- */
isc_register(CONSOLE_ISC);
- console_subchannel.config.isc = CONSOLE_ISC;
- console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel;
- ret = cio_commit_config(&console_subchannel);
+ sch->config.isc = CONSOLE_ISC;
+ sch->config.intparm = (u32)(addr_t)sch;
+ ret = cio_commit_config(sch);
if (ret) {
isc_unregister(CONSOLE_ISC);
- console_subchannel_in_use = 0;
+ put_device(&sch->dev);
return ERR_PTR(ret);
}
- return &console_subchannel;
-}
-
-void
-cio_release_console(void)
-{
- console_subchannel.config.intparm = 0;
- cio_commit_config(&console_subchannel);
- isc_unregister(CONSOLE_ISC);
- console_subchannel_in_use = 0;
+ console_sch = sch;
+ return sch;
}
-/* Bah... hack to catch console special sausages. */
-int
-cio_is_console(struct subchannel_id schid)
+int cio_is_console(struct subchannel_id schid)
{
- if (!console_subchannel_in_use)
+ if (!console_sch)
return 0;
- return schid_equal(&schid, &console_subchannel.schid);
+ return schid_equal(&schid, &console_sch->schid);
}
-struct subchannel *
-cio_get_console_subchannel(void)
+void cio_register_early_subchannels(void)
{
- if (!console_subchannel_in_use)
- return NULL;
- return &console_subchannel;
+ int ret;
+
+ if (!console_sch)
+ return;
+
+ ret = css_register_subchannel(console_sch);
+ if (ret)
+ put_device(&console_sch->dev);
}
+#endif /* CONFIG_CCW_CONSOLE */
-#endif
static int
__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
{
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 4a1ff5c2eb88..d62f5e7f3cf1 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -121,23 +121,18 @@ extern int cio_commit_config(struct subchannel *sch);
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
int cio_tm_intrg(struct subchannel *sch);
-int cio_create_sch_lock(struct subchannel *);
void do_adapter_IO(u8 isc);
void do_IRQ(struct pt_regs *);
/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
-extern void cio_release_console(void);
extern int cio_is_console(struct subchannel_id);
-extern struct subchannel *cio_get_console_subchannel(void);
-extern spinlock_t * cio_get_console_lock(void);
-extern void *cio_get_console_priv(void);
+extern void cio_register_early_subchannels(void);
+extern void cio_tsch(struct subchannel *sch);
#else
#define cio_is_console(schid) 0
-#define cio_get_console_subchannel() NULL
-#define cio_get_console_lock() NULL
-#define cio_get_console_priv() NULL
+static inline void cio_register_early_subchannels(void) {}
#endif
#endif
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index a239237d43f3..1ebe5d3ddebb 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -137,37 +137,53 @@ out:
static void css_sch_todo(struct work_struct *work);
-static struct subchannel *
-css_alloc_subchannel(struct subchannel_id schid)
+static int css_sch_create_locks(struct subchannel *sch)
+{
+ sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
+ if (!sch->lock)
+ return -ENOMEM;
+
+ spin_lock_init(sch->lock);
+ mutex_init(&sch->reg_mutex);
+
+ return 0;
+}
+
+static void css_subchannel_release(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ sch->config.intparm = 0;
+ cio_commit_config(sch);
+ kfree(sch->lock);
+ kfree(sch);
+}
+
+struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
{
struct subchannel *sch;
int ret;
- sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
- if (sch == NULL)
+ sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
+ if (!sch)
return ERR_PTR(-ENOMEM);
- ret = cio_validate_subchannel (sch, schid);
- if (ret < 0) {
- kfree(sch);
- return ERR_PTR(ret);
- }
+
+ ret = cio_validate_subchannel(sch, schid);
+ if (ret < 0)
+ goto err;
+
+ ret = css_sch_create_locks(sch);
+ if (ret)
+ goto err;
+
INIT_WORK(&sch->todo_work, css_sch_todo);
+ sch->dev.release = &css_subchannel_release;
+ device_initialize(&sch->dev);
return sch;
-}
-
-static void
-css_subchannel_release(struct device *dev)
-{
- struct subchannel *sch;
- sch = to_subchannel(dev);
- if (!cio_is_console(sch->schid)) {
- /* Reset intparm to zeroes. */
- sch->config.intparm = 0;
- cio_commit_config(sch);
- kfree(sch->lock);
- kfree(sch);
- }
+err:
+ kfree(sch);
+ return ERR_PTR(ret);
}
static int css_sch_device_register(struct subchannel *sch)
@@ -177,7 +193,7 @@ static int css_sch_device_register(struct subchannel *sch)
mutex_lock(&sch->reg_mutex);
dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
sch->schid.sch_no);
- ret = device_register(&sch->dev);
+ ret = device_add(&sch->dev);
mutex_unlock(&sch->reg_mutex);
return ret;
}
@@ -228,16 +244,11 @@ void css_update_ssd_info(struct subchannel *sch)
{
int ret;
- if (cio_is_console(sch->schid)) {
- /* Console is initialized too early for functions requiring
- * memory allocation. */
+ ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
+ if (ret)
ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
- } else {
- ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
- if (ret)
- ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
- ssd_register_chpids(&sch->ssd_info);
- }
+
+ ssd_register_chpids(&sch->ssd_info);
}
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
@@ -275,14 +286,13 @@ static const struct attribute_group *default_subch_attr_groups[] = {
NULL,
};
-static int css_register_subchannel(struct subchannel *sch)
+int css_register_subchannel(struct subchannel *sch)
{
int ret;
/* Initialize the subchannel structure */
sch->dev.parent = &channel_subsystems[0]->device;
sch->dev.bus = &css_bus_type;
- sch->dev.release = &css_subchannel_release;
sch->dev.groups = default_subch_attr_groups;
/*
* We don't want to generate uevents for I/O subchannels that don't
@@ -314,23 +324,19 @@ static int css_register_subchannel(struct subchannel *sch)
return ret;
}
-int css_probe_device(struct subchannel_id schid)
+static int css_probe_device(struct subchannel_id schid)
{
- int ret;
struct subchannel *sch;
+ int ret;
+
+ sch = css_alloc_subchannel(schid);
+ if (IS_ERR(sch))
+ return PTR_ERR(sch);
- if (cio_is_console(schid))
- sch = cio_get_console_subchannel();
- else {
- sch = css_alloc_subchannel(schid);
- if (IS_ERR(sch))
- return PTR_ERR(sch);
- }
ret = css_register_subchannel(sch);
- if (ret) {
- if (!cio_is_console(schid))
- put_device(&sch->dev);
- }
+ if (ret)
+ put_device(&sch->dev);
+
return ret;
}
@@ -770,7 +776,7 @@ static int __init setup_css(int nr)
css->pseudo_subchannel->dev.release = css_subchannel_release;
dev_set_name(&css->pseudo_subchannel->dev, "defunct");
mutex_init(&css->pseudo_subchannel->reg_mutex);
- ret = cio_create_sch_lock(css->pseudo_subchannel);
+ ret = css_sch_create_locks(css->pseudo_subchannel);
if (ret) {
kfree(css->pseudo_subchannel);
return ret;
@@ -870,8 +876,7 @@ static struct notifier_block css_power_notifier = {
/*
* Now that the driver core is running, we can setup our channel subsystem.
- * The struct subchannel's are created during probing (except for the
- * static console subchannel).
+ * The struct subchannel's are created during probing.
*/
static int __init css_bus_init(void)
{
@@ -1050,6 +1055,8 @@ int css_complete_work(void)
*/
static int __init channel_subsystem_init_sync(void)
{
+ /* Register subchannels which are already in use. */
+ cio_register_early_subchannels();
/* Start initial subchannel evaluation. */
css_schedule_eval_all();
css_complete_work();
@@ -1065,9 +1072,8 @@ void channel_subsystem_reinit(void)
chsc_enable_facility(CHSC_SDA_OC_MSS);
chp_id_for_each(&chpid) {
chp = chpid_to_chp(chpid);
- if (!chp)
- continue;
- chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+ if (chp)
+ chp_update_desc(chp);
}
}
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 4af3dfe70ef5..b1de60335238 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -101,7 +101,8 @@ extern int css_driver_register(struct css_driver *);
extern void css_driver_unregister(struct css_driver *);
extern void css_sch_device_unregister(struct subchannel *);
-extern int css_probe_device(struct subchannel_id);
+extern int css_register_subchannel(struct subchannel *);
+extern struct subchannel *css_alloc_subchannel(struct subchannel_id);
extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
extern int css_init_done;
extern int max_ssid;
@@ -109,7 +110,6 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
int (*fn_unknown)(struct subchannel_id,
void *), void *data);
extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
-extern void css_reiterate_subchannels(void);
void css_update_ssd_info(struct subchannel *sch);
struct channel_subsystem {
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index c6767f5a58b2..1ab5f6c36d9b 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -19,6 +19,7 @@
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
+#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>
@@ -43,6 +44,10 @@ static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };
+static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
+static struct bus_type ccw_bus_type;
+
/******************* bus type handling ***********************/
/* The Linux driver model distinguishes between a bus type and
@@ -127,8 +132,6 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
return ret;
}
-static struct bus_type ccw_bus_type;
-
static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
@@ -137,8 +140,6 @@ static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
int);
static void recovery_func(unsigned long data);
-wait_queue_head_t ccw_device_init_wq;
-atomic_t ccw_device_init_count;
static struct css_device_id io_subchannel_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
@@ -191,10 +192,7 @@ int __init io_subchannel_init(void)
{
int ret;
- init_waitqueue_head(&ccw_device_init_wq);
- atomic_set(&ccw_device_init_count, 0);
setup_timer(&recovery_timer, recovery_func, 0);
-
ret = bus_register(&ccw_bus_type);
if (ret)
return ret;
@@ -1086,19 +1084,14 @@ static int io_subchannel_probe(struct subchannel *sch)
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
cdev = sch_get_cdev(sch);
- cdev->dev.groups = ccwdev_attr_groups;
- device_initialize(&cdev->dev);
- cdev->private->flags.initialized = 1;
- ccw_device_register(cdev);
- /*
- * Check if the device is already online. If it is
- * the reference count needs to be corrected since we
- * didn't obtain a reference in ccw_device_set_online.
- */
- if (cdev->private->state != DEV_STATE_NOT_OPER &&
- cdev->private->state != DEV_STATE_OFFLINE &&
- cdev->private->state != DEV_STATE_BOXED)
- get_device(&cdev->dev);
+ rc = ccw_device_register(cdev);
+ if (rc) {
+ /* Release online reference. */
+ put_device(&cdev->dev);
+ goto out_schedule;
+ }
+ if (atomic_dec_and_test(&ccw_device_init_count))
+ wake_up(&ccw_device_init_wq);
return 0;
}
io_subchannel_init_fields(sch);
@@ -1580,88 +1573,102 @@ out:
}
#ifdef CONFIG_CCW_CONSOLE
-static struct ccw_device console_cdev;
-static struct ccw_device_private console_private;
-static int console_cdev_in_use;
-
-static DEFINE_SPINLOCK(ccw_console_lock);
-
-spinlock_t * cio_get_console_lock(void)
-{
- return &ccw_console_lock;
-}
-
static int ccw_device_console_enable(struct ccw_device *cdev,
struct subchannel *sch)
{
- struct io_subchannel_private *io_priv = cio_get_console_priv();
int rc;
- /* Attach subchannel private data. */
- memset(io_priv, 0, sizeof(*io_priv));
- set_io_private(sch, io_priv);
io_subchannel_init_fields(sch);
rc = cio_commit_config(sch);
if (rc)
return rc;
sch->driver = &io_subchannel_driver;
- /* Initialize the ccw_device structure. */
- cdev->dev.parent= &sch->dev;
sch_set_cdev(sch, cdev);
io_subchannel_recog(cdev, sch);
/* Now wait for the async. recognition to come to an end. */
spin_lock_irq(cdev->ccwlock);
while (!dev_fsm_final_state(cdev))
- wait_cons_dev();
- rc = -EIO;
- if (cdev->private->state != DEV_STATE_OFFLINE)
+ ccw_device_wait_idle(cdev);
+
+ /* Hold on to an extra reference while device is online. */
+ get_device(&cdev->dev);
+ rc = ccw_device_online(cdev);
+ if (rc)
goto out_unlock;
- ccw_device_online(cdev);
+
while (!dev_fsm_final_state(cdev))
- wait_cons_dev();
- if (cdev->private->state != DEV_STATE_ONLINE)
- goto out_unlock;
- rc = 0;
+ ccw_device_wait_idle(cdev);
+
+ if (cdev->private->state == DEV_STATE_ONLINE)
+ cdev->online = 1;
+ else
+ rc = -EIO;
out_unlock:
spin_unlock_irq(cdev->ccwlock);
+ if (rc) /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
return rc;
}
-struct ccw_device *
-ccw_device_probe_console(void)
+struct ccw_device *ccw_device_probe_console(void)
{
+ struct io_subchannel_private *io_priv;
+ struct ccw_device *cdev;
struct subchannel *sch;
int ret;
- if (xchg(&console_cdev_in_use, 1) != 0)
- return ERR_PTR(-EBUSY);
sch = cio_probe_console();
- if (IS_ERR(sch)) {
- console_cdev_in_use = 0;
- return (void *) sch;
+ if (IS_ERR(sch))
+ return ERR_CAST(sch);
+
+ io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
+ if (!io_priv) {
+ put_device(&sch->dev);
+ return ERR_PTR(-ENOMEM);
}
- memset(&console_cdev, 0, sizeof(struct ccw_device));
- memset(&console_private, 0, sizeof(struct ccw_device_private));
- console_cdev.private = &console_private;
- console_private.cdev = &console_cdev;
- console_private.int_class = IRQIO_CIO;
- ret = ccw_device_console_enable(&console_cdev, sch);
+ cdev = io_subchannel_create_ccwdev(sch);
+ if (IS_ERR(cdev)) {
+ put_device(&sch->dev);
+ kfree(io_priv);
+ return cdev;
+ }
+ set_io_private(sch, io_priv);
+ ret = ccw_device_console_enable(cdev, sch);
if (ret) {
- cio_release_console();
- console_cdev_in_use = 0;
+ set_io_private(sch, NULL);
+ put_device(&sch->dev);
+ put_device(&cdev->dev);
+ kfree(io_priv);
return ERR_PTR(ret);
}
- console_cdev.online = 1;
- return &console_cdev;
+ return cdev;
+}
+
+/**
+ * ccw_device_wait_idle() - busy wait for device to become idle
+ * @cdev: ccw device
+ *
+ * Poll until activity control is zero, that is, no function or data
+ * transfer is pending/active.
+ * Called with device lock being held.
+ */
+void ccw_device_wait_idle(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ while (1) {
+ cio_tsch(sch);
+ if (sch->schib.scsw.cmd.actl == 0)
+ break;
+ udelay_simple(100);
+ }
}
static int ccw_device_pm_restore(struct device *dev);
-int ccw_device_force_console(void)
+int ccw_device_force_console(struct ccw_device *cdev)
{
- if (!console_cdev_in_use)
- return -ENODEV;
- return ccw_device_pm_restore(&console_cdev.dev);
+ return ccw_device_pm_restore(&cdev->dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
#endif
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 7d4ecb65db00..8d1d29873172 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -81,8 +81,6 @@ dev_fsm_final_state(struct ccw_device *cdev)
cdev->private->state == DEV_STATE_BOXED);
}
-extern wait_queue_head_t ccw_device_init_wq;
-extern atomic_t ccw_device_init_count;
int __init io_subchannel_init(void);
void io_subchannel_recog_done(struct ccw_device *cdev);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index c77b6e06bf64..4845d64f2842 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -704,9 +704,9 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout);
int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
- struct channel_path_desc_fmt1 desc;
+ struct channel_path *chp;
struct chp_id chpid;
- int mdc = 0, ret, i;
+ int mdc = 0, i;
/* Adjust requested path mask to excluded varied off paths. */
if (mask)
@@ -719,14 +719,20 @@ int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
if (!(mask & (0x80 >> i)))
continue;
chpid.id = sch->schib.pmcw.chpid[i];
- ret = chsc_determine_fmt1_channel_path_desc(chpid, &desc);
- if (ret)
- return ret;
- if (!desc.f)
+ chp = chpid_to_chp(chpid);
+ if (!chp)
+ continue;
+
+ mutex_lock(&chp->lock);
+ if (!chp->desc_fmt1.f) {
+ mutex_unlock(&chp->lock);
return 0;
- if (!desc.r)
+ }
+ if (!chp->desc_fmt1.r)
mdc = 1;
- mdc = mdc ? min(mdc, (int)desc.mdc) : desc.mdc;
+ mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) :
+ chp->desc_fmt1.mdc;
+ mutex_unlock(&chp->lock);
}
return mdc;
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index 65d13e38803f..5a999084a229 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -17,7 +17,7 @@ struct idset {
static inline unsigned long bitmap_size(int num_ssid, int num_id)
{
- return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long);
+ return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long);
}
static struct idset *idset_new(int num_ssid, int num_id)
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 408d2548a748..684cc343cf09 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1488,7 +1488,4 @@ struct megasas_mgmt_info {
int max_index;
};
-#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
-#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
-
#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 9d53540207ec..7c90d57b867e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3984,12 +3984,12 @@ static int megasas_probe_one(struct pci_dev *pdev,
if (reset_devices) {
pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
if (pos) {
- pci_read_config_word(pdev, msi_control_reg(pos),
+ pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
&control);
if (control & PCI_MSIX_FLAGS_ENABLE) {
dev_info(&pdev->dev, "resetting MSI-X\n");
pci_write_config_word(pdev,
- msi_control_reg(pos),
+ pos + PCI_MSIX_FLAGS,
control &
~PCI_MSIX_FLAGS_ENABLE);
}
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index ce90d0546cdd..74550922ad55 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -703,7 +703,7 @@ static struct pci_device_id mvs_pci_table[] = {
{ PCI_VDEVICE(TTI, 0x2744), chip_9480 },
{ PCI_VDEVICE(TTI, 0x2760), chip_9480 },
{
- .vendor = 0x1b4b,
+ .vendor = PCI_VENDOR_ID_MARVELL_EXT,
.device = 0x9480,
.subvendor = PCI_ANY_ID,
.subdevice = 0x9480,
@@ -712,7 +712,7 @@ static struct pci_device_id mvs_pci_table[] = {
.driver_data = chip_9480,
},
{
- .vendor = 0x1b4b,
+ .vendor = PCI_VENDOR_ID_MARVELL_EXT,
.device = 0x9445,
.subvendor = PCI_ANY_ID,
.subdevice = 0x9480,
@@ -721,7 +721,7 @@ static struct pci_device_id mvs_pci_table[] = {
.driver_data = chip_9445,
},
{
- .vendor = 0x1b4b,
+ .vendor = PCI_VENDOR_ID_MARVELL_EXT,
.device = 0x9485,
.subvendor = PCI_ANY_ID,
.subdevice = 0x9480,
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 4594ccaaf49b..c3601b57a80c 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -49,8 +49,8 @@ MODULE_AUTHOR("jyli@marvell.com");
MODULE_DESCRIPTION("Marvell UMI Driver");
static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = {
- { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) },
- { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9580) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
{ 0 }
};
diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h
index e360135fd1bd..41f168702ac7 100644
--- a/drivers/scsi/mvumi.h
+++ b/drivers/scsi/mvumi.h
@@ -32,7 +32,6 @@
#define VER_BUILD 1500
#define MV_DRIVER_NAME "mvumi"
-#define PCI_VENDOR_ID_MARVELL_2 0x1b4b
#define PCI_DEVICE_ID_MARVELL_MV9143 0x9143
#define PCI_DEVICE_ID_MARVELL_MV9580 0x9580
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 05400acbc456..b0452688308c 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -941,6 +941,14 @@ void start_tty(struct tty_struct *tty)
EXPORT_SYMBOL(start_tty);
+static void tty_update_time(struct timespec *time)
+{
+ unsigned long sec = get_seconds();
+ sec -= sec % 60;
+ if ((long)(sec - time->tv_sec) > 0)
+ time->tv_sec = sec;
+}
+
/**
* tty_read - read method for tty device files
* @file: pointer to tty file
@@ -960,10 +968,11 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
int i;
+ struct inode *inode = file_inode(file);
struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
- if (tty_paranoia_check(tty, file_inode(file), "tty_read"))
+ if (tty_paranoia_check(tty, inode, "tty_read"))
return -EIO;
if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
return -EIO;
@@ -977,6 +986,9 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
i = -EIO;
tty_ldisc_deref(ld);
+ if (i > 0)
+ tty_update_time(&inode->i_atime);
+
return i;
}
@@ -1077,8 +1089,10 @@ static inline ssize_t do_tty_write(
break;
cond_resched();
}
- if (written)
+ if (written) {
+ tty_update_time(&file_inode(file)->i_mtime);
ret = written;
+ }
out:
tty_write_unlock(tty);
return ret;
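The tty_update_time() helper added above limits atime/mtime churn on tty reads and writes: the current time is rounded down to the start of the minute, and the inode timestamp is only ever moved forward. A minimal user-space sketch of the same rounding rule, using plain time_t instead of the kernel's struct timespec and a hypothetical update_time_minute() helper; only the rounding and forward-only comparison are taken from the hunk above:

#include <stdio.h>
#include <time.h>

/* Round a seconds value down to the start of its minute and advance the
 * stored timestamp only if the rounded value is newer, mirroring the rule
 * the patch applies to i_atime/i_mtime. */
static void update_time_minute(time_t *stored, time_t now)
{
	time_t sec = now - (now % 60);

	if ((long)(sec - *stored) > 0)
		*stored = sec;
}

int main(void)
{
	time_t stamp = 0;
	time_t now = time(NULL);

	update_time_minute(&stamp, now);
	printf("rounded timestamp: %ld (now: %ld)\n", (long)stamp, (long)now);

	/* A later call only moves the stamp once the next minute starts. */
	update_time_minute(&stamp, now + 10);
	printf("after +10s:        %ld\n", (long)stamp);
	return 0;
}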
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 7abc5c81af2c..09d2e3ffd6fc 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -70,7 +70,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
pci_write_config_word(pdev, PCI_COMMAND, cmd);
}
- msix_pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ msix_pos = pdev->msix_cap;
if (msix_pos) {
u16 flags;
u32 table;
@@ -78,8 +78,8 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);
- vdev->msix_bar = table & PCI_MSIX_FLAGS_BIRMASK;
- vdev->msix_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
+ vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
+ vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
} else
vdev->msix_bar = 0xFF;
@@ -183,7 +183,7 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
u8 pos;
u16 flags;
- pos = pci_find_capability(vdev->pdev, PCI_CAP_ID_MSI);
+ pos = vdev->pdev->msi_cap;
if (pos) {
pci_read_config_word(vdev->pdev,
pos + PCI_MSI_FLAGS, &flags);
@@ -194,7 +194,7 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
u8 pos;
u16 flags;
- pos = pci_find_capability(vdev->pdev, PCI_CAP_ID_MSIX);
+ pos = vdev->pdev->msix_cap;
if (pos) {
pci_read_config_word(vdev->pdev,
pos + PCI_MSIX_FLAGS, &flags);
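The vfio hunk above stops reusing PCI_MSIX_FLAGS_BIRMASK and instead splits the MSI-X Table Offset/BIR dword with the dedicated masks: the low three bits select the BAR holding the table, the remaining bits are the 8-byte-aligned offset within that BAR. A stand-alone sketch of that decode; the mask values match include/uapi/linux/pci_regs.h, while the register value is hard-coded purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Masks as defined in include/uapi/linux/pci_regs.h */
#define PCI_MSIX_TABLE_BIR	0x00000007
#define PCI_MSIX_TABLE_OFFSET	0xfffffff8

int main(void)
{
	/* Example Table Offset/BIR dword: offset 0x2000 in BAR 4
	 * (illustrative value, not read from real hardware). */
	uint32_t table = 0x00002004;

	uint32_t bir    = table & PCI_MSIX_TABLE_BIR;
	uint32_t offset = table & PCI_MSIX_TABLE_OFFSET;

	printf("MSI-X table lives in BAR %u at offset 0x%x\n", bir, offset);
	return 0;
}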
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 8bda6131ae3e..981c1c08c727 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2266,7 +2266,7 @@ config XEN_FBDEV_FRONTEND
select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS
select FB_DEFERRED_IO
- select INPUT_XEN_KBDDEV_FRONTEND
+ select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
select XEN_XENBUS_FRONTEND
default y
help
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 2647ad8e1f19..d8cc8127f19c 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -85,8 +85,7 @@ enum xen_irq_type {
* event channel - irq->event channel mapping
* cpu - cpu this event channel is bound to
* index - type-specific information:
- * PIRQ - vector, with MSB being "needs EIO", or physical IRQ of the HVM
- * guest, or GSI (real passthrough IRQ) of the device.
+ * PIRQ - physical IRQ, GSI, flags, and owner domain
* VIRQ - virq number
* IPI - IPI vector
* EVTCHN -
@@ -105,7 +104,6 @@ struct irq_info {
struct {
unsigned short pirq;
unsigned short gsi;
- unsigned char vector;
unsigned char flags;
uint16_t domid;
} pirq;
@@ -211,7 +209,6 @@ static void xen_irq_info_pirq_init(unsigned irq,
unsigned short evtchn,
unsigned short pirq,
unsigned short gsi,
- unsigned short vector,
uint16_t domid,
unsigned char flags)
{
@@ -221,7 +218,6 @@ static void xen_irq_info_pirq_init(unsigned irq,
info->u.pirq.pirq = pirq;
info->u.pirq.gsi = gsi;
- info->u.pirq.vector = vector;
info->u.pirq.domid = domid;
info->u.pirq.flags = flags;
}
@@ -519,6 +515,9 @@ static void xen_free_irq(unsigned irq)
{
struct irq_info *info = irq_get_handler_data(irq);
+ if (WARN_ON(!info))
+ return;
+
list_del(&info->list);
irq_set_handler_data(irq, NULL);
@@ -714,7 +713,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
goto out;
}
- xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
+ xen_irq_info_pirq_init(irq, 0, pirq, gsi, DOMID_SELF,
shareable ? PIRQ_SHAREABLE : 0);
pirq_query_unmask(irq);
@@ -762,8 +761,7 @@ int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
}
int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
- int pirq, int vector, const char *name,
- domid_t domid)
+ int pirq, const char *name, domid_t domid)
{
int irq, ret;
@@ -776,7 +774,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
name);
- xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
+ xen_irq_info_pirq_init(irq, 0, pirq, 0, domid, 0);
ret = irq_set_msi_desc(irq, msidesc);
if (ret < 0)
goto error_irq;
@@ -1008,6 +1006,9 @@ static void unbind_from_irq(unsigned int irq)
int evtchn = evtchn_from_irq(irq);
struct irq_info *info = irq_get_handler_data(irq);
+ if (WARN_ON(!info))
+ return;
+
mutex_lock(&irq_mapping_update_lock);
if (info->refcnt > 0) {
@@ -1135,6 +1136,10 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
+ struct irq_info *info = irq_get_handler_data(irq);
+
+ if (WARN_ON(!info))
+ return;
free_irq(irq, dev_id);
unbind_from_irq(irq);
}
@@ -1457,6 +1462,9 @@ void rebind_evtchn_irq(int evtchn, int irq)
{
struct irq_info *info = info_for_irq(irq);
+ if (WARN_ON(!info))
+ return;
+
/* Make sure the irq is masked, since the new event channel
will also be masked. */
disable_irq(irq);
@@ -1730,7 +1738,12 @@ void xen_poll_irq(int irq)
int xen_test_irq_shared(int irq)
{
struct irq_info *info = info_for_irq(irq);
- struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq };
+ struct physdev_irq_status_query irq_status;
+
+ if (WARN_ON(!info))
+ return -ENOENT;
+
+ irq_status.irq = info->u.pirq.pirq;
if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
return 0;
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 90e34ac7e522..8abd7d579037 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -25,6 +25,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/syscore_ops.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
@@ -51,9 +52,9 @@ static DEFINE_MUTEX(acpi_ids_mutex);
/* Which ACPI ID we have processed from 'struct acpi_processor'. */
static unsigned long *acpi_ids_done;
/* Which ACPI ID exist in the SSDT/DSDT processor definitions. */
-static unsigned long __initdata *acpi_id_present;
+static unsigned long *acpi_id_present;
/* And if there is an _CST definition (or a PBLK) for the ACPI IDs */
-static unsigned long __initdata *acpi_id_cst_present;
+static unsigned long *acpi_id_cst_present;
static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
{
@@ -329,7 +330,7 @@ static unsigned int __init get_max_acpi_id(void)
* for_each_[present|online]_cpu macros which are banded to the virtual
* CPU amount.
*/
-static acpi_status __init
+static acpi_status
read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
{
u32 acpi_id;
@@ -384,12 +385,16 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
}
-static int __init check_acpi_ids(struct acpi_processor *pr_backup)
+static int check_acpi_ids(struct acpi_processor *pr_backup)
{
if (!pr_backup)
return -ENODEV;
+ if (acpi_id_present && acpi_id_cst_present)
+ /* OK, done this once .. skip to uploading */
+ goto upload;
+
/* All online CPUs have been processed at this stage. Now verify
* whether in fact "online CPUs" == physical CPUs.
*/
@@ -408,6 +413,7 @@ static int __init check_acpi_ids(struct acpi_processor *pr_backup)
read_acpi_id, NULL, NULL, NULL);
acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL);
+upload:
if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) {
unsigned int i;
for_each_set_bit(i, acpi_id_present, nr_acpi_bits) {
@@ -417,10 +423,7 @@ static int __init check_acpi_ids(struct acpi_processor *pr_backup)
(void)upload_pm_data(pr_backup);
}
}
- kfree(acpi_id_present);
- acpi_id_present = NULL;
- kfree(acpi_id_cst_present);
- acpi_id_cst_present = NULL;
+
return 0;
}
static int __init check_prereq(void)
@@ -467,10 +470,47 @@ static void free_acpi_perf_data(void)
free_percpu(acpi_perf_data);
}
-static int __init xen_acpi_processor_init(void)
+static int xen_upload_processor_pm_data(void)
{
struct acpi_processor *pr_backup = NULL;
unsigned int i;
+ int rc = 0;
+
+ pr_info(DRV_NAME "Uploading Xen processor PM info\n");
+
+ for_each_possible_cpu(i) {
+ struct acpi_processor *_pr;
+ _pr = per_cpu(processors, i /* APIC ID */);
+ if (!_pr)
+ continue;
+
+ if (!pr_backup) {
+ pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
+ if (pr_backup)
+ memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
+ }
+ (void)upload_pm_data(_pr);
+ }
+
+ rc = check_acpi_ids(pr_backup);
+ kfree(pr_backup);
+
+ return rc;
+}
+
+static void xen_acpi_processor_resume(void)
+{
+ bitmap_zero(acpi_ids_done, nr_acpi_bits);
+ xen_upload_processor_pm_data();
+}
+
+static struct syscore_ops xap_syscore_ops = {
+ .resume = xen_acpi_processor_resume,
+};
+
+static int __init xen_acpi_processor_init(void)
+{
+ unsigned int i;
int rc = check_prereq();
if (rc)
@@ -514,27 +554,12 @@ static int __init xen_acpi_processor_init(void)
goto err_out;
}
- for_each_possible_cpu(i) {
- struct acpi_processor *_pr;
- _pr = per_cpu(processors, i /* APIC ID */);
- if (!_pr)
- continue;
-
- if (!pr_backup) {
- pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
- if (pr_backup)
- memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
- }
- (void)upload_pm_data(_pr);
- }
- rc = check_acpi_ids(pr_backup);
-
- kfree(pr_backup);
- pr_backup = NULL;
-
+ rc = xen_upload_processor_pm_data();
if (rc)
goto err_unregister;
+ register_syscore_ops(&xap_syscore_ops);
+
return 0;
err_unregister:
for_each_possible_cpu(i) {
@@ -552,7 +577,10 @@ static void __exit xen_acpi_processor_exit(void)
{
int i;
+ unregister_syscore_ops(&xap_syscore_ops);
kfree(acpi_ids_done);
+ kfree(acpi_id_present);
+ kfree(acpi_id_cst_present);
for_each_possible_cpu(i) {
struct acpi_processor_performance *perf;
perf = per_cpu_ptr(acpi_perf_data, i);
diff --git a/fs/aio.c b/fs/aio.c
index 3f941f2a3059..1dc8786f4588 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1029,9 +1029,9 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
spin_unlock(&info->ring_lock);
out:
- kunmap_atomic(ring);
dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
(unsigned long)ring->head, (unsigned long)ring->tail);
+ kunmap_atomic(ring);
return ret;
}
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index bcbdd7484e58..03053aca5b32 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -152,15 +152,6 @@ void acpi_penalize_isa_irq(int irq, int active);
void acpi_pci_irq_disable (struct pci_dev *dev);
-struct acpi_pci_driver {
- struct list_head node;
- int (*add)(struct acpi_pci_root *root);
- void (*remove)(struct acpi_pci_root *root);
-};
-
-int acpi_pci_register_driver(struct acpi_pci_driver *driver);
-void acpi_pci_unregister_driver(struct acpi_pci_driver *driver);
-
extern int ec_read(u8 addr, u8 *val);
extern int ec_write(u8 addr, u8 val);
extern int ec_transaction(u8 command,
diff --git a/include/linux/msi.h b/include/linux/msi.h
index ce93a341337d..20c2d6dd5d25 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -13,14 +13,14 @@ struct msi_msg {
/* Helper functions */
struct irq_data;
struct msi_desc;
-extern void mask_msi_irq(struct irq_data *data);
-extern void unmask_msi_irq(struct irq_data *data);
-extern void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-extern void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-extern void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
-extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
-extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
+void mask_msi_irq(struct irq_data *data);
+void unmask_msi_irq(struct irq_data *data);
+void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void read_msi_msg(unsigned int irq, struct msi_msg *msg);
+void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
+void write_msi_msg(unsigned int irq, struct msi_msg *msg);
struct msi_desc {
struct {
@@ -54,9 +54,8 @@ struct msi_desc {
*/
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
-extern int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
-extern void arch_teardown_msi_irqs(struct pci_dev *dev);
-extern int arch_msi_check_device(struct pci_dev* dev, int nvec, int type);
-
+int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
+void arch_teardown_msi_irqs(struct pci_dev *dev);
+int arch_msi_check_device(struct pci_dev* dev, int nvec, int type);
#endif /* LINUX_MSI_H */
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 9121595a8ebf..433da8a1a426 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -53,6 +53,9 @@ struct mutex {
#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
struct task_struct *owner;
#endif
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+ void *spin_mlock; /* Spinner MCS lock */
+#endif
#ifdef CONFIG_DEBUG_MUTEXES
const char *name;
void *magic;
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 9a22b5efb384..81b31613eb25 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -41,8 +41,37 @@ static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
return DEVICE_ACPI_HANDLE(dev);
}
+
+void acpi_pci_add_bus(struct pci_bus *bus);
+void acpi_pci_remove_bus(struct pci_bus *bus);
+
+#ifdef CONFIG_ACPI_PCI_SLOT
+void acpi_pci_slot_init(void);
+void acpi_pci_slot_enumerate(struct pci_bus *bus, acpi_handle handle);
+void acpi_pci_slot_remove(struct pci_bus *bus);
+#else
+static inline void acpi_pci_slot_init(void) { }
+static inline void acpi_pci_slot_enumerate(struct pci_bus *bus,
+ acpi_handle handle) { }
+static inline void acpi_pci_slot_remove(struct pci_bus *bus) { }
#endif
+#ifdef CONFIG_HOTPLUG_PCI_ACPI
+void acpiphp_init(void);
+void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle);
+void acpiphp_remove_slots(struct pci_bus *bus);
+#else
+static inline void acpiphp_init(void) { }
+static inline void acpiphp_enumerate_slots(struct pci_bus *bus,
+ acpi_handle handle) { }
+static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
+#endif
+
+#else /* CONFIG_ACPI */
+static inline void acpi_pci_add_bus(struct pci_bus *bus) { }
+static inline void acpi_pci_remove_bus(struct pci_bus *bus) { }
+#endif /* CONFIG_ACPI */
+
#ifdef CONFIG_ACPI_APEI
extern bool aer_acpi_firmware_first(void);
#else
diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h
index c8320144fe79..8af4610c2e41 100644
--- a/include/linux/pci-aspm.h
+++ b/include/linux/pci-aspm.h
@@ -23,14 +23,14 @@
#define PCIE_LINK_STATE_CLKPM 4
#ifdef CONFIG_PCIEASPM
-extern void pcie_aspm_init_link_state(struct pci_dev *pdev);
-extern void pcie_aspm_exit_link_state(struct pci_dev *pdev);
-extern void pcie_aspm_pm_state_change(struct pci_dev *pdev);
-extern void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
-extern void pci_disable_link_state(struct pci_dev *pdev, int state);
-extern void pci_disable_link_state_locked(struct pci_dev *pdev, int state);
-extern void pcie_clear_aspm(struct pci_bus *bus);
-extern void pcie_no_aspm(void);
+void pcie_aspm_init_link_state(struct pci_dev *pdev);
+void pcie_aspm_exit_link_state(struct pci_dev *pdev);
+void pcie_aspm_pm_state_change(struct pci_dev *pdev);
+void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
+void pci_disable_link_state(struct pci_dev *pdev, int state);
+void pci_disable_link_state_locked(struct pci_dev *pdev, int state);
+void pcie_clear_aspm(struct pci_bus *bus);
+void pcie_no_aspm(void);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev)
{
@@ -56,8 +56,8 @@ static inline void pcie_no_aspm(void)
#endif
#ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */
-extern void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev);
-extern void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev);
+void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev);
+void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev);
#else
static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
{
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index 7ef68724f0f0..68bcefd7fca0 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -14,9 +14,9 @@ struct pci_ats {
#ifdef CONFIG_PCI_ATS
-extern int pci_enable_ats(struct pci_dev *dev, int ps);
-extern void pci_disable_ats(struct pci_dev *dev);
-extern int pci_ats_queue_depth(struct pci_dev *dev);
+int pci_enable_ats(struct pci_dev *dev, int ps);
+void pci_disable_ats(struct pci_dev *dev);
+int pci_ats_queue_depth(struct pci_dev *dev);
/**
* pci_ats_enabled - query the ATS status
@@ -54,12 +54,12 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
#ifdef CONFIG_PCI_PRI
-extern int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
-extern void pci_disable_pri(struct pci_dev *pdev);
-extern bool pci_pri_enabled(struct pci_dev *pdev);
-extern int pci_reset_pri(struct pci_dev *pdev);
-extern bool pci_pri_stopped(struct pci_dev *pdev);
-extern int pci_pri_status(struct pci_dev *pdev);
+int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
+void pci_disable_pri(struct pci_dev *pdev);
+bool pci_pri_enabled(struct pci_dev *pdev);
+int pci_reset_pri(struct pci_dev *pdev);
+bool pci_pri_stopped(struct pci_dev *pdev);
+int pci_pri_status(struct pci_dev *pdev);
#else /* CONFIG_PCI_PRI */
@@ -95,10 +95,10 @@ static inline int pci_pri_status(struct pci_dev *pdev)
#ifdef CONFIG_PCI_PASID
-extern int pci_enable_pasid(struct pci_dev *pdev, int features);
-extern void pci_disable_pasid(struct pci_dev *pdev);
-extern int pci_pasid_features(struct pci_dev *pdev);
-extern int pci_max_pasids(struct pci_dev *pdev);
+int pci_enable_pasid(struct pci_dev *pdev, int features);
+void pci_disable_pasid(struct pci_dev *pdev);
+int pci_pasid_features(struct pci_dev *pdev);
+int pci_max_pasids(struct pci_dev *pdev);
#else /* CONFIG_PCI_PASID */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 710067f3618c..e73dfa308b87 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -35,6 +35,21 @@
/* Include the ID list */
#include <linux/pci_ids.h>
+/*
+ * The PCI interface treats multi-function devices as independent
+ * devices. The slot/function address of each device is encoded
+ * in a single byte as follows:
+ *
+ * 7:3 = slot
+ * 2:0 = function
+ * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
+ * In the interest of not exposing interfaces to user space unnecessarily,
+ * the following kernel-only defines are added here.
+ */
+#define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))
+/* return bus from PCI devid = (((u16)bus_number) << 8) | devfn */
+#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
+
/* pci_slot represents a physical slot */
struct pci_slot {
struct pci_bus *bus; /* The bus this slot is on */
@@ -232,6 +247,8 @@ struct pci_dev {
u8 revision; /* PCI revision, low byte of class word */
u8 hdr_type; /* PCI header type (`multi' flag masked out) */
u8 pcie_cap; /* PCI-E capability offset */
+ u8 msi_cap; /* MSI capability offset */
+ u8 msix_cap; /* MSI-X capability offset */
u8 pcie_mpss:3; /* PCI-E Max Payload Size Supported */
u8 rom_base_reg; /* which config register controls the ROM */
u8 pin; /* which interrupt pin this device uses */
@@ -249,8 +266,7 @@ struct pci_dev {
pci_power_t current_state; /* Current operating state. In ACPI-speak,
this is D0-D3, D0 being fully functional,
and D3 being off. */
- int pm_cap; /* PM capability offset in the
- configuration space */
+ u8 pm_cap; /* PM capability offset */
unsigned int pme_support:5; /* Bitmask of states from which PME#
can be generated */
unsigned int pme_interrupt:1;
@@ -348,7 +364,7 @@ static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
return dev;
}
-extern struct pci_dev *alloc_pci_dev(void);
+struct pci_dev *alloc_pci_dev(void);
#define to_pci_dev(n) container_of(n, struct pci_dev, dev)
#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
@@ -504,10 +520,10 @@ struct pci_ops {
* ACPI needs to be able to access PCI config space before we've done a
* PCI bus scan and created pci_bus structures.
*/
-extern int raw_pci_read(unsigned int domain, unsigned int bus,
- unsigned int devfn, int reg, int len, u32 *val);
-extern int raw_pci_write(unsigned int domain, unsigned int bus,
- unsigned int devfn, int reg, int len, u32 val);
+int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 *val);
+int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 val);
struct pci_bus_region {
resource_size_t start;
@@ -658,7 +674,7 @@ struct pci_driver {
/* these external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI
-extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss);
+void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss);
enum pcie_bus_config_types {
PCIE_BUS_TUNE_OFF,
@@ -675,9 +691,11 @@ extern struct bus_type pci_bus_type;
* code, or pci core code. */
extern struct list_head pci_root_buses; /* list of all known PCI buses */
/* Some device drivers need to know if PCI is initialized */
-extern int no_pci_devices(void);
+int no_pci_devices(void);
void pcibios_resource_survey_bus(struct pci_bus *bus);
+void pcibios_add_bus(struct pci_bus *bus);
+void pcibios_remove_bus(struct pci_bus *bus);
void pcibios_fixup_bus(struct pci_bus *);
int __must_check pcibios_enable_device(struct pci_dev *, int mask);
/* Architecture specific versions may override this (weak) */
@@ -699,7 +717,7 @@ void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
struct pci_bus_region *region);
void pcibios_scan_specific_bus(int busn);
-extern struct pci_bus *pci_find_bus(int domain, int busnr);
+struct pci_bus *pci_find_bus(int domain, int busnr);
void pci_bus_add_devices(const struct pci_bus *bus);
struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata);
@@ -732,14 +750,14 @@ struct resource *pci_find_parent_resource(const struct pci_dev *dev,
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
-extern struct pci_dev *pci_dev_get(struct pci_dev *dev);
-extern void pci_dev_put(struct pci_dev *dev);
-extern void pci_remove_bus(struct pci_bus *b);
-extern void pci_stop_and_remove_bus_device(struct pci_dev *dev);
+struct pci_dev *pci_dev_get(struct pci_dev *dev);
+void pci_dev_put(struct pci_dev *dev);
+void pci_remove_bus(struct pci_bus *b);
+void pci_stop_and_remove_bus_device(struct pci_dev *dev);
void pci_stop_root_bus(struct pci_bus *bus);
void pci_remove_root_bus(struct pci_bus *bus);
void pci_setup_cardbus(struct pci_bus *bus);
-extern void pci_sort_breadthfirst(void);
+void pci_sort_breadthfirst(void);
#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
#define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0))
@@ -1142,18 +1160,17 @@ static inline int pci_msi_enabled(void)
return 0;
}
#else
-extern int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec);
-extern int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec);
-extern void pci_msi_shutdown(struct pci_dev *dev);
-extern void pci_disable_msi(struct pci_dev *dev);
-extern int pci_msix_table_size(struct pci_dev *dev);
-extern int pci_enable_msix(struct pci_dev *dev,
- struct msix_entry *entries, int nvec);
-extern void pci_msix_shutdown(struct pci_dev *dev);
-extern void pci_disable_msix(struct pci_dev *dev);
-extern void msi_remove_pci_irq_vectors(struct pci_dev *dev);
-extern void pci_restore_msi_state(struct pci_dev *dev);
-extern int pci_msi_enabled(void);
+int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec);
+int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec);
+void pci_msi_shutdown(struct pci_dev *dev);
+void pci_disable_msi(struct pci_dev *dev);
+int pci_msix_table_size(struct pci_dev *dev);
+int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec);
+void pci_msix_shutdown(struct pci_dev *dev);
+void pci_disable_msix(struct pci_dev *dev);
+void msi_remove_pci_irq_vectors(struct pci_dev *dev);
+void pci_restore_msi_state(struct pci_dev *dev);
+int pci_msi_enabled(void);
#endif
#ifdef CONFIG_PCIEPORTBUS
@@ -1168,8 +1185,8 @@ extern bool pcie_ports_auto;
static inline int pcie_aspm_enabled(void) { return 0; }
static inline bool pcie_aspm_support_enabled(void) { return false; }
#else
-extern int pcie_aspm_enabled(void);
-extern bool pcie_aspm_support_enabled(void);
+int pcie_aspm_enabled(void);
+bool pcie_aspm_support_enabled(void);
#endif
#ifdef CONFIG_PCIEAER
@@ -1187,8 +1204,8 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev)
}
static inline void pcie_ecrc_get_policy(char *str) {};
#else
-extern void pcie_set_ecrc_checking(struct pci_dev *dev);
-extern void pcie_ecrc_get_policy(char *str);
+void pcie_set_ecrc_checking(struct pci_dev *dev);
+void pcie_ecrc_get_policy(char *str);
#endif
#define pci_enable_msi(pdev) pci_enable_msi_block(pdev, 1)
@@ -1199,9 +1216,9 @@ int ht_create_irq(struct pci_dev *dev, int idx);
void ht_destroy_irq(unsigned int irq);
#endif /* CONFIG_HT_IRQ */
-extern void pci_cfg_access_lock(struct pci_dev *dev);
-extern bool pci_cfg_access_trylock(struct pci_dev *dev);
-extern void pci_cfg_access_unlock(struct pci_dev *dev);
+void pci_cfg_access_lock(struct pci_dev *dev);
+bool pci_cfg_access_trylock(struct pci_dev *dev);
+void pci_cfg_access_unlock(struct pci_dev *dev);
/*
* PCI domain support. Sometimes called PCI segment (eg by ACPI),
@@ -1226,7 +1243,7 @@ static inline int pci_proc_domain(struct pci_bus *bus)
/* some architectures require additional setup to direct VGA traffic */
typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
-extern void pci_register_set_vga_state(arch_set_vga_state_t func);
+void pci_register_set_vga_state(arch_set_vga_state_t func);
#else /* CONFIG_PCI is not enabled */
@@ -1628,8 +1645,8 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev,
int pcibios_add_device(struct pci_dev *dev);
#ifdef CONFIG_PCI_MMCONFIG
-extern void __init pci_mmcfg_early_init(void);
-extern void __init pci_mmcfg_late_init(void);
+void __init pci_mmcfg_early_init(void);
+void __init pci_mmcfg_late_init(void);
#else
static inline void pci_mmcfg_early_init(void) { }
static inline void pci_mmcfg_late_init(void) { }
@@ -1640,12 +1657,12 @@ int pci_ext_cfg_avail(void);
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
#ifdef CONFIG_PCI_IOV
-extern int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
-extern void pci_disable_sriov(struct pci_dev *dev);
-extern irqreturn_t pci_sriov_migration(struct pci_dev *dev);
-extern int pci_num_vf(struct pci_dev *dev);
-extern int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
-extern int pci_sriov_get_totalvfs(struct pci_dev *dev);
+int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
+void pci_disable_sriov(struct pci_dev *dev);
+irqreturn_t pci_sriov_migration(struct pci_dev *dev);
+int pci_num_vf(struct pci_dev *dev);
+int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
+int pci_sriov_get_totalvfs(struct pci_dev *dev);
#else
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
@@ -1673,8 +1690,8 @@ static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
#endif
#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
-extern void pci_hp_create_module_link(struct pci_slot *pci_slot);
-extern void pci_hp_remove_module_link(struct pci_slot *pci_slot);
+void pci_hp_create_module_link(struct pci_slot *pci_slot);
+void pci_hp_remove_module_link(struct pci_slot *pci_slot);
#endif
/**
@@ -1818,13 +1835,13 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
/* PCI <-> OF binding helpers */
#ifdef CONFIG_OF
struct device_node;
-extern void pci_set_of_node(struct pci_dev *dev);
-extern void pci_release_of_node(struct pci_dev *dev);
-extern void pci_set_bus_of_node(struct pci_bus *bus);
-extern void pci_release_bus_of_node(struct pci_bus *bus);
+void pci_set_of_node(struct pci_dev *dev);
+void pci_release_of_node(struct pci_dev *dev);
+void pci_set_bus_of_node(struct pci_bus *bus);
+void pci_release_bus_of_node(struct pci_bus *bus);
/* Arch may override this (weak) */
-extern struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus);
+struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev *pdev)
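For reference, the PCI_DEVID()/PCI_BUS_NUM() helpers added near the top of this pci.h diff simply stack the bus number on top of the existing 8-bit devfn encoding (bus in bits 15:8, slot in 7:3, function in 2:0). A minimal userspace sketch of that arithmetic follows; the macro bodies are reproduced from the patch and from uapi/linux/pci.h, and the bus/slot/function values are purely illustrative.

#include <stdio.h>
#include <stdint.h>

typedef uint16_t u16;		/* stand-in for the kernel type */

/* From uapi/linux/pci.h */
#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)

/* As added to include/linux/pci.h above */
#define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))
#define PCI_BUS_NUM(x)		(((x) >> 8) & 0xff)

int main(void)
{
	unsigned int bus = 0x3c;			/* example values only */
	unsigned int devfn = PCI_DEVFN(0x1f, 3);	/* slot 0x1f, function 3 */
	u16 devid = PCI_DEVID(bus, devfn);

	/* devid packs bus in bits 15:8 and devfn in bits 7:0 */
	printf("devid=0x%04x -> bus=0x%02x slot=0x%02x func=%d\n",
	       devid, PCI_BUS_NUM(devid), PCI_SLOT(devid & 0xff),
	       PCI_FUNC(devid & 0xff));
	return 0;
}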
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 45fc162cbdc0..8db71dcd6337 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -125,12 +125,12 @@ static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
return pci_slot_name(slot->pci_slot);
}
-extern int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *pbus,
- int nr, const char *name,
- struct module *owner, const char *mod_name);
-extern int pci_hp_deregister(struct hotplug_slot *slot);
-extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot,
- struct hotplug_slot_info *info);
+int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *pbus, int nr,
+ const char *name, struct module *owner,
+ const char *mod_name);
+int pci_hp_deregister(struct hotplug_slot *slot);
+int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot,
+ struct hotplug_slot_info *info);
/* use a define to avoid include chaining to get THIS_MODULE & friends */
#define pci_hp_register(slot, pbus, devnr, name) \
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index f11c1c2609d5..a80f9e6ce9e5 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1604,6 +1604,7 @@
#define PCI_SUBDEVICE_ID_KEYSPAN_SX2 0x5334
#define PCI_VENDOR_ID_MARVELL 0x11ab
+#define PCI_VENDOR_ID_MARVELL_EXT 0x1b4b
#define PCI_DEVICE_ID_MARVELL_GT64111 0x4146
#define PCI_DEVICE_ID_MARVELL_GT64260 0x6430
#define PCI_DEVICE_ID_MARVELL_MV64360 0x6460
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
index e6f91b1406d8..9572669eea97 100644
--- a/include/linux/pcieport_if.h
+++ b/include/linux/pcieport_if.h
@@ -62,7 +62,7 @@ struct pcie_port_service_driver {
#define to_service_driver(d) \
container_of(d, struct pcie_port_service_driver, driver)
-extern int pcie_port_service_register(struct pcie_port_service_driver *new);
-extern void pcie_port_service_unregister(struct pcie_port_service_driver *new);
+int pcie_port_service_register(struct pcie_port_service_driver *new);
+void pcie_port_service_unregister(struct pcie_port_service_driver *new);
#endif /* _PCIEPORT_IF_H_ */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e692a022527b..2d02c76a01be 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -321,7 +321,6 @@ extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
-extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
struct nsproxy;
struct user_namespace;
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index ebfadc56d1b4..864e324da80d 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -292,12 +292,12 @@
/* Message Signalled Interrupts registers */
-#define PCI_MSI_FLAGS 2 /* Various flags */
-#define PCI_MSI_FLAGS_64BIT 0x80 /* 64-bit addresses allowed */
-#define PCI_MSI_FLAGS_QSIZE 0x70 /* Message queue size configured */
-#define PCI_MSI_FLAGS_QMASK 0x0e /* Maximum queue size available */
-#define PCI_MSI_FLAGS_ENABLE 0x01 /* MSI feature enabled */
-#define PCI_MSI_FLAGS_MASKBIT 0x100 /* 64-bit mask bits allowed */
+#define PCI_MSI_FLAGS 2 /* Message Control */
+#define PCI_MSI_FLAGS_ENABLE 0x0001 /* MSI feature enabled */
+#define PCI_MSI_FLAGS_QMASK 0x000e /* Maximum queue size available */
+#define PCI_MSI_FLAGS_QSIZE 0x0070 /* Message queue size configured */
+#define PCI_MSI_FLAGS_64BIT 0x0080 /* 64-bit addresses allowed */
+#define PCI_MSI_FLAGS_MASKBIT 0x0100 /* Per-vector masking capable */
#define PCI_MSI_RFU 3 /* Rest of capability flags */
#define PCI_MSI_ADDRESS_LO 4 /* Lower 32 bits */
#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
@@ -309,13 +309,17 @@
#define PCI_MSI_PENDING_64 20 /* Pending intrs for 64-bit devices */
/* MSI-X registers */
-#define PCI_MSIX_FLAGS 2
-#define PCI_MSIX_FLAGS_QSIZE 0x7FF
-#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
-#define PCI_MSIX_FLAGS_MASKALL (1 << 14)
-#define PCI_MSIX_TABLE 4
-#define PCI_MSIX_PBA 8
-#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
+#define PCI_MSIX_FLAGS 2 /* Message Control */
+#define PCI_MSIX_FLAGS_QSIZE 0x07FF /* Table size */
+#define PCI_MSIX_FLAGS_MASKALL 0x4000 /* Mask all vectors for this function */
+#define PCI_MSIX_FLAGS_ENABLE 0x8000 /* MSI-X enable */
+#define PCI_MSIX_TABLE 4 /* Table offset */
+#define PCI_MSIX_TABLE_BIR 0x00000007 /* BAR index */
+#define PCI_MSIX_TABLE_OFFSET 0xfffffff8 /* Offset into specified BAR */
+#define PCI_MSIX_PBA 8 /* Pending Bit Array offset */
+#define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */
+#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */
+#define PCI_MSIX_FLAGS_BIRMASK (7 << 0) /* deprecated */
#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
/* MSI-X entry's format */
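The MSI defines above are now expressed as 16-bit Message Control values, and PCI_MSIX_TABLE_BIR/PCI_MSIX_TABLE_OFFSET (with their PBA counterparts) replace the deprecated PCI_MSIX_FLAGS_BIRMASK. A userspace sketch of how such register values would typically be decoded; the defines are copied from the hunk above, and the register contents are made-up examples rather than values from any real device.

#include <stdio.h>
#include <stdint.h>

#define PCI_MSI_FLAGS_ENABLE	0x0001	/* MSI feature enabled */
#define PCI_MSI_FLAGS_QMASK	0x000e	/* Maximum queue size available */
#define PCI_MSI_FLAGS_64BIT	0x0080	/* 64-bit addresses allowed */

#define PCI_MSIX_FLAGS_QSIZE	0x07ff	/* Table size */
#define PCI_MSIX_FLAGS_ENABLE	0x8000	/* MSI-X enable */
#define PCI_MSIX_TABLE_BIR	0x00000007	/* BAR index */
#define PCI_MSIX_TABLE_OFFSET	0xfffffff8	/* Offset into specified BAR */

int main(void)
{
	uint16_t msi_ctrl = 0x0081;		/* example: enabled, 64-bit capable */
	uint16_t msix_ctrl = 0x8007;		/* example: enabled, 8 table entries */
	uint32_t msix_table = 0x00002004;	/* example dword at PCI_MSIX_TABLE */

	printf("MSI:   enabled=%u 64bit=%u max_vectors=%u\n",
	       !!(msi_ctrl & PCI_MSI_FLAGS_ENABLE),
	       !!(msi_ctrl & PCI_MSI_FLAGS_64BIT),
	       1u << ((msi_ctrl & PCI_MSI_FLAGS_QMASK) >> 1));

	printf("MSI-X: enabled=%u entries=%u bar=%u offset=0x%x\n",
	       !!(msix_ctrl & PCI_MSIX_FLAGS_ENABLE),
	       (msix_ctrl & PCI_MSIX_FLAGS_QSIZE) + 1,
	       msix_table & PCI_MSIX_TABLE_BIR,
	       msix_table & PCI_MSIX_TABLE_OFFSET);
	return 0;
}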
diff --git a/include/xen/events.h b/include/xen/events.h
index c6bfe01acf6b..b2b27c6a0f7b 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -90,8 +90,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc);
/* Bind an MSI pirq to an irq. */
int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
- int pirq, int vector, const char *name,
- domid_t domid);
+ int pirq, const char *name, domid_t domid);
#endif
/* De-allocates the above mentioned physical interrupt. */
diff --git a/kernel/.gitignore b/kernel/.gitignore
index ab4f1090f437..b3097bde4e9c 100644
--- a/kernel/.gitignore
+++ b/kernel/.gitignore
@@ -4,3 +4,4 @@
config_data.h
config_data.gz
timeconst.h
+hz.bc
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4d3124b39277..9fcb0944f071 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4596,6 +4596,7 @@ void perf_event_comm(struct task_struct *task)
struct perf_event_context *ctx;
int ctxn;
+ rcu_read_lock();
for_each_task_context_nr(ctxn) {
ctx = task->perf_event_ctxp[ctxn];
if (!ctx)
@@ -4603,6 +4604,7 @@ void perf_event_comm(struct task_struct *task)
perf_event_enable_on_exec(ctx);
}
+ rcu_read_unlock();
if (!atomic_read(&nr_comm_events))
return;
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8a0efac4f99d..6a3bccba7e7d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -380,6 +380,13 @@ static int verbose(struct lock_class *class)
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
+static void print_lockdep_off(const char *bug_msg)
+{
+ printk(KERN_DEBUG "%s\n", bug_msg);
+ printk(KERN_DEBUG "turning off the locking correctness validator.\n");
+ printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
+}
+
static int save_trace(struct stack_trace *trace)
{
trace->nr_entries = 0;
@@ -409,8 +416,7 @@ static int save_trace(struct stack_trace *trace)
if (!debug_locks_off_graph_unlock())
return 0;
- printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
- printk("turning off the locking correctness validator.\n");
+ print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
dump_stack();
return 0;
@@ -763,8 +769,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
}
raw_local_irq_restore(flags);
- printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
- printk("turning off the locking correctness validator.\n");
+ print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
dump_stack();
return NULL;
}
@@ -834,8 +839,7 @@ static struct lock_list *alloc_list_entry(void)
if (!debug_locks_off_graph_unlock())
return NULL;
- printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
- printk("turning off the locking correctness validator.\n");
+ print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
dump_stack();
return NULL;
}
@@ -2000,7 +2004,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
struct lock_class *class = hlock_class(hlock);
struct list_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
- struct held_lock *hlock_curr, *hlock_next;
+ struct held_lock *hlock_curr;
int i, j;
/*
@@ -2048,8 +2052,7 @@ cache_hit:
if (!debug_locks_off_graph_unlock())
return 0;
- printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
- printk("turning off the locking correctness validator.\n");
+ print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
dump_stack();
return 0;
}
@@ -2057,12 +2060,10 @@ cache_hit:
chain->chain_key = chain_key;
chain->irq_context = hlock->irq_context;
/* Find the first held_lock of current chain */
- hlock_next = hlock;
for (i = curr->lockdep_depth - 1; i >= 0; i--) {
hlock_curr = curr->held_locks + i;
- if (hlock_curr->irq_context != hlock_next->irq_context)
+ if (hlock_curr->irq_context != hlock->irq_context)
break;
- hlock_next = hlock;
}
i++;
chain->depth = curr->lockdep_depth + 1 - i;
@@ -3190,9 +3191,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
#endif
if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
debug_locks_off();
- printk("BUG: MAX_LOCK_DEPTH too low, depth: %i max: %lu!\n",
+ print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
+ printk(KERN_DEBUG "depth: %i max: %lu!\n",
curr->lockdep_depth, MAX_LOCK_DEPTH);
- printk("turning off the locking correctness validator.\n");
lockdep_print_held_locks(current);
debug_show_all_locks();
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 52f23011b6e0..ad53a664f113 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -37,6 +37,12 @@
# include <asm/mutex.h>
#endif
+/*
+ * A negative mutex count indicates that there are waiters sleeping on the
+ * mutex.
+ */
+#define MUTEX_SHOW_NO_WAITER(mutex) (atomic_read(&(mutex)->count) >= 0)
+
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
@@ -44,6 +50,9 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
spin_lock_init(&lock->wait_lock);
INIT_LIST_HEAD(&lock->wait_list);
mutex_clear_owner(lock);
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+ lock->spin_mlock = NULL;
+#endif
debug_mutex_init(lock, name, key);
}
@@ -95,6 +104,124 @@ void __sched mutex_lock(struct mutex *lock)
EXPORT_SYMBOL(mutex_lock);
#endif
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+/*
+ * To avoid a stampede of mutex spinners trying to acquire the mutex more
+ * or less simultaneously, each spinner must first acquire an MCS lock
+ * before spinning on the owner field.
+ *
+ * We don't inline mspin_lock() so that perf can correctly account for the
+ * time spent in this lock function.
+ */
+struct mspin_node {
+	struct mspin_node *next;
+ int locked; /* 1 if lock acquired */
+};
+#define MLOCK(mutex) ((struct mspin_node **)&((mutex)->spin_mlock))
+
+static noinline
+void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
+{
+ struct mspin_node *prev;
+
+ /* Init node */
+ node->locked = 0;
+ node->next = NULL;
+
+ prev = xchg(lock, node);
+ if (likely(prev == NULL)) {
+ /* Lock acquired */
+ node->locked = 1;
+ return;
+ }
+ ACCESS_ONCE(prev->next) = node;
+ smp_wmb();
+ /* Wait until the lock holder passes the lock down */
+ while (!ACCESS_ONCE(node->locked))
+ arch_mutex_cpu_relax();
+}
+
+static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
+{
+ struct mspin_node *next = ACCESS_ONCE(node->next);
+
+ if (likely(!next)) {
+ /*
+ * Release the lock by setting it to NULL
+ */
+ if (cmpxchg(lock, node, NULL) == node)
+ return;
+ /* Wait until the next pointer is set */
+ while (!(next = ACCESS_ONCE(node->next)))
+ arch_mutex_cpu_relax();
+ }
+ ACCESS_ONCE(next->locked) = 1;
+ smp_wmb();
+}
+
+/*
+ * Mutex spinning code migrated from kernel/sched/core.c
+ */
+
+static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
+{
+ if (lock->owner != owner)
+ return false;
+
+ /*
+	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
+	 * lock->owner still matches owner. If that fails, owner might point
+	 * to free()d memory; if it still matches, the rcu_read_lock()
+	 * ensures the memory stays valid.
+ */
+ barrier();
+
+ return owner->on_cpu;
+}
+
+/*
+ * Look out! "owner" is an entirely speculative pointer
+ * access and not reliable.
+ */
+static noinline
+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+{
+ rcu_read_lock();
+ while (owner_running(lock, owner)) {
+ if (need_resched())
+ break;
+
+ arch_mutex_cpu_relax();
+ }
+ rcu_read_unlock();
+
+ /*
+	 * We break out of the loop above on need_resched() and when the
+	 * owner changes, which is a sign of heavy contention. Return
+ * success only when lock->owner is NULL.
+ */
+ return lock->owner == NULL;
+}
+
+/*
+ * Initial check for entering the mutex spinning loop
+ */
+static inline int mutex_can_spin_on_owner(struct mutex *lock)
+{
+ int retval = 1;
+
+ rcu_read_lock();
+ if (lock->owner)
+ retval = lock->owner->on_cpu;
+ rcu_read_unlock();
+ /*
+	 * If lock->owner is not set, the owner may have just acquired the
+	 * mutex and not yet set the field, or the mutex may have been released.
+ */
+ return retval;
+}
+#endif
+
static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
/**
@@ -158,25 +285,39 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
*
* We can't do this for DEBUG_MUTEXES because that relies on wait_lock
* to serialize everything.
+ *
+	 * The mutex spinners are queued up using an MCS lock so that only one
+ * spinner can compete for the mutex. However, if mutex spinning isn't
+ * going to happen, there is no point in going through the lock/unlock
+ * overhead.
*/
+ if (!mutex_can_spin_on_owner(lock))
+ goto slowpath;
for (;;) {
struct task_struct *owner;
+ struct mspin_node node;
/*
* If there's an owner, wait for it to either
* release the lock or go to sleep.
*/
+ mspin_lock(MLOCK(lock), &node);
owner = ACCESS_ONCE(lock->owner);
- if (owner && !mutex_spin_on_owner(lock, owner))
+ if (owner && !mutex_spin_on_owner(lock, owner)) {
+ mspin_unlock(MLOCK(lock), &node);
break;
+ }
- if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
+ if ((atomic_read(&lock->count) == 1) &&
+ (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
lock_acquired(&lock->dep_map, ip);
mutex_set_owner(lock);
+ mspin_unlock(MLOCK(lock), &node);
preempt_enable();
return 0;
}
+ mspin_unlock(MLOCK(lock), &node);
/*
* When there's no owner, we might have preempted between the
@@ -195,6 +336,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
*/
arch_mutex_cpu_relax();
}
+slowpath:
#endif
spin_lock_mutex(&lock->wait_lock, flags);
@@ -205,7 +347,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
list_add_tail(&waiter.list, &lock->wait_list);
waiter.task = task;
- if (atomic_xchg(&lock->count, -1) == 1)
+ if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
goto done;
lock_contended(&lock->dep_map, ip);
@@ -220,7 +362,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* that when we release the lock, we properly wake up the
* other waiters:
*/
- if (atomic_xchg(&lock->count, -1) == 1)
+ if (MUTEX_SHOW_NO_WAITER(lock) &&
+ (atomic_xchg(&lock->count, -1) == 1))
break;
/*
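The mspin_lock()/mspin_unlock() pair introduced in this file is an MCS-style queue lock: each spinner waits on a flag in its own node, and the lock is handed along the queue in FIFO order, so only one spinner at a time competes for the mutex itself. For readers unfamiliar with the structure, here is a standalone sketch of the same idea using C11 atomics. It is illustrative only and assumes nothing from the kernel (no xchg()/cmpxchg(), ACCESS_ONCE() or arch_mutex_cpu_relax()); the memory orderings are this sketch's own choices, not taken from the patch.

#include <stdatomic.h>
#include <stddef.h>

struct mcs_node {
	struct mcs_node *_Atomic next;
	atomic_bool locked;			/* set when the lock is handed to us */
};

static void mcs_lock(struct mcs_node *_Atomic *tail, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&node->locked, false, memory_order_relaxed);

	/* Swap ourselves in as the new tail of the queue. */
	prev = atomic_exchange_explicit(tail, node, memory_order_acq_rel);
	if (!prev)
		return;				/* queue was empty: lock acquired */

	/* Publish our node to the predecessor, then spin on our own flag. */
	atomic_store_explicit(&prev->next, node, memory_order_release);
	while (!atomic_load_explicit(&node->locked, memory_order_acquire))
		;
}

static void mcs_unlock(struct mcs_node *_Atomic *tail, struct mcs_node *node)
{
	struct mcs_node *next = atomic_load_explicit(&node->next, memory_order_acquire);

	if (!next) {
		/* No successor visible: try to swing the tail back to empty. */
		struct mcs_node *expected = node;

		if (atomic_compare_exchange_strong(tail, &expected,
						   (struct mcs_node *)NULL))
			return;
		/* A successor is enqueueing itself; wait for its next pointer. */
		while (!(next = atomic_load_explicit(&node->next, memory_order_acquire)))
			;
	}
	/* Hand the lock to the next waiter in FIFO order. */
	atomic_store_explicit(&next->locked, true, memory_order_release);
}

int main(void)
{
	static struct mcs_node *_Atomic tail;	/* plays the role of mutex->spin_mlock */
	struct mcs_node me;			/* each spinner supplies its own node */

	mcs_lock(&tail, &me);
	/* ... critical section ... */
	mcs_unlock(&tail, &me);
	return 0;
}

In the patch the node lives on each spinner's stack and mutex->spin_mlock serves as the tail pointer, which is why __mutex_init() now clears spin_mlock.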
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index 7890b10084a7..1d96dd0d93c1 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -14,6 +14,7 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/freezer.h>
+#include <linux/stat.h>
#include "rtmutex.h"
@@ -366,8 +367,8 @@ static ssize_t sysfs_test_status(struct device *dev, struct device_attribute *at
return curr - buf;
}
-static DEVICE_ATTR(status, 0600, sysfs_test_status, NULL);
-static DEVICE_ATTR(command, 0600, NULL, sysfs_test_command);
+static DEVICE_ATTR(status, S_IRUSR, sysfs_test_status, NULL);
+static DEVICE_ATTR(command, S_IWUSR, NULL, sysfs_test_command);
static struct bus_type rttest_subsys = {
.name = "rttest",
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 67d04651f44b..42053547e0f5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2999,51 +2999,6 @@ void __sched schedule_preempt_disabled(void)
preempt_disable();
}
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-
-static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
-{
- if (lock->owner != owner)
- return false;
-
- /*
- * Ensure we emit the owner->on_cpu, dereference _after_ checking
- * lock->owner still matches owner, if that fails, owner might
- * point to free()d memory, if it still matches, the rcu_read_lock()
- * ensures the memory stays valid.
- */
- barrier();
-
- return owner->on_cpu;
-}
-
-/*
- * Look out! "owner" is an entirely speculative pointer
- * access and not reliable.
- */
-int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
-{
- if (!sched_feat(OWNER_SPIN))
- return 0;
-
- rcu_read_lock();
- while (owner_running(lock, owner)) {
- if (need_resched())
- break;
-
- arch_mutex_cpu_relax();
- }
- rcu_read_unlock();
-
- /*
- * We break out the loop above on need_resched() and when the
- * owner changed, which is a sign for heavy contention. Return
- * success only when lock->owner is NULL.
- */
- return lock->owner == NULL;
-}
-#endif
-
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 1ad1d2b5395f..99399f8e4799 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -46,13 +46,6 @@ SCHED_FEAT(DOUBLE_TICK, false)
SCHED_FEAT(LB_BIAS, true)
/*
- * Spin-wait on mutex acquisition when the mutex owner is running on
- * another cpu -- assumes that when the owner is running, it will soon
- * release the lock. Decreases scheduling overhead.
- */
-SCHED_FEAT(OWNER_SPIN, true)
-
-/*
* Decrement CPU power based on time not spent running tasks
*/
SCHED_FEAT(NONTASK_POWER, true)
diff --git a/mm/nommu.c b/mm/nommu.c
index 2f3ea749c318..e001768b14e8 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1838,6 +1838,16 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
}
EXPORT_SYMBOL(remap_pfn_range);
+int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
+{
+ unsigned long pfn = start >> PAGE_SHIFT;
+ unsigned long vm_len = vma->vm_end - vma->vm_start;
+
+ pfn += vma->vm_pgoff;
+ return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_iomap_memory);
+
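vm_iomap_memory() above is intended to be called from a driver's ->mmap handler, letting the helper (rather than each driver) deal with mapping the device's physical window into the vma. A hedged sketch of such a handler; struct foo_device, its fields, and foo_mmap() are hypothetical and not part of this patch.

#include <linux/fs.h>
#include <linux/mm.h>

struct foo_device {
	phys_addr_t	aperture_base;		/* physical base of the MMIO window */
	size_t		aperture_size;		/* length of the MMIO window */
};

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_device *foo = file->private_data;

	/* Map the vma onto the device aperture in a single call. */
	return vm_iomap_memory(vma, foo->aperture_base, foo->aperture_size);
}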
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff)
{