Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/ide.h | 11
-rw-r--r--  include/asm-alpha/semaphore.h | 150
-rw-r--r--  include/asm-arm/arch-iop13xx/adma.h | 5
-rw-r--r--  include/asm-arm/arch-s3c2410/spi.h | 1
-rw-r--r--  include/asm-arm/hardware/iop3xx-adma.h | 8
-rw-r--r--  include/asm-arm/hardware/iop_adma.h | 2
-rw-r--r--  include/asm-arm/ide.h | 8
-rw-r--r--  include/asm-arm/semaphore-helper.h | 84
-rw-r--r--  include/asm-arm/semaphore.h | 99
-rw-r--r--  include/asm-avr32/semaphore.h | 109
-rw-r--r--  include/asm-blackfin/ide.h | 4
-rw-r--r--  include/asm-blackfin/semaphore-helper.h | 82
-rw-r--r--  include/asm-blackfin/semaphore.h | 106
-rw-r--r--  include/asm-cris/semaphore-helper.h | 78
-rw-r--r--  include/asm-cris/semaphore.h | 134
-rw-r--r--  include/asm-frv/semaphore.h | 156
-rw-r--r--  include/asm-h8300/semaphore-helper.h | 85
-rw-r--r--  include/asm-h8300/semaphore.h | 191
-rw-r--r--  include/asm-ia64/acpi.h | 33
-rw-r--r--  include/asm-ia64/cputime.h | 104
-rw-r--r--  include/asm-ia64/elf.h | 31
-rw-r--r--  include/asm-ia64/ide.h | 10
-rw-r--r--  include/asm-ia64/kprobes.h | 7
-rw-r--r--  include/asm-ia64/kregs.h | 3
-rw-r--r--  include/asm-ia64/meminit.h | 3
-rw-r--r--  include/asm-ia64/numa.h | 2
-rw-r--r--  include/asm-ia64/pal.h | 72
-rw-r--r--  include/asm-ia64/pgtable.h | 2
-rw-r--r--  include/asm-ia64/sal.h | 21
-rw-r--r--  include/asm-ia64/semaphore.h | 100
-rw-r--r--  include/asm-ia64/smp.h | 3
-rw-r--r--  include/asm-ia64/system.h | 12
-rw-r--r--  include/asm-ia64/thread_info.h | 14
-rw-r--r--  include/asm-ia64/tlb.h | 26
-rw-r--r--  include/asm-ia64/tlbflush.h | 1
-rw-r--r--  include/asm-m32r/ide.h | 10
-rw-r--r--  include/asm-m32r/semaphore.h | 145
-rw-r--r--  include/asm-m68k/semaphore-helper.h | 142
-rw-r--r--  include/asm-m68k/semaphore.h | 164
-rw-r--r--  include/asm-m68knommu/semaphore-helper.h | 82
-rw-r--r--  include/asm-m68knommu/semaphore.h | 154
-rw-r--r--  include/asm-mips/mach-au1x00/au1xxx_ide.h | 18
-rw-r--r--  include/asm-mips/mach-db1x00/db1200.h | 4
-rw-r--r--  include/asm-mips/mach-generic/ide.h | 10
-rw-r--r--  include/asm-mips/mach-pb1x00/pb1200.h | 4
-rw-r--r--  include/asm-mips/semaphore.h | 109
-rw-r--r--  include/asm-mn10300/semaphore.h | 170
-rw-r--r--  include/asm-parisc/ide.h | 2
-rw-r--r--  include/asm-parisc/semaphore-helper.h | 89
-rw-r--r--  include/asm-parisc/semaphore.h | 146
-rw-r--r--  include/asm-powerpc/ide.h | 57
-rw-r--r--  include/asm-powerpc/mediabay.h | 6
-rw-r--r--  include/asm-powerpc/semaphore.h | 95
-rw-r--r--  include/asm-s390/cio.h | 4
-rw-r--r--  include/asm-s390/cpu.h | 8
-rw-r--r--  include/asm-s390/debug.h | 5
-rw-r--r--  include/asm-s390/extmem.h | 11
-rw-r--r--  include/asm-s390/hardirq.h | 2
-rw-r--r--  include/asm-s390/lowcore.h | 11
-rw-r--r--  include/asm-s390/processor.h | 7
-rw-r--r--  include/asm-s390/semaphore.h | 108
-rw-r--r--  include/asm-s390/smp.h | 3
-rw-r--r--  include/asm-s390/sysinfo.h | 116
-rw-r--r--  include/asm-s390/system.h | 2
-rw-r--r--  include/asm-s390/timex.h | 13
-rw-r--r--  include/asm-s390/tlbflush.h | 36
-rw-r--r--  include/asm-s390/topology.h | 23
-rw-r--r--  include/asm-sh/ide.h | 3
-rw-r--r--  include/asm-sh/semaphore-helper.h | 89
-rw-r--r--  include/asm-sh/semaphore.h | 116
-rw-r--r--  include/asm-sparc/ide.h | 2
-rw-r--r--  include/asm-sparc/semaphore.h | 193
-rw-r--r--  include/asm-sparc64/ide.h | 2
-rw-r--r--  include/asm-sparc64/semaphore.h | 54
-rw-r--r--  include/asm-um/semaphore.h | 7
-rw-r--r--  include/asm-um/tlb.h | 1
-rw-r--r--  include/asm-v850/semaphore.h | 85
-rw-r--r--  include/asm-x86/Kbuild | 1
-rw-r--r--  include/asm-x86/a.out-core.h | 12
-rw-r--r--  include/asm-x86/acpi.h | 8
-rw-r--r--  include/asm-x86/alternative.h | 32
-rw-r--r--  include/asm-x86/apic.h | 7
-rw-r--r--  include/asm-x86/apicdef.h | 69
-rw-r--r--  include/asm-x86/atomic_32.h | 143
-rw-r--r--  include/asm-x86/atomic_64.h | 251
-rw-r--r--  include/asm-x86/bios_ebda.h (renamed from include/asm-x86/mach-default/bios_ebda.h) | 0
-rw-r--r--  include/asm-x86/bitops.h | 54
-rw-r--r--  include/asm-x86/bitops_32.h | 40
-rw-r--r--  include/asm-x86/bitops_64.h | 40
-rw-r--r--  include/asm-x86/bug.h | 34
-rw-r--r--  include/asm-x86/byteorder.h | 39
-rw-r--r--  include/asm-x86/cacheflush.h | 80
-rw-r--r--  include/asm-x86/checksum_32.h | 152
-rw-r--r--  include/asm-x86/checksum_64.h | 118
-rw-r--r--  include/asm-x86/cmpxchg_32.h | 253
-rw-r--r--  include/asm-x86/cmpxchg_64.h | 134
-rw-r--r--  include/asm-x86/compat.h | 2
-rw-r--r--  include/asm-x86/cpufeature.h | 8
-rw-r--r--  include/asm-x86/current_32.h | 2
-rw-r--r--  include/asm-x86/current_64.h | 12
-rw-r--r--  include/asm-x86/desc.h | 61
-rw-r--r--  include/asm-x86/desc_defs.h | 15
-rw-r--r--  include/asm-x86/div64.h | 35
-rw-r--r--  include/asm-x86/dma.h | 45
-rw-r--r--  include/asm-x86/dwarf2_64.h | 9
-rw-r--r--  include/asm-x86/e820_32.h | 4
-rw-r--r--  include/asm-x86/e820_64.h | 24
-rw-r--r--  include/asm-x86/edac.h | 4
-rw-r--r--  include/asm-x86/efi.h | 2
-rw-r--r--  include/asm-x86/elf.h | 112
-rw-r--r--  include/asm-x86/fixmap_32.h | 21
-rw-r--r--  include/asm-x86/fixmap_64.h | 20
-rw-r--r--  include/asm-x86/floppy.h | 93
-rw-r--r--  include/asm-x86/futex.h | 101
-rw-r--r--  include/asm-x86/genapic_32.h | 105
-rw-r--r--  include/asm-x86/genapic_64.h | 10
-rw-r--r--  include/asm-x86/geode.h | 2
-rw-r--r--  include/asm-x86/highmem.h | 2
-rw-r--r--  include/asm-x86/hw_irq_64.h | 15
-rw-r--r--  include/asm-x86/hypertransport.h | 15
-rw-r--r--  include/asm-x86/i387.h | 23
-rw-r--r--  include/asm-x86/i8259.h | 29
-rw-r--r--  include/asm-x86/ia32.h | 62
-rw-r--r--  include/asm-x86/ide.h | 10
-rw-r--r--  include/asm-x86/io.h | 6
-rw-r--r--  include/asm-x86/io_32.h | 137
-rw-r--r--  include/asm-x86/io_64.h | 110
-rw-r--r--  include/asm-x86/io_apic.h | 8
-rw-r--r--  include/asm-x86/ioctls.h | 13
-rw-r--r--  include/asm-x86/ipcbuf.h | 3
-rw-r--r--  include/asm-x86/ipi.h | 11
-rw-r--r--  include/asm-x86/irq_32.h | 2
-rw-r--r--  include/asm-x86/irq_64.h | 4
-rw-r--r--  include/asm-x86/irqflags.h | 59
-rw-r--r--  include/asm-x86/kdebug.h | 5
-rw-r--r--  include/asm-x86/kexec.h | 71
-rw-r--r--  include/asm-x86/kgdb.h | 81
-rw-r--r--  include/asm-x86/kprobes.h | 12
-rw-r--r--  include/asm-x86/kvm_host.h | 24
-rw-r--r--  include/asm-x86/kvm_x86_emulate.h | 28
-rw-r--r--  include/asm-x86/lguest.h | 11
-rw-r--r--  include/asm-x86/lguest_hcall.h | 5
-rw-r--r--  include/asm-x86/linkage.h | 3
-rw-r--r--  include/asm-x86/local.h | 105
-rw-r--r--  include/asm-x86/mach-bigsmp/mach_apic.h | 18
-rw-r--r--  include/asm-x86/mach-default/mach_apic.h | 95
-rw-r--r--  include/asm-x86/mach-default/mach_apicdef.h | 9
-rw-r--r--  include/asm-x86/mach-default/mach_ipi.h | 10
-rw-r--r--  include/asm-x86/mach-default/mach_mpparse.h | 11
-rw-r--r--  include/asm-x86/mach-default/mach_reboot.h | 61
-rw-r--r--  include/asm-x86/mach-default/smpboot_hooks.h | 7
-rw-r--r--  include/asm-x86/mach-es7000/mach_apic.h | 20
-rw-r--r--  include/asm-x86/mach-es7000/mach_mpparse.h | 11
-rw-r--r--  include/asm-x86/mach-generic/mach_apic.h | 1
-rw-r--r--  include/asm-x86/mach-generic/mach_mpparse.h | 5
-rw-r--r--  include/asm-x86/mach-numaq/mach_apic.h | 10
-rw-r--r--  include/asm-x86/mach-numaq/mach_mpparse.h | 23
-rw-r--r--  include/asm-x86/mach-summit/mach_apic.h | 14
-rw-r--r--  include/asm-x86/mach-summit/mach_mpparse.h | 11
-rw-r--r--  include/asm-x86/mach-visws/mach_apic.h | 2
-rw-r--r--  include/asm-x86/mach-visws/smpboot_hooks.h | 4
-rw-r--r--  include/asm-x86/mach_apic.h | 29
-rw-r--r--  include/asm-x86/mc146818rtc.h | 16
-rw-r--r--  include/asm-x86/mca_dma.h | 34
-rw-r--r--  include/asm-x86/mmu.h | 4
-rw-r--r--  include/asm-x86/mmu_context_32.h | 12
-rw-r--r--  include/asm-x86/mmu_context_64.h | 21
-rw-r--r--  include/asm-x86/mmx.h | 2
-rw-r--r--  include/asm-x86/mmzone_32.h | 4
-rw-r--r--  include/asm-x86/mmzone_64.h | 24
-rw-r--r--  include/asm-x86/mpspec.h | 47
-rw-r--r--  include/asm-x86/mpspec_def.h | 46
-rw-r--r--  include/asm-x86/msidef.h | 12
-rw-r--r--  include/asm-x86/msr-index.h | 4
-rw-r--r--  include/asm-x86/msr.h | 89
-rw-r--r--  include/asm-x86/mtrr.h | 73
-rw-r--r--  include/asm-x86/mutex_32.h | 66
-rw-r--r--  include/asm-x86/mutex_64.h | 73
-rw-r--r--  include/asm-x86/nmi.h | 94
-rw-r--r--  include/asm-x86/nmi_32.h | 61
-rw-r--r--  include/asm-x86/nmi_64.h | 90
-rw-r--r--  include/asm-x86/nops.h | 2
-rw-r--r--  include/asm-x86/numa_64.h | 5
-rw-r--r--  include/asm-x86/numaq.h | 9
-rw-r--r--  include/asm-x86/page.h | 4
-rw-r--r--  include/asm-x86/page_32.h | 9
-rw-r--r--  include/asm-x86/page_64.h | 10
-rw-r--r--  include/asm-x86/param.h | 4
-rw-r--r--  include/asm-x86/paravirt.h | 54
-rw-r--r--  include/asm-x86/parport.h | 6
-rw-r--r--  include/asm-x86/pat.h | 16
-rw-r--r--  include/asm-x86/pci-direct.h | 2
-rw-r--r--  include/asm-x86/pci.h | 8
-rw-r--r--  include/asm-x86/pci_64.h | 14
-rw-r--r--  include/asm-x86/pda.h | 81
-rw-r--r--  include/asm-x86/percpu.h | 104
-rw-r--r--  include/asm-x86/pgtable-2level.h | 18
-rw-r--r--  include/asm-x86/pgtable-3level.h | 48
-rw-r--r--  include/asm-x86/pgtable.h | 178
-rw-r--r--  include/asm-x86/pgtable_32.h | 100
-rw-r--r--  include/asm-x86/pgtable_64.h | 151
-rw-r--r--  include/asm-x86/posix_types.h | 8
-rw-r--r--  include/asm-x86/posix_types_32.h | 47
-rw-r--r--  include/asm-x86/posix_types_64.h | 54
-rw-r--r--  include/asm-x86/processor.h | 729
-rw-r--r--  include/asm-x86/proto.h | 6
-rw-r--r--  include/asm-x86/ptrace.h | 46
-rw-r--r--  include/asm-x86/reboot.h | 3
-rw-r--r--  include/asm-x86/resume-trace.h | 15
-rw-r--r--  include/asm-x86/rio.h | 76
-rw-r--r--  include/asm-x86/rwsem.h | 169
-rw-r--r--  include/asm-x86/segment.h | 3
-rw-r--r--  include/asm-x86/semaphore.h | 6
-rw-r--r--  include/asm-x86/semaphore_32.h | 175
-rw-r--r--  include/asm-x86/semaphore_64.h | 180
-rw-r--r--  include/asm-x86/setup.h | 8
-rw-r--r--  include/asm-x86/sigcontext.h | 7
-rw-r--r--  include/asm-x86/sigcontext32.h | 4
-rw-r--r--  include/asm-x86/signal.h | 48
-rw-r--r--  include/asm-x86/smp.h | 210
-rw-r--r--  include/asm-x86/smp_32.h | 165
-rw-r--r--  include/asm-x86/smp_64.h | 101
-rw-r--r--  include/asm-x86/sparsemem.h | 6
-rw-r--r--  include/asm-x86/spinlock.h | 117
-rw-r--r--  include/asm-x86/srat.h | 2
-rw-r--r--  include/asm-x86/string_32.h | 2
-rw-r--r--  include/asm-x86/string_64.h | 66
-rw-r--r--  include/asm-x86/suspend_32.h | 12
-rw-r--r--  include/asm-x86/suspend_64.h | 5
-rw-r--r--  include/asm-x86/swiotlb.h | 28
-rw-r--r--  include/asm-x86/sync_bitops.h | 77
-rw-r--r--  include/asm-x86/system.h | 115
-rw-r--r--  include/asm-x86/tce.h | 2
-rw-r--r--  include/asm-x86/thread_info_32.h | 88
-rw-r--r--  include/asm-x86/thread_info_64.h | 78
-rw-r--r--  include/asm-x86/tlbflush.h | 5
-rw-r--r--  include/asm-x86/topology.h | 23
-rw-r--r--  include/asm-x86/trampoline.h | 21
-rw-r--r--  include/asm-x86/tsc.h | 2
-rw-r--r--  include/asm-x86/uaccess_32.h | 316
-rw-r--r--  include/asm-x86/uaccess_64.h | 376
-rw-r--r--  include/asm-x86/unaligned.h | 2
-rw-r--r--  include/asm-x86/unistd.h | 8
-rw-r--r--  include/asm-x86/unistd_32.h | 2
-rw-r--r--  include/asm-x86/unistd_64.h | 2
-rw-r--r--  include/asm-x86/user32.h | 7
-rw-r--r--  include/asm-x86/user_32.h | 6
-rw-r--r--  include/asm-x86/user_64.h | 16
-rw-r--r--  include/asm-x86/uv/uv_hub.h | 284
-rw-r--r--  include/asm-x86/uv/uv_mmrs.h | 373
-rw-r--r--  include/asm-x86/vdso.h | 23
-rw-r--r--  include/asm-x86/vga.h | 4
-rw-r--r--  include/asm-x86/vm86.h | 23
-rw-r--r--  include/asm-x86/vmi.h | 88
-rw-r--r--  include/asm-x86/voyager.h | 51
-rw-r--r--  include/asm-x86/xor_32.h | 494
-rw-r--r--  include/asm-x86/xor_64.h | 294
-rw-r--r--  include/asm-xtensa/semaphore.h | 100
-rw-r--r--  include/linux/Kbuild | 5
-rw-r--r--  include/linux/arcdevice.h | 4
-rw-r--r--  include/linux/atalk.h | 2
-rw-r--r--  include/linux/attribute_container.h | 2
-rw-r--r--  include/linux/audit.h | 29
-rw-r--r--  include/linux/clocksource.h | 1
-rw-r--r--  include/linux/dmaengine.h | 36
-rw-r--r--  include/linux/filter.h | 35
-rw-r--r--  include/linux/fsl_devices.h | 2
-rw-r--r--  include/linux/hdreg.h | 4
-rw-r--r--  include/linux/hrtimer.h | 2
-rw-r--r--  include/linux/icmpv6.h | 11
-rw-r--r--  include/linux/ide.h | 174
-rw-r--r--  include/linux/ieee80211.h | 35
-rw-r--r--  include/linux/if_arp.h | 6
-rw-r--r--  include/linux/if_tun.h | 39
-rw-r--r--  include/linux/if_tunnel.h | 22
-rw-r--r--  include/linux/if_vlan.h | 6
-rw-r--r--  include/linux/if_wanpipe.h | 124
-rw-r--r--  include/linux/igmp.h | 38
-rw-r--r--  include/linux/in6.h | 34
-rw-r--r--  include/linux/inetdevice.h | 8
-rw-r--r--  include/linux/interrupt.h | 19
-rw-r--r--  include/linux/iocontext.h | 4
-rw-r--r--  include/linux/ipv6.h | 50
-rw-r--r--  include/linux/irq.h | 10
-rw-r--r--  include/linux/isdn.h | 6
-rw-r--r--  include/linux/kgdb.h | 281
-rw-r--r--  include/linux/libata.h | 718
-rw-r--r--  include/linux/lm_interface.h | 10
-rw-r--r--  include/linux/mbcache.h | 2
-rw-r--r--  include/linux/mlx4/cmd.h | 2
-rw-r--r--  include/linux/mlx4/cq.h | 19
-rw-r--r--  include/linux/mlx4/device.h | 1
-rw-r--r--  include/linux/mlx4/driver.h | 3
-rw-r--r--  include/linux/mlx4/qp.h | 15
-rw-r--r--  include/linux/mroute.h | 25
-rw-r--r--  include/linux/mroute6.h | 228
-rw-r--r--  include/linux/mtio.h | 1
-rw-r--r--  include/linux/net.h | 3
-rw-r--r--  include/linux/netdevice.h | 36
-rw-r--r--  include/linux/netfilter.h | 85
-rw-r--r--  include/linux/netfilter/nf_conntrack_dccp.h | 40
-rw-r--r--  include/linux/netfilter/nf_conntrack_sip.h | 185
-rw-r--r--  include/linux/netfilter/nfnetlink_conntrack.h | 8
-rw-r--r--  include/linux/netfilter/x_tables.h | 4
-rw-r--r--  include/linux/netfilter/xt_sctp.h | 84
-rw-r--r--  include/linux/netfilter_arp/arp_tables.h | 17
-rw-r--r--  include/linux/netfilter_bridge/ebt_nflog.h | 21
-rw-r--r--  include/linux/netfilter_ipv4.h | 2
-rw-r--r--  include/linux/nl80211.h | 218
-rw-r--r--  include/linux/pcounter.h | 74
-rw-r--r--  include/linux/phy.h | 61
-rw-r--r--  include/linux/pim.h | 45
-rw-r--r--  include/linux/quota.h | 8
-rw-r--r--  include/linux/scatterlist.h | 5
-rw-r--r--  include/linux/security.h | 162
-rw-r--r--  include/linux/selinux.h | 134
-rw-r--r--  include/linux/semaphore.h | 51
-rw-r--r--  include/linux/seq_file.h | 13
-rw-r--r--  include/linux/seq_file_net.h | 27
-rw-r--r--  include/linux/serial_core.h | 4
-rw-r--r--  include/linux/skbuff.h | 206
-rw-r--r--  include/linux/slub_def.h | 2
-rw-r--r--  include/linux/smc91x.h | 13
-rw-r--r--  include/linux/spinlock.h | 37
-rw-r--r--  include/linux/ssb/ssb.h | 54
-rw-r--r--  include/linux/ssb/ssb_driver_chipcommon.h | 7
-rw-r--r--  include/linux/ssb/ssb_driver_gige.h | 174
-rw-r--r--  include/linux/ssb/ssb_driver_pci.h | 19
-rw-r--r--  include/linux/tcp.h | 7
-rw-r--r--  include/linux/thermal.h | 2
-rw-r--r--  include/linux/thread_info.h | 12
-rw-r--r--  include/linux/transport_class.h | 5
-rw-r--r--  include/linux/tty_driver.h | 12
-rw-r--r--  include/linux/uaccess.h | 22
-rw-r--r--  include/linux/udp.h | 19
-rw-r--r--  include/linux/wireless.h | 1
-rw-r--r--  include/linux/xfrm.h | 3
-rw-r--r--  include/net/addrconf.h | 122
-rw-r--r--  include/net/cfg80211.h | 159
-rw-r--r--  include/net/dst.h | 10
-rw-r--r--  include/net/fib_rules.h | 1
-rw-r--r--  include/net/icmp.h | 9
-rw-r--r--  include/net/ieee80211.h | 1
-rw-r--r--  include/net/ieee80211softmac.h | 373
-rw-r--r--  include/net/ieee80211softmac_wx.h | 99
-rw-r--r--  include/net/inet_common.h | 11
-rw-r--r--  include/net/inet_connection_sock.h | 5
-rw-r--r--  include/net/inet_ecn.h | 2
-rw-r--r--  include/net/inet_frag.h | 6
-rw-r--r--  include/net/inet_hashtables.h | 29
-rw-r--r--  include/net/inet_sock.h | 7
-rw-r--r--  include/net/inet_timewait_sock.h | 18
-rw-r--r--  include/net/ip.h | 10
-rw-r--r--  include/net/ip6_fib.h | 18
-rw-r--r--  include/net/ip6_route.h | 56
-rw-r--r--  include/net/ipip.h | 10
-rw-r--r--  include/net/ipv6.h | 84
-rw-r--r--  include/net/irda/irlan_eth.h | 1
-rw-r--r--  include/net/llc_if.h | 5
-rw-r--r--  include/net/mac80211.h | 560
-rw-r--r--  include/net/mip6.h | 3
-rw-r--r--  include/net/ndisc.h | 31
-rw-r--r--  include/net/neighbour.h | 31
-rw-r--r--  include/net/net_namespace.h | 75
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 39
-rw-r--r--  include/net/netfilter/nf_conntrack_core.h | 4
-rw-r--r--  include/net/netfilter/nf_conntrack_expect.h | 22
-rw-r--r--  include/net/netfilter/nf_conntrack_helper.h | 5
-rw-r--r--  include/net/netfilter/nf_conntrack_l3proto.h | 19
-rw-r--r--  include/net/netfilter/nf_conntrack_l4proto.h | 13
-rw-r--r--  include/net/netfilter/nf_conntrack_tuple.h | 96
-rw-r--r--  include/net/netfilter/nf_nat_helper.h | 3
-rw-r--r--  include/net/netfilter/nf_nat_protocol.h | 46
-rw-r--r--  include/net/netfilter/nf_nat_rule.h | 3
-rw-r--r--  include/net/netlabel.h | 14
-rw-r--r--  include/net/netns/core.h | 16
-rw-r--r--  include/net/netns/dccp.h | 11
-rw-r--r--  include/net/netns/generic.h | 49
-rw-r--r--  include/net/netns/ipv4.h | 11
-rw-r--r--  include/net/netns/ipv6.h | 18
-rw-r--r--  include/net/pkt_cls.h | 2
-rw-r--r--  include/net/protocol.h | 3
-rw-r--r--  include/net/raw.h | 4
-rw-r--r--  include/net/request_sock.h | 9
-rw-r--r--  include/net/route.h | 5
-rw-r--r--  include/net/rtnetlink.h | 1
-rw-r--r--  include/net/sctp/command.h | 4
-rw-r--r--  include/net/sctp/sctp.h | 5
-rw-r--r--  include/net/sctp/sm.h | 8
-rw-r--r--  include/net/sctp/structs.h | 10
-rw-r--r--  include/net/sctp/ulpevent.h | 2
-rw-r--r--  include/net/sock.h | 118
-rw-r--r--  include/net/tcp.h | 35
-rw-r--r--  include/net/tipc/tipc_bearer.h | 9
-rw-r--r--  include/net/tipc/tipc_port.h | 13
-rw-r--r--  include/net/udp.h | 15
-rw-r--r--  include/net/wireless.h | 186
-rw-r--r--  include/net/xfrm.h | 69
-rw-r--r--  include/rdma/ib_user_verbs.h | 5
-rw-r--r--  include/rdma/ib_verbs.h | 35
-rw-r--r--  include/scsi/iscsi_proto.h | 6
-rw-r--r--  include/scsi/libsas.h | 2
-rw-r--r--  include/scsi/sas_ata.h | 4
-rw-r--r--  include/scsi/scsi_cmnd.h | 17
-rw-r--r--  include/scsi/scsi_eh.h | 5
-rw-r--r--  include/scsi/scsi_host.h | 1
406 files changed, 10058 insertions, 10702 deletions
diff --git a/include/asm-alpha/ide.h b/include/asm-alpha/ide.h
index b7bf68d0407b..f44129abc02c 100644
--- a/include/asm-alpha/ide.h
+++ b/include/asm-alpha/ide.h
@@ -13,9 +13,6 @@
#ifdef __KERNEL__
-
-#define IDE_ARCH_OBSOLETE_DEFAULTS
-
static inline int ide_default_irq(unsigned long base)
{
switch (base) {
@@ -40,14 +37,6 @@ static inline unsigned long ide_default_io_base(int index)
}
}
-#define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */
-
-#ifdef CONFIG_PCI
-#define ide_init_default_irq(base) (0)
-#else
-#define ide_init_default_irq(base) ide_default_irq(base)
-#endif
-
#include <asm-generic/ide_iops.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h
index f1e9278a9fe2..d9b2034ed1d2 100644
--- a/include/asm-alpha/semaphore.h
+++ b/include/asm-alpha/semaphore.h
@@ -1,149 +1 @@
-#ifndef _ALPHA_SEMAPHORE_H
-#define _ALPHA_SEMAPHORE_H
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1996, 2000 Richard Henderson
- */
-
-#include <asm/current.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <linux/compiler.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
- atomic_t count;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
- /*
- * Logically,
- * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- * except that gcc produces better initializing by parts yet.
- */
-
- atomic_set(&sem->count, val);
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void down(struct semaphore *);
-extern void __down_failed(struct semaphore *);
-extern int down_interruptible(struct semaphore *);
-extern int __down_failed_interruptible(struct semaphore *);
-extern int down_trylock(struct semaphore *);
-extern void up(struct semaphore *);
-extern void __up_wakeup(struct semaphore *);
-
-/*
- * Hidden out of line code is fun, but extremely messy. Rely on newer
- * compilers to do a respectable job with this. The contention cases
- * are handled out of line in arch/alpha/kernel/semaphore.c.
- */
-
-static inline void __down(struct semaphore *sem)
-{
- long count;
- might_sleep();
- count = atomic_dec_return(&sem->count);
- if (unlikely(count < 0))
- __down_failed(sem);
-}
-
-static inline int __down_interruptible(struct semaphore *sem)
-{
- long count;
- might_sleep();
- count = atomic_dec_return(&sem->count);
- if (unlikely(count < 0))
- return __down_failed_interruptible(sem);
- return 0;
-}
-
-/*
- * down_trylock returns 0 on success, 1 if we failed to get the lock.
- */
-
-static inline int __down_trylock(struct semaphore *sem)
-{
- long ret;
-
- /* "Equivalent" C:
-
- do {
- ret = ldl_l;
- --ret;
- if (ret < 0)
- break;
- ret = stl_c = ret;
- } while (ret == 0);
- */
- __asm__ __volatile__(
- "1: ldl_l %0,%1\n"
- " subl %0,1,%0\n"
- " blt %0,2f\n"
- " stl_c %0,%1\n"
- " beq %0,3f\n"
- " mb\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r" (ret), "=m" (sem->count)
- : "m" (sem->count));
-
- return ret < 0;
-}
-
-static inline void __up(struct semaphore *sem)
-{
- if (unlikely(atomic_inc_return(&sem->count) <= 0))
- __up_wakeup(sem);
-}
-
-#if !defined(CONFIG_DEBUG_SEMAPHORE)
-extern inline void down(struct semaphore *sem)
-{
- __down(sem);
-}
-extern inline int down_interruptible(struct semaphore *sem)
-{
- return __down_interruptible(sem);
-}
-extern inline int down_trylock(struct semaphore *sem)
-{
- return __down_trylock(sem);
-}
-extern inline void up(struct semaphore *sem)
-{
- __up(sem);
-}
-#endif
-
-#endif
+#include <linux/semaphore.h>
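[Note: the pattern above repeats for every architecture in this series — each hand-rolled semaphore header is reduced to a one-line include of the new generic implementation (include/linux/semaphore.h, 51 lines in the diffstat above). A minimal caller-side sketch of that generic API, assuming the usual kernel calling convention; my_sem and my_init are hypothetical names, not part of this patch:

	#include <linux/semaphore.h>

	static struct semaphore my_sem;		/* hypothetical example lock */

	static int my_init(void)
	{
		sema_init(&my_sem, 1);			/* count of 1 == mutex-like */

		if (down_interruptible(&my_sem))	/* sleeps; nonzero on signal */
			return -EINTR;
		/* ... critical section ... */
		up(&my_sem);
		return 0;
	}

The point of the consolidation is that call sites like this do not change: down(), down_interruptible(), down_trylock() and up() keep their semantics, and only the duplicated per-arch implementations behind them go away.]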
diff --git a/include/asm-arm/arch-iop13xx/adma.h b/include/asm-arm/arch-iop13xx/adma.h
index efd9a5eb1008..90d14ee564f5 100644
--- a/include/asm-arm/arch-iop13xx/adma.h
+++ b/include/asm-arm/arch-iop13xx/adma.h
@@ -454,11 +454,6 @@ static inline void iop_chan_append(struct iop_adma_chan *chan)
__raw_writel(adma_accr, ADMA_ACCR(chan));
}
-static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan)
-{
- do { } while (0);
-}
-
static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
{
return __raw_readl(ADMA_ACSR(chan));
diff --git a/include/asm-arm/arch-s3c2410/spi.h b/include/asm-arm/arch-s3c2410/spi.h
index 7ca0ed97a6d0..352d33860b63 100644
--- a/include/asm-arm/arch-s3c2410/spi.h
+++ b/include/asm-arm/arch-s3c2410/spi.h
@@ -15,6 +15,7 @@
struct s3c2410_spi_info {
unsigned long pin_cs; /* simple gpio cs */
+ unsigned int num_cs; /* total chipselects */
void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol);
};
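[Note: the new num_cs field lets board code tell the s3c24xx SPI driver how many chip selects the bus actually has, rather than the driver assuming one. A hedged board-file sketch — my_spi_info is a hypothetical name, and the pin constant is an assumed GPIO definition from the s3c2410 headers, not taken from this patch:

	static struct s3c2410_spi_info my_spi_info = {
		.pin_cs	= S3C2410_GPG3,	/* assumed chip-select GPIO */
		.num_cs	= 2,		/* new field: total chipselects on this bus */
	};
]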
diff --git a/include/asm-arm/hardware/iop3xx-adma.h b/include/asm-arm/hardware/iop3xx-adma.h
index 5c529e6a5e3b..84d635b0a71a 100644
--- a/include/asm-arm/hardware/iop3xx-adma.h
+++ b/include/asm-arm/hardware/iop3xx-adma.h
@@ -767,20 +767,12 @@ static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
static inline void iop_chan_append(struct iop_adma_chan *chan)
{
u32 dma_chan_ctrl;
- /* workaround dropped interrupts on 3xx */
- mod_timer(&chan->cleanup_watchdog, jiffies + msecs_to_jiffies(3));
dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
dma_chan_ctrl |= 0x2;
__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}
-static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan)
-{
- if (!busy)
- del_timer(&chan->cleanup_watchdog);
-}
-
static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
{
return __raw_readl(DMA_CSR(chan));
diff --git a/include/asm-arm/hardware/iop_adma.h b/include/asm-arm/hardware/iop_adma.h
index ca8e71f44346..cb7e3611bcba 100644
--- a/include/asm-arm/hardware/iop_adma.h
+++ b/include/asm-arm/hardware/iop_adma.h
@@ -51,7 +51,6 @@ struct iop_adma_device {
* @common: common dmaengine channel object members
* @last_used: place holder for allocation to continue from where it left off
* @all_slots: complete domain of slots usable by the channel
- * @cleanup_watchdog: workaround missed interrupts on iop3xx
* @slots_allocated: records the actual size of the descriptor slot pool
* @irq_tasklet: bottom half where iop_adma_slot_cleanup runs
*/
@@ -65,7 +64,6 @@ struct iop_adma_chan {
struct dma_chan common;
struct iop_adma_desc_slot *last_used;
struct list_head all_slots;
- struct timer_list cleanup_watchdog;
int slots_allocated;
struct tasklet_struct irq_tasklet;
};
diff --git a/include/asm-arm/ide.h b/include/asm-arm/ide.h
index f348fcf3150b..88f4d231ce4f 100644
--- a/include/asm-arm/ide.h
+++ b/include/asm-arm/ide.h
@@ -17,14 +17,6 @@
#define MAX_HWIFS 4
#endif
-#if !defined(CONFIG_ARCH_L7200)
-# ifdef CONFIG_ARCH_CLPS7500
-# define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */
-# else
-# define ide_default_io_ctl(base) (0)
-# endif
-#endif /* !ARCH_L7200 */
-
#define __ide_mm_insw(port,addr,len) readsw(port,addr,len)
#define __ide_mm_insl(port,addr,len) readsl(port,addr,len)
#define __ide_mm_outsw(port,addr,len) writesw(port,addr,len)
diff --git a/include/asm-arm/semaphore-helper.h b/include/asm-arm/semaphore-helper.h
deleted file mode 100644
index 1d7f1987edb9..000000000000
--- a/include/asm-arm/semaphore-helper.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef ASMARM_SEMAPHORE_HELPER_H
-#define ASMARM_SEMAPHORE_HELPER_H
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (atomic_read(&sem->count) <= 0)
- sem->waking++;
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking > 0) {
- sem->waking--;
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking non zero interruptible
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking > 0) {
- sem->waking--;
- ret = 1;
- } else if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_try_lock:
- * 1 failed to lock
- * 0 got the lock
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 1;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking <= 0)
- atomic_inc(&sem->count);
- else {
- sem->waking--;
- ret = 0;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-#endif
diff --git a/include/asm-arm/semaphore.h b/include/asm-arm/semaphore.h
index 1c8b441f89e3..d9b2034ed1d2 100644
--- a/include/asm-arm/semaphore.h
+++ b/include/asm-arm/semaphore.h
@@ -1,98 +1 @@
-/*
- * linux/include/asm-arm/semaphore.h
- */
-#ifndef __ASM_ARM_SEMAPHORE_H
-#define __ASM_ARM_SEMAPHORE_H
-
-#include <linux/linkage.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/atomic.h>
-#include <asm/locks.h>
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INIT(name, cnt) \
-{ \
- .count = ATOMIC_INIT(cnt), \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INIT(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
- atomic_set(&sem->count, val);
- sem->sleepers = 0;
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX(struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED(struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-/*
- * special register calling convention
- */
-asmlinkage void __down_failed(void);
-asmlinkage int __down_interruptible_failed(void);
-asmlinkage int __down_trylock_failed(void);
-asmlinkage void __up_wakeup(void);
-
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern int __down_trylock(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down" is the actual routine that waits...
- */
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
- __down_op(sem, __down_failed);
-}
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_interruptible" is the actual routine that waits...
- */
-static inline int down_interruptible (struct semaphore * sem)
-{
- might_sleep();
- return __down_op_ret(sem, __down_interruptible_failed);
-}
-
-static inline int down_trylock(struct semaphore *sem)
-{
- return __down_op_ret(sem, __down_trylock_failed);
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
- __up_op(sem, __up_wakeup);
-}
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-avr32/semaphore.h b/include/asm-avr32/semaphore.h
index feaf1d453386..d9b2034ed1d2 100644
--- a/include/asm-avr32/semaphore.h
+++ b/include/asm-avr32/semaphore.h
@@ -1,108 +1 @@
-/*
- * SMP- and interrupt-safe semaphores.
- *
- * Copyright (C) 2006 Atmel Corporation
- *
- * Based on include/asm-i386/semaphore.h
- * Copyright (C) 1996 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_AVR32_SEMAPHORE_H
-#define __ASM_AVR32_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- atomic_set(&sem->count, val);
- sem->sleepers = 0;
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-void __down(struct semaphore * sem);
-int __down_interruptible(struct semaphore * sem);
-void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/i386/kernel/semaphore.c
- */
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
- if (unlikely(atomic_dec_return (&sem->count) < 0))
- __down (sem);
-}
-
-/*
- * Interruptible try to acquire a semaphore. If we obtained
- * it, return zero. If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
- int ret = 0;
-
- might_sleep();
- if (unlikely(atomic_dec_return (&sem->count) < 0))
- ret = __down_interruptible (sem);
- return ret;
-}
-
-/*
- * Non-blockingly attempt to down() a semaphore.
- * Returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore * sem)
-{
- return atomic_dec_if_positive(&sem->count) < 0;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
- if (unlikely(atomic_inc_return (&sem->count) <= 0))
- __up (sem);
-}
-
-#endif /*__ASM_AVR32_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-blackfin/ide.h b/include/asm-blackfin/ide.h
index 121e272581d6..5b88de115bf4 100644
--- a/include/asm-blackfin/ide.h
+++ b/include/asm-blackfin/ide.h
@@ -19,10 +19,6 @@
#define MAX_HWIFS 1
-/* Legacy ... BLK_DEV_IDECS */
-#define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */
-
-
#include <asm-generic/ide_iops.h>
/****************************************************************************/
diff --git a/include/asm-blackfin/semaphore-helper.h b/include/asm-blackfin/semaphore-helper.h
deleted file mode 100644
index 9082b0dc3eb5..000000000000
--- a/include/asm-blackfin/semaphore-helper.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* Based on M68K version, Lineo Inc. May 2001 */
-
-#ifndef _BFIN_SEMAPHORE_HELPER_H
-#define _BFIN_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- */
-
-#include <asm/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore *sem)
-{
- atomic_inc(&sem->waking);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
- int ret;
- unsigned long flags = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 0;
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- int ret = 0;
- unsigned long flags = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 1;
- } else if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
- int ret = 1;
- unsigned long flags = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 0;
- } else
- atomic_inc(&sem->count);
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-#endif /* _BFIN_SEMAPHORE_HELPER_H */
diff --git a/include/asm-blackfin/semaphore.h b/include/asm-blackfin/semaphore.h
index 533f90fb2e4e..d9b2034ed1d2 100644
--- a/include/asm-blackfin/semaphore.h
+++ b/include/asm-blackfin/semaphore.h
@@ -1,105 +1 @@
-#ifndef _BFIN_SEMAPHORE_H
-#define _BFIN_SEMAPHORE_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * BFIN version by akbar hussain Lineo Inc April 2001
- *
- */
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .sleepers = 0, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
- *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX(struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED(struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-asmlinkage void __down(struct semaphore *sem);
-asmlinkage int __down_interruptible(struct semaphore *sem);
-asmlinkage int __down_trylock(struct semaphore *sem);
-asmlinkage void __up(struct semaphore *sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits.
- */
-static inline void down(struct semaphore *sem)
-{
- might_sleep();
- if (atomic_dec_return(&sem->count) < 0)
- __down(sem);
-}
-
-static inline int down_interruptible(struct semaphore *sem)
-{
- int ret = 0;
-
- might_sleep();
- if (atomic_dec_return(&sem->count) < 0)
- ret = __down_interruptible(sem);
- return (ret);
-}
-
-static inline int down_trylock(struct semaphore *sem)
-{
- int ret = 0;
-
- if (atomic_dec_return(&sem->count) < 0)
- ret = __down_trylock(sem);
- return ret;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore *sem)
-{
- if (atomic_inc_return(&sem->count) <= 0)
- __up(sem);
-}
-
-#endif /* __ASSEMBLY__ */
-#endif /* _BFIN_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-cris/semaphore-helper.h b/include/asm-cris/semaphore-helper.h
deleted file mode 100644
index 27bfeca1b981..000000000000
--- a/include/asm-cris/semaphore-helper.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* $Id: semaphore-helper.h,v 1.3 2001/03/26 15:00:33 orjanf Exp $
- *
- * SMP- and interrupt-safe semaphores helper functions. Generic versions, no
- * optimizations whatsoever...
- *
- */
-
-#ifndef _ASM_SEMAPHORE_HELPER_H
-#define _ASM_SEMAPHORE_HELPER_H
-
-#include <asm/atomic.h>
-#include <linux/errno.h>
-
-#define read(a) ((a)->counter)
-#define inc(a) (((a)->counter)++)
-#define dec(a) (((a)->counter)--)
-
-#define count_inc(a) ((*(a))++)
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
- atomic_inc(&sem->waking);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 0;
-
- local_irq_save(flags);
- if (read(&sem->waking) > 0) {
- dec(&sem->waking);
- ret = 1;
- }
- local_irq_restore(flags);
- return ret;
-}
-
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- int ret = 0;
- unsigned long flags;
-
- local_irq_save(flags);
- if (read(&sem->waking) > 0) {
- dec(&sem->waking);
- ret = 1;
- } else if (signal_pending(tsk)) {
- inc(&sem->count);
- ret = -EINTR;
- }
- local_irq_restore(flags);
- return ret;
-}
-
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
- int ret = 1;
- unsigned long flags;
-
- local_irq_save(flags);
- if (read(&sem->waking) <= 0)
- inc(&sem->count);
- else {
- dec(&sem->waking);
- ret = 0;
- }
- local_irq_restore(flags);
- return ret;
-}
-
-#endif /* _ASM_SEMAPHORE_HELPER_H */
-
-
diff --git a/include/asm-cris/semaphore.h b/include/asm-cris/semaphore.h
index 31a4ac448195..d9b2034ed1d2 100644
--- a/include/asm-cris/semaphore.h
+++ b/include/asm-cris/semaphore.h
@@ -1,133 +1 @@
-/* $Id: semaphore.h,v 1.3 2001/05/08 13:54:09 bjornw Exp $ */
-
-/* On the i386 these are coded in asm, perhaps we should as well. Later.. */
-
-#ifndef _CRIS_SEMAPHORE_H
-#define _CRIS_SEMAPHORE_H
-
-#define RW_LOCK_BIAS 0x01000000
-
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-/*
- * CRIS semaphores, implemented in C-only so far.
- */
-
-struct semaphore {
- atomic_t count;
- atomic_t waking;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .waking = ATOMIC_INIT(0), \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
- *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern int __down_trylock(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-/* notice - we probably can do cli/sti here instead of saving */
-
-static inline void down(struct semaphore * sem)
-{
- unsigned long flags;
- int failed;
-
- might_sleep();
-
- /* atomically decrement the semaphores count, and if its negative, we wait */
- cris_atomic_save(sem, flags);
- failed = --(sem->count.counter) < 0;
- cris_atomic_restore(sem, flags);
- if(failed) {
- __down(sem);
- }
-}
-
-/*
- * This version waits in interruptible state so that the waiting
- * process can be killed. The down_interruptible routine
- * returns negative for signalled and zero for semaphore acquired.
- */
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- unsigned long flags;
- int failed;
-
- might_sleep();
-
- /* atomically decrement the semaphores count, and if its negative, we wait */
- cris_atomic_save(sem, flags);
- failed = --(sem->count.counter) < 0;
- cris_atomic_restore(sem, flags);
- if(failed)
- failed = __down_interruptible(sem);
- return(failed);
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
- unsigned long flags;
- int failed;
-
- cris_atomic_save(sem, flags);
- failed = --(sem->count.counter) < 0;
- cris_atomic_restore(sem, flags);
- if(failed)
- failed = __down_trylock(sem);
- return(failed);
-
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
- unsigned long flags;
- int wakeup;
-
- /* atomically increment the semaphores count, and if it was negative, we wake people */
- cris_atomic_save(sem, flags);
- wakeup = ++(sem->count.counter) <= 0;
- cris_atomic_restore(sem, flags);
- if(wakeup) {
- __up(sem);
- }
-}
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-frv/semaphore.h b/include/asm-frv/semaphore.h
index d7aaa1911a1a..d9b2034ed1d2 100644
--- a/include/asm-frv/semaphore.h
+++ b/include/asm-frv/semaphore.h
@@ -1,155 +1 @@
-/* semaphore.h: semaphores for the FR-V
- *
- * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef _ASM_SEMAPHORE_H
-#define _ASM_SEMAPHORE_H
-
-#define RW_LOCK_BIAS 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-/*
- * the semaphore definition
- * - if counter is >0 then there are tokens available on the semaphore for down to collect
- * - if counter is <=0 then there are no spare tokens, and anyone that wants one must wait
- * - if wait_list is not empty, then there are processes waiting for the semaphore
- */
-struct semaphore {
- unsigned counter;
- spinlock_t wait_lock;
- struct list_head wait_list;
-#ifdef CONFIG_DEBUG_SEMAPHORE
- unsigned __magic;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
-#else
-# define __SEM_DEBUG_INIT(name)
-#endif
-
-
-#define __SEMAPHORE_INITIALIZER(name,count) \
-{ count, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __SEM_DEBUG_INIT(name) }
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore *sem, unsigned long flags);
-extern int __down_interruptible(struct semaphore *sem, unsigned long flags);
-extern void __up(struct semaphore *sem);
-
-static inline void down(struct semaphore *sem)
-{
- unsigned long flags;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
- CHECK_MAGIC(sem->__magic);
-#endif
-
- spin_lock_irqsave(&sem->wait_lock, flags);
- if (likely(sem->counter > 0)) {
- sem->counter--;
- spin_unlock_irqrestore(&sem->wait_lock, flags);
- }
- else {
- __down(sem, flags);
- }
-}
-
-static inline int down_interruptible(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 0;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
- CHECK_MAGIC(sem->__magic);
-#endif
-
- spin_lock_irqsave(&sem->wait_lock, flags);
- if (likely(sem->counter > 0)) {
- sem->counter--;
- spin_unlock_irqrestore(&sem->wait_lock, flags);
- }
- else {
- ret = __down_interruptible(sem, flags);
- }
- return ret;
-}
-
-/*
- * non-blockingly attempt to down() a semaphore.
- * - returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore *sem)
-{
- unsigned long flags;
- int success = 0;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
- CHECK_MAGIC(sem->__magic);
-#endif
-
- spin_lock_irqsave(&sem->wait_lock, flags);
- if (sem->counter > 0) {
- sem->counter--;
- success = 1;
- }
- spin_unlock_irqrestore(&sem->wait_lock, flags);
- return !success;
-}
-
-static inline void up(struct semaphore *sem)
-{
- unsigned long flags;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
- CHECK_MAGIC(sem->__magic);
-#endif
-
- spin_lock_irqsave(&sem->wait_lock, flags);
- if (!list_empty(&sem->wait_list))
- __up(sem);
- else
- sem->counter++;
- spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
-static inline int sem_getcount(struct semaphore *sem)
-{
- return sem->counter;
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-h8300/semaphore-helper.h b/include/asm-h8300/semaphore-helper.h
deleted file mode 100644
index 4fea36be5fd8..000000000000
--- a/include/asm-h8300/semaphore-helper.h
+++ /dev/null
@@ -1,85 +0,0 @@
-#ifndef _H8300_SEMAPHORE_HELPER_H
-#define _H8300_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * based on
- * m68k version by Andreas Schwab
- */
-
-#include <linux/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
- atomic_inc((atomic_t *)&sem->sleepers);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 0;
- if (sem->sleepers > 0) {
- sem->sleepers--;
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 0;
- if (sem->sleepers > 0) {
- sem->sleepers--;
- ret = 1;
- } else if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 1;
- if (sem->sleepers <= 0)
- atomic_inc(&sem->count);
- else {
- sem->sleepers--;
- ret = 0;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-#endif
diff --git a/include/asm-h8300/semaphore.h b/include/asm-h8300/semaphore.h
index f3ffff83ff09..d9b2034ed1d2 100644
--- a/include/asm-h8300/semaphore.h
+++ b/include/asm-h8300/semaphore.h
@@ -1,190 +1 @@
-#ifndef _H8300_SEMAPHORE_H
-#define _H8300_SEMAPHORE_H
-
-#define RW_LOCK_BIAS 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * H8/300 version by Yoshinori Sato
- */
-
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .sleepers = 0, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/m68k/lib/semaphore.S
- */
-static inline void down(struct semaphore * sem)
-{
- register atomic_t *count asm("er0");
-
- might_sleep();
-
- count = &(sem->count);
- __asm__ __volatile__(
- "stc ccr,r3l\n\t"
- "orc #0x80,ccr\n\t"
- "mov.l %2, er1\n\t"
- "dec.l #1,er1\n\t"
- "mov.l er1,%0\n\t"
- "bpl 1f\n\t"
- "ldc r3l,ccr\n\t"
- "mov.l %1,er0\n\t"
- "jsr @___down\n\t"
- "bra 2f\n"
- "1:\n\t"
- "ldc r3l,ccr\n"
- "2:"
- : "=m"(*count)
- : "g"(sem),"m"(*count)
- : "cc", "er1", "er2", "er3");
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- register atomic_t *count asm("er0");
-
- might_sleep();
-
- count = &(sem->count);
- __asm__ __volatile__(
- "stc ccr,r1l\n\t"
- "orc #0x80,ccr\n\t"
- "mov.l %3, er2\n\t"
- "dec.l #1,er2\n\t"
- "mov.l er2,%1\n\t"
- "bpl 1f\n\t"
- "ldc r1l,ccr\n\t"
- "mov.l %2,er0\n\t"
- "jsr @___down_interruptible\n\t"
- "bra 2f\n"
- "1:\n\t"
- "ldc r1l,ccr\n\t"
- "sub.l %0,%0\n\t"
- "2:\n\t"
- : "=r" (count),"=m" (*count)
- : "g"(sem),"m"(*count)
- : "cc", "er1", "er2", "er3");
- return (int)count;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
- register atomic_t *count asm("er0");
-
- count = &(sem->count);
- __asm__ __volatile__(
- "stc ccr,r3l\n\t"
- "orc #0x80,ccr\n\t"
- "mov.l %3,er2\n\t"
- "dec.l #1,er2\n\t"
- "mov.l er2,%0\n\t"
- "bpl 1f\n\t"
- "ldc r3l,ccr\n\t"
- "jmp @3f\n\t"
- LOCK_SECTION_START(".align 2\n\t")
- "3:\n\t"
- "mov.l %2,er0\n\t"
- "jsr @___down_trylock\n\t"
- "jmp @2f\n\t"
- LOCK_SECTION_END
- "1:\n\t"
- "ldc r3l,ccr\n\t"
- "sub.l %1,%1\n"
- "2:"
- : "=m" (*count),"=r"(count)
- : "g"(sem),"m"(*count)
- : "cc", "er1","er2", "er3");
- return (int)count;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
- register atomic_t *count asm("er0");
-
- count = &(sem->count);
- __asm__ __volatile__(
- "stc ccr,r3l\n\t"
- "orc #0x80,ccr\n\t"
- "mov.l %2,er1\n\t"
- "inc.l #1,er1\n\t"
- "mov.l er1,%0\n\t"
- "ldc r3l,ccr\n\t"
- "sub.l er2,er2\n\t"
- "cmp.l er2,er1\n\t"
- "bgt 1f\n\t"
- "mov.l %1,er0\n\t"
- "jsr @___up\n"
- "1:"
- : "=m"(*count)
- : "g"(sem),"m"(*count)
- : "cc", "er1", "er2", "er3");
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index cd1cc39b5599..fcfad326f4c7 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -35,6 +35,7 @@
#include <linux/init.h>
#include <linux/numa.h>
#include <asm/system.h>
+#include <asm/numa.h>
#define COMPILER_DEPENDENT_INT64 long
#define COMPILER_DEPENDENT_UINT64 unsigned long
@@ -115,7 +116,11 @@ extern unsigned int is_cpu_cpei_target(unsigned int cpu);
extern void set_cpei_target_cpu(unsigned int cpu);
extern unsigned int get_cpei_target_cpu(void);
extern void prefill_possible_map(void);
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
extern int additional_cpus;
+#else
+#define additional_cpus 0
+#endif
#ifdef CONFIG_ACPI_NUMA
#if MAX_NUMNODES > 256
@@ -129,6 +134,34 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#define acpi_unlazy_tlb(x)
+#ifdef CONFIG_ACPI_NUMA
+extern cpumask_t early_cpu_possible_map;
+#define for_each_possible_early_cpu(cpu) \
+ for_each_cpu_mask((cpu), early_cpu_possible_map)
+
+static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
+{
+ int low_cpu, high_cpu;
+ int cpu;
+ int next_nid = 0;
+
+ low_cpu = cpus_weight(early_cpu_possible_map);
+
+ high_cpu = max(low_cpu, min_cpus);
+ high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
+
+ for (cpu = low_cpu; cpu < high_cpu; cpu++) {
+ cpu_set(cpu, early_cpu_possible_map);
+ if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
+ node_cpuid[cpu].nid = next_nid;
+ next_nid++;
+ if (next_nid >= num_online_nodes())
+ next_nid = 0;
+ }
+ }
+}
+#endif /* CONFIG_ACPI_NUMA */
+
#endif /*__KERNEL__*/
#endif /*_ASM_ACPI_H*/
diff --git a/include/asm-ia64/cputime.h b/include/asm-ia64/cputime.h
index 72400a78002a..f9abdec6577a 100644
--- a/include/asm-ia64/cputime.h
+++ b/include/asm-ia64/cputime.h
@@ -1,6 +1,110 @@
+/*
+ * include/asm-ia64/cputime.h:
+ * Definitions for measuring cputime on ia64 machines.
+ *
+ * Based on <asm-powerpc/cputime.h>.
+ *
+ * Copyright (C) 2007 FUJITSU LIMITED
+ * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in nsec.
+ * Otherwise we measure cpu time in jiffies using the generic definitions.
+ */
+
#ifndef __IA64_CPUTIME_H
#define __IA64_CPUTIME_H
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
#include <asm-generic/cputime.h>
+#else
+
+#include <linux/time.h>
+#include <linux/jiffies.h>
+#include <asm/processor.h>
+
+typedef u64 cputime_t;
+typedef u64 cputime64_t;
+
+#define cputime_zero ((cputime_t)0)
+#define cputime_max ((~((cputime_t)0) >> 1) - 1)
+#define cputime_add(__a, __b) ((__a) + (__b))
+#define cputime_sub(__a, __b) ((__a) - (__b))
+#define cputime_div(__a, __n) ((__a) / (__n))
+#define cputime_halve(__a) ((__a) >> 1)
+#define cputime_eq(__a, __b) ((__a) == (__b))
+#define cputime_gt(__a, __b) ((__a) > (__b))
+#define cputime_ge(__a, __b) ((__a) >= (__b))
+#define cputime_lt(__a, __b) ((__a) < (__b))
+#define cputime_le(__a, __b) ((__a) <= (__b))
+
+#define cputime64_zero ((cputime64_t)0)
+#define cputime64_add(__a, __b) ((__a) + (__b))
+#define cputime64_sub(__a, __b) ((__a) - (__b))
+#define cputime_to_cputime64(__ct) (__ct)
+
+/*
+ * Convert cputime <-> jiffies (HZ)
+ */
+#define cputime_to_jiffies(__ct) ((__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies_to_cputime(__jif) ((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime64_to_jiffies64(__ct) ((__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies64_to_cputime64(__jif) ((__jif) * (NSEC_PER_SEC / HZ))
+
+/*
+ * Convert cputime <-> milliseconds
+ */
+#define cputime_to_msecs(__ct) ((__ct) / NSEC_PER_MSEC)
+#define msecs_to_cputime(__msecs) ((__msecs) * NSEC_PER_MSEC)
+
+/*
+ * Convert cputime <-> seconds
+ */
+#define cputime_to_secs(__ct) ((__ct) / NSEC_PER_SEC)
+#define secs_to_cputime(__secs) ((__secs) * NSEC_PER_SEC)
+
+/*
+ * Convert cputime <-> timespec (nsec)
+ */
+static inline cputime_t timespec_to_cputime(const struct timespec *val)
+{
+ cputime_t ret = val->tv_sec * NSEC_PER_SEC;
+ return (ret + val->tv_nsec);
+}
+static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+{
+ val->tv_sec = ct / NSEC_PER_SEC;
+ val->tv_nsec = ct % NSEC_PER_SEC;
+}
+
+/*
+ * Convert cputime <-> timeval (msec)
+ */
+static inline cputime_t timeval_to_cputime(struct timeval *val)
+{
+ cputime_t ret = val->tv_sec * NSEC_PER_SEC;
+ return (ret + val->tv_usec * NSEC_PER_USEC);
+}
+static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
+{
+ val->tv_sec = ct / NSEC_PER_SEC;
+ val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC;
+}
+
+/*
+ * Convert cputime <-> clock (USER_HZ)
+ */
+#define cputime_to_clock_t(__ct) ((__ct) / (NSEC_PER_SEC / USER_HZ))
+#define clock_t_to_cputime(__x) ((__x) * (NSEC_PER_SEC / USER_HZ))
+
+/*
+ * Convert cputime64 to clock.
+ */
+#define cputime64_to_clock_t(__ct) cputime_to_clock_t((cputime_t)__ct)
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
#endif /* __IA64_CPUTIME_H */
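[Editor's note: with CONFIG_VIRT_CPU_ACCOUNTING enabled, cputime_t now carries nanoseconds, so every conversion above is plain integer arithmetic. A minimal standalone sketch of the jiffies round trip — not kernel code, and the HZ value is an assumption:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 250                                  /* assumed tick rate */

typedef uint64_t cputime_t;

static cputime_t jiffies_to_cputime(uint64_t jif)
{
        return jif * (NSEC_PER_SEC / HZ);       /* ticks -> nsec */
}

static uint64_t cputime_to_jiffies(cputime_t ct)
{
        return ct / (NSEC_PER_SEC / HZ);        /* nsec -> ticks */
}

int main(void)
{
        cputime_t ct = jiffies_to_cputime(100);
        /* 100 ticks -> 400000000 ns -> back to 100 ticks */
        printf("%llu ns, %llu jiffies\n",
               (unsigned long long)ct,
               (unsigned long long)cputime_to_jiffies(ct));
        return 0;
}
]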
diff --git a/include/asm-ia64/elf.h b/include/asm-ia64/elf.h
index f8e83eca67a2..5e0c1a6bce8d 100644
--- a/include/asm-ia64/elf.h
+++ b/include/asm-ia64/elf.h
@@ -26,6 +26,7 @@
#define ELF_ARCH EM_IA_64
#define USE_ELF_CORE_DUMP
+#define CORE_DUMP_USE_REGSET
/* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are
interpreted as follows by Linux: */
@@ -154,6 +155,30 @@ extern void ia64_init_addr_space (void);
#define ELF_NGREG 128 /* we really need just 72 but let's leave some headroom... */
#define ELF_NFPREG 128 /* f0 and f1 could be omitted, but so what... */
+/* elf_gregset_t register offsets */
+#define ELF_GR_0_OFFSET 0
+#define ELF_NAT_OFFSET (32 * sizeof(elf_greg_t))
+#define ELF_PR_OFFSET (33 * sizeof(elf_greg_t))
+#define ELF_BR_0_OFFSET (34 * sizeof(elf_greg_t))
+#define ELF_CR_IIP_OFFSET (42 * sizeof(elf_greg_t))
+#define ELF_CFM_OFFSET (43 * sizeof(elf_greg_t))
+#define ELF_CR_IPSR_OFFSET (44 * sizeof(elf_greg_t))
+#define ELF_GR_OFFSET(i) (ELF_GR_0_OFFSET + i * sizeof(elf_greg_t))
+#define ELF_BR_OFFSET(i) (ELF_BR_0_OFFSET + i * sizeof(elf_greg_t))
+#define ELF_AR_RSC_OFFSET (45 * sizeof(elf_greg_t))
+#define ELF_AR_BSP_OFFSET (46 * sizeof(elf_greg_t))
+#define ELF_AR_BSPSTORE_OFFSET (47 * sizeof(elf_greg_t))
+#define ELF_AR_RNAT_OFFSET (48 * sizeof(elf_greg_t))
+#define ELF_AR_CCV_OFFSET (49 * sizeof(elf_greg_t))
+#define ELF_AR_UNAT_OFFSET (50 * sizeof(elf_greg_t))
+#define ELF_AR_FPSR_OFFSET (51 * sizeof(elf_greg_t))
+#define ELF_AR_PFS_OFFSET (52 * sizeof(elf_greg_t))
+#define ELF_AR_LC_OFFSET (53 * sizeof(elf_greg_t))
+#define ELF_AR_EC_OFFSET (54 * sizeof(elf_greg_t))
+#define ELF_AR_CSD_OFFSET (55 * sizeof(elf_greg_t))
+#define ELF_AR_SSD_OFFSET (56 * sizeof(elf_greg_t))
+#define ELF_AR_END_OFFSET (57 * sizeof(elf_greg_t))
+
typedef unsigned long elf_fpxregset_t;
typedef unsigned long elf_greg_t;
@@ -183,12 +208,6 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
struct task_struct;
-extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
-extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
-
-#define ELF_CORE_COPY_TASK_REGS(tsk, elf_gregs) dump_task_regs(tsk, elf_gregs)
-#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
-
#define GATE_EHDR ((const struct elfhdr *) GATE_ADDR)
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
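[Editor's note: elf_greg_t is 8 bytes on ia64, so the new offset macros resolve to fixed byte positions inside elf_gregset_t, which is what the regset core needs. A standalone sketch mirroring the macros above — not kernel code:

#include <stdio.h>

typedef unsigned long elf_greg_t;

#define ELF_GR_0_OFFSET   0
#define ELF_GR_OFFSET(i)  (ELF_GR_0_OFFSET + (i) * sizeof(elf_greg_t))
#define ELF_CR_IIP_OFFSET (42 * sizeof(elf_greg_t))

int main(void)
{
        printf("gr12 at byte %zu\n", ELF_GR_OFFSET(12));    /* 96 */
        printf("cr.iip at byte %zu\n", ELF_CR_IIP_OFFSET);  /* 336 */
        return 0;
}
]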
diff --git a/include/asm-ia64/ide.h b/include/asm-ia64/ide.h
index 1ccf23809329..8fa3f8cd067a 100644
--- a/include/asm-ia64/ide.h
+++ b/include/asm-ia64/ide.h
@@ -16,8 +16,6 @@
#include <linux/irq.h>
-#define IDE_ARCH_OBSOLETE_DEFAULTS
-
static inline int ide_default_irq(unsigned long base)
{
switch (base) {
@@ -46,14 +44,6 @@ static inline unsigned long ide_default_io_base(int index)
}
}
-#define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */
-
-#ifdef CONFIG_PCI
-#define ide_init_default_irq(base) (0)
-#else
-#define ide_init_default_irq(base) ide_default_irq(base)
-#endif
-
#include <asm-generic/ide_iops.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index d03bf9ff68e3..ef71b57fc2f4 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -30,8 +30,12 @@
#include <asm/break.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
-#define MAX_INSN_SIZE 1
+#define MAX_INSN_SIZE 2 /* last half is for kprobe-booster */
#define BREAK_INST (long)(__IA64_BREAK_KPROBE << 6)
+#define NOP_M_INST (long)(1<<27)
+#define BRL_INST(i1, i2) ((long)((0xcL << 37) | /* brl */ \
+ (0x1L << 12) | /* many */ \
+ (((i1) & 1) << 36) | ((i2) << 13))) /* imm */
typedef union cmp_inst {
struct {
@@ -112,6 +116,7 @@ struct arch_specific_insn {
#define INST_FLAG_FIX_RELATIVE_IP_ADDR 1
#define INST_FLAG_FIX_BRANCH_REG 2
#define INST_FLAG_BREAK_INST 4
+ #define INST_FLAG_BOOSTABLE 8
unsigned long inst_flag;
unsigned short target_br_reg;
unsigned short slot;
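[Editor's note: MAX_INSN_SIZE grows to two slots so the booster can plant a long branch (brl) back into the original instruction stream. A sketch of how BRL_INST packs the encoding; splitting a branch displacement into the i1 sign bit and the i2 body as shown is purely illustrative:

#include <stdio.h>

#define BRL_INST(i1, i2) ((long)((0xcL << 37) |         /* brl opcode */ \
                          (0x1L << 12) |                /* "many" hint */ \
                          (((long)(i1) & 1) << 36) |    /* imm sign bit */ \
                          ((long)(i2) << 13)))          /* imm body */

int main(void)
{
        long insn = BRL_INST(0, 0x1234);        /* made-up displacement */
        printf("brl encoding: 0x%lx\n", insn);
        return 0;
}
]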
diff --git a/include/asm-ia64/kregs.h b/include/asm-ia64/kregs.h
index 7e55a584975c..aefcdfee7f23 100644
--- a/include/asm-ia64/kregs.h
+++ b/include/asm-ia64/kregs.h
@@ -31,6 +31,9 @@
#define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */
#define IA64_TR_CURRENT_STACK 1 /* dtr1: maps kernel's memory- & register-stacks */
+#define IA64_TR_ALLOC_BASE 2 /* itr&dtr: Base of dynamic TR resource */
+#define IA64_TR_ALLOC_MAX 32 /* Max number for dynamic use */
+
/* Processor status register bits: */
#define IA64_PSR_BE_BIT 1
#define IA64_PSR_UP_BIT 2
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index f93308f54b61..7245a5781594 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -35,6 +35,7 @@ extern void find_memory (void);
extern void reserve_memory (void);
extern void find_initrd (void);
extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
+extern int filter_memory (unsigned long start, unsigned long end, void *arg);
extern unsigned long efi_memmap_init(unsigned long *s, unsigned long *e);
extern int find_max_min_low_pfn (unsigned long , unsigned long, void *);
@@ -56,7 +57,7 @@ extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
-extern int register_active_ranges(u64 start, u64 end, void *arg);
+extern int register_active_ranges(u64 start, u64 len, int nid);
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
diff --git a/include/asm-ia64/numa.h b/include/asm-ia64/numa.h
index 6a8a27cfae3e..3499ff57bf42 100644
--- a/include/asm-ia64/numa.h
+++ b/include/asm-ia64/numa.h
@@ -22,6 +22,8 @@
#include <asm/mmzone.h>
+#define NUMA_NO_NODE -1
+
extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
extern pg_data_t *pgdat_list[MAX_NUMNODES];
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 8a695d3407d2..67b02901ead4 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -13,6 +13,7 @@
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
+ * Copyright (C) 2008 Silicon Graphics, Inc. (SGI)
*
* 99/10/01 davidm Make sure we pass zero for reserved parameters.
* 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6.
@@ -73,6 +74,8 @@
#define PAL_CACHE_SHARED_INFO 43 /* returns information on caches shared by logical processor */
#define PAL_GET_HW_POLICY 48 /* Get current hardware resource sharing policy */
#define PAL_SET_HW_POLICY 49 /* Set current hardware resource sharing policy */
+#define PAL_VP_INFO 50 /* Information about virtual processor features */
+#define PAL_MC_HW_TRACKING 51 /* Hardware tracking status */
#define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */
#define PAL_HALT_INFO 257 /* return the low power capabilities of processor */
@@ -504,7 +507,8 @@ typedef struct pal_cache_check_info_s {
wiv : 1, /* Way field valid */
reserved2 : 1,
dp : 1, /* Data poisoned on MBE */
- reserved3 : 8,
+ reserved3 : 6,
+ hlth : 2, /* Health indicator */
index : 20, /* Cache line index */
reserved4 : 2,
@@ -542,7 +546,9 @@ typedef struct pal_tlb_check_info_s {
dtc : 1, /* Fail in data TC */
itc : 1, /* Fail in inst. TC */
op : 4, /* Cache operation */
- reserved3 : 30,
+ reserved3 : 6,
+ hlth : 2, /* Health indicator */
+ reserved4 : 22,
is : 1, /* instruction set (1 == ia32) */
iv : 1, /* instruction set field valid */
@@ -633,7 +639,8 @@ typedef struct pal_uarch_check_info_s {
way : 6, /* Way of structure */
wv : 1, /* way valid */
xv : 1, /* index valid */
- reserved1 : 8,
+ reserved1 : 6,
+ hlth : 2, /* Health indicator */
index : 8, /* Index or set of the uarch
* structure that failed.
*/
@@ -1213,14 +1220,12 @@ ia64_pal_mc_drain (void)
/* Return the machine check dynamic processor state */
static inline s64
-ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds)
+ia64_pal_mc_dynamic_state (u64 info_type, u64 dy_buffer, u64 *size)
{
struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, offset, 0, 0);
+ PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, info_type, dy_buffer, 0);
if (size)
*size = iprv.v0;
- if (pds)
- *pds = iprv.v1;
return iprv.status;
}
@@ -1281,15 +1286,41 @@ ia64_pal_mc_expected (u64 expected, u64 *previous)
return iprv.status;
}
+typedef union pal_hw_tracking_u {
+ u64 pht_data;
+ struct {
+ u64 itc :4, /* Instruction cache tracking */
+ dct :4, /* Data cache tracking */
+ itt :4, /* Instruction TLB tracking */
+ ddt :4, /* Data TLB tracking */
+ reserved:48;
+ } pal_hw_tracking_s;
+} pal_hw_tracking_u_t;
+
+/*
+ * Hardware tracking status.
+ */
+static inline s64
+ia64_pal_mc_hw_tracking (u64 *status)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_HW_TRACKING, 0, 0, 0);
+ if (status)
+ *status = iprv.v0;
+ return iprv.status;
+}
+
/* Register a platform dependent location with PAL to which it can save
* minimal processor state in the event of a machine check or initialization
* event.
*/
static inline s64
-ia64_pal_mc_register_mem (u64 physical_addr)
+ia64_pal_mc_register_mem (u64 physical_addr, u64 size, u64 *req_size)
{
struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, 0, 0);
+ PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, size, 0);
+ if (req_size)
+ *req_size = iprv.v0;
return iprv.status;
}
@@ -1631,6 +1662,29 @@ ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2)
return iprv.status;
}
+typedef union pal_vp_info_u {
+ u64 pvi_val;
+ struct {
+ u64 index: 48, /* virtual feature set info */
+ vmm_id: 16; /* feature set id */
+ } pal_vp_info_s;
+} pal_vp_info_u_t;
+
+/*
+ * Returns information about virtual processor features
+ */
+static inline s64
+ia64_pal_vp_info (u64 feature_set, u64 vp_buffer, u64 *vp_info, u64 *vmm_id)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_VP_INFO, feature_set, vp_buffer, 0);
+ if (vp_info)
+ *vp_info = iprv.v0;
+ if (vmm_id)
+ *vmm_id = iprv.v1;
+ return iprv.status;
+}
+
typedef union pal_itr_valid_u {
u64 piv_val;
struct {
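[Editor's note: the new unions make the PAL return words self-describing. A standalone sketch of decoding a PAL_MC_HW_TRACKING status word via pal_hw_tracking_u; the sample value is made up:

#include <stdio.h>

typedef unsigned long long u64;

typedef union pal_hw_tracking_u {
        u64 pht_data;
        struct {
                u64 itc:4,      /* instruction cache tracking */
                    dct:4,      /* data cache tracking */
                    itt:4,      /* instruction TLB tracking */
                    ddt:4,      /* data TLB tracking */
                    reserved:48;
        } pal_hw_tracking_s;
} pal_hw_tracking_u_t;

int main(void)
{
        pal_hw_tracking_u_t hw;

        hw.pht_data = 0x43ULL;  /* assumed sample status word */
        printf("icache=%llu dcache=%llu\n",
               (u64)hw.pal_hw_tracking_s.itc,   /* 3 */
               (u64)hw.pal_hw_tracking_s.dct);  /* 4 */
        return 0;
}
]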
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index e6204f14f614..ed70862ea247 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -371,7 +371,7 @@ pgd_index (unsigned long address)
/* The offset in the 1-level directory is given by the 3 region bits
(61..63) and the level-1 bits. */
static inline pgd_t*
-pgd_offset (struct mm_struct *mm, unsigned long address)
+pgd_offset (const struct mm_struct *mm, unsigned long address)
{
return mm->pgd + pgd_index(address);
}
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index f4904db3b057..89594b442f83 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -296,6 +296,9 @@ enum {
EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SAL_PLAT_BUS_ERR_SECT_GUID \
EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID \
+ EFI_GUID(0x6cb0a200, 0x893a, 0x11da, 0x96, 0xd2, 0x0, 0x10, 0x83, 0xff, \
+ 0xca, 0x4d)
#define MAX_CACHE_ERRORS 6
#define MAX_TLB_ERRORS 6
@@ -879,6 +882,24 @@ extern void ia64_jump_to_sal(struct sal_to_os_boot *);
extern void ia64_sal_handler_init(void *entry_point, void *gpval);
+#define PALO_MAX_TLB_PURGES 0xFFFF
+#define PALO_SIG "PALO"
+
+struct palo_table {
+ u8 signature[4]; /* Should be "PALO" */
+ u32 length;
+ u8 minor_revision;
+ u8 major_revision;
+ u8 checksum;
+ u8 reserved1[5];
+ u16 max_tlb_purges;
+ u8 reserved2[6];
+};
+
+#define NPTCG_FROM_PAL 0
+#define NPTCG_FROM_PALO 1
+#define NPTCG_FROM_KERNEL_PARAMETER 2
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_IA64_SAL_H */
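[Editor's note: a sketch of how a consumer might validate the new PALO table before trusting max_tlb_purges. The bytes-sum-to-zero checksum rule is an assumption modeled on similar firmware tables (e.g. ACPI), not something this patch specifies:

#include <stdint.h>
#include <string.h>

struct palo_table {
        uint8_t  signature[4];          /* should be "PALO" */
        uint32_t length;
        uint8_t  minor_revision;
        uint8_t  major_revision;
        uint8_t  checksum;
        uint8_t  reserved1[5];
        uint16_t max_tlb_purges;
        uint8_t  reserved2[6];
};

static int palo_table_valid(const struct palo_table *t)
{
        const uint8_t *p = (const uint8_t *)t;
        uint8_t sum = 0;
        uint32_t i;

        if (memcmp(t->signature, "PALO", 4) != 0)
                return 0;
        if (t->length < sizeof(*t))     /* real code would bound this further */
                return 0;
        for (i = 0; i < t->length; i++)
                sum += p[i];            /* bytes must sum to zero mod 256 */
        return sum == 0;
}
]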
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
index d8393d11288d..d9b2034ed1d2 100644
--- a/include/asm-ia64/semaphore.h
+++ b/include/asm-ia64/semaphore.h
@@ -1,99 +1 @@
-#ifndef _ASM_IA64_SEMAPHORE_H
-#define _ASM_IA64_SEMAPHORE_H
-
-/*
- * Copyright (C) 1998-2000 Hewlett-Packard Co
- * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/atomic.h>
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .sleepers = 0, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
-
-static inline void
-sema_init (struct semaphore *sem, int val)
-{
- *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void
-init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void
-init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void __down (struct semaphore * sem);
-extern int __down_interruptible (struct semaphore * sem);
-extern int __down_trylock (struct semaphore * sem);
-extern void __up (struct semaphore * sem);
-
-/*
- * Atomically decrement the semaphore's count. If it goes negative,
- * block the calling thread in the TASK_UNINTERRUPTIBLE state.
- */
-static inline void
-down (struct semaphore *sem)
-{
- might_sleep();
- if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
- __down(sem);
-}
-
-/*
- * Atomically decrement the semaphore's count. If it goes negative,
- * block the calling thread in the TASK_INTERRUPTIBLE state.
- */
-static inline int
-down_interruptible (struct semaphore * sem)
-{
- int ret = 0;
-
- might_sleep();
- if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
- ret = __down_interruptible(sem);
- return ret;
-}
-
-static inline int
-down_trylock (struct semaphore *sem)
-{
- int ret = 0;
-
- if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
- ret = __down_trylock(sem);
- return ret;
-}
-
-static inline void
-up (struct semaphore * sem)
-{
- if (ia64_fetchadd(1, &sem->count.counter, rel) <= -1)
- __up(sem);
-}
-
-#endif /* _ASM_IA64_SEMAPHORE_H */
+#include <linux/semaphore.h>
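[Editor's note: this and the analogous one-line replacements below complete the switch to the generic semaphore implementation; callers keep the same API, now provided by <linux/semaphore.h>. A minimal usage sketch, kernel context assumed:

#include <linux/semaphore.h>

static DECLARE_MUTEX(my_sem);           /* counting semaphore, count 1 */

static int do_work(void)
{
        if (down_interruptible(&my_sem))
                return -EINTR;          /* woken by a signal */
        /* ... critical section ... */
        up(&my_sem);
        return 0;
}
]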
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 4fa733dd417a..ec5f355fb7e3 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -38,6 +38,9 @@ ia64_get_lid (void)
return lid.f.id << 8 | lid.f.eid;
}
+extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+ void *info, int wait);
+
#define hard_smp_processor_id() ia64_get_lid()
#ifdef CONFIG_SMP
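[Editor's note: a sketch of using the newly exported helper to run a function on a set of CPUs. The function names and the convention of excluding the calling CPU are assumptions for illustration; preemption is assumed disabled by the caller:

#include <linux/cpumask.h>
#include <linux/smp.h>

static void drain_local_caches(void *info)
{
        /* runs on every CPU in the mask */
}

static void drain_remote_caches(cpumask_t mask)
{
        cpu_clear(smp_processor_id(), mask);    /* don't IPI ourselves */
        smp_call_function_mask(mask, drain_local_caches, NULL, 1 /* wait */);
}
]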
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 595112bca3cc..dff8128fa58e 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -210,6 +210,13 @@ struct task_struct;
extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next);
+# define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n)
+#else
+# define IA64_ACCOUNT_ON_SWITCH(p,n)
+#endif
+
#ifdef CONFIG_PERFMON
DECLARE_PER_CPU(unsigned long, pfm_syst_info);
# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
@@ -222,6 +229,7 @@ extern void ia64_load_extra (struct task_struct *task);
|| IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())
#define __switch_to(prev,next,last) do { \
+ IA64_ACCOUNT_ON_SWITCH(prev, next); \
if (IA64_HAS_EXTRA_STATE(prev)) \
ia64_save_extra(prev); \
if (IA64_HAS_EXTRA_STATE(next)) \
@@ -266,6 +274,10 @@ void cpu_idle_wait(void);
void default_idle(void);
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 93d83cbe0c8c..6da8069a0f77 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -31,6 +31,12 @@ struct thread_info {
mm_segment_t addr_limit; /* user-level address space limit */
int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
struct restart_block restart_block;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ __u64 ac_stamp;
+ __u64 ac_leave;
+ __u64 ac_stime;
+ __u64 ac_utime;
+#endif
};
#define THREAD_SIZE KERNEL_STACK_SIZE
@@ -62,9 +68,17 @@ struct thread_info {
#define task_stack_page(tsk) ((void *)(tsk))
#define __HAVE_THREAD_FUNCTIONS
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#define setup_thread_stack(p, org) \
+ *task_thread_info(p) = *task_thread_info(org); \
+ task_thread_info(p)->ac_stime = 0; \
+ task_thread_info(p)->ac_utime = 0; \
+ task_thread_info(p)->task = (p);
+#else
#define setup_thread_stack(p, org) \
*task_thread_info(p) = *task_thread_info(org); \
task_thread_info(p)->task = (p);
+#endif
#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
diff --git a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h
index 26edcb750f9f..20d8a39680c2 100644
--- a/include/asm-ia64/tlb.h
+++ b/include/asm-ia64/tlb.h
@@ -64,6 +64,32 @@ struct mmu_gather {
struct page *pages[FREE_PTE_NR];
};
+struct ia64_tr_entry {
+ u64 ifa;
+ u64 itir;
+ u64 pte;
+ u64 rr;
+}; /* record for a TR entry */
+
+extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
+extern void ia64_ptr_entry(u64 target_mask, int slot);
+
+extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+
+/*
+ * Region register macros
+ */
+#define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001)
+#define RR_VE(val) (((val) & 0x0000000000000001) << 0)
+#define RR_VE_MASK 0x0000000000000001L
+#define RR_VE_SHIFT 0
+#define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f)
+#define RR_PS(val) (((val) & 0x000000000000003f) << 2)
+#define RR_PS_MASK 0x00000000000000fcL
+#define RR_PS_SHIFT 2
+#define RR_RID_MASK 0x00000000ffffff00L
+#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
+
/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
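[Editor's note: a standalone sketch of pulling fields out of a region register value with the new macros; the sample value is made up:

#include <stdio.h>

#define RR_TO_VE(val)  (((val) >> 0) & 0x0000000000000001)
#define RR_TO_PS(val)  (((val) >> 2) & 0x000000000000003f)
#define RR_TO_RID(val) (((val) >> 8) & 0xffffff)

int main(void)
{
        /* rid 0x1234, 64KB pages (2^16), VHPT walker enabled */
        unsigned long rr = (0x1234UL << 8) | (16UL << 2) | 1UL;

        printf("ve=%lu ps=2^%lu rid=0x%lx\n",
               RR_TO_VE(rr), RR_TO_PS(rr), RR_TO_RID(rr));
        return 0;
}
]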
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h
index 7774a1cac0cc..3be25dfed164 100644
--- a/include/asm-ia64/tlbflush.h
+++ b/include/asm-ia64/tlbflush.h
@@ -17,6 +17,7 @@
* Now for some TLB flushing routines. This is the kind of stuff that
* can be very expensive, so try to avoid them whenever possible.
*/
+extern void setup_ptcg_sem(int max_purges, int from_palo);
/*
* Flush everything (kernel mapping may also have changed due to
diff --git a/include/asm-m32r/ide.h b/include/asm-m32r/ide.h
index 5d2044e529ab..1e7f6474d130 100644
--- a/include/asm-m32r/ide.h
+++ b/include/asm-m32r/ide.h
@@ -23,8 +23,6 @@
# endif
#endif
-#define IDE_ARCH_OBSOLETE_DEFAULTS
-
static __inline__ int ide_default_irq(unsigned long base)
{
switch (base) {
@@ -65,14 +63,6 @@ static __inline__ unsigned long ide_default_io_base(int index)
}
}
-#define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */
-
-#ifdef CONFIG_BLK_DEV_IDEPCI
-#define ide_init_default_irq(base) (0)
-#else
-#define ide_init_default_irq(base) ide_default_irq(base)
-#endif
-
#include <asm-generic/ide_iops.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-m32r/semaphore.h b/include/asm-m32r/semaphore.h
index b5bf95a6f2b4..d9b2034ed1d2 100644
--- a/include/asm-m32r/semaphore.h
+++ b/include/asm-m32r/semaphore.h
@@ -1,144 +1 @@
-#ifndef _ASM_M32R_SEMAPHORE_H
-#define _ASM_M32R_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#ifdef __KERNEL__
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * Copyright (C) 1996 Linus Torvalds
- * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
- */
-
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-#include <asm/assembler.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .sleepers = 0, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-/*
- * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * i'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
- */
- atomic_set(&sem->count, val);
- sem->sleepers = 0;
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-/*
- * Atomically decrement the semaphore's count. If it goes negative,
- * block the calling thread in the TASK_UNINTERRUPTIBLE state.
- */
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
- if (unlikely(atomic_dec_return(&sem->count) < 0))
- __down(sem);
-}
-
-/*
- * Interruptible try to acquire a semaphore. If we obtained
- * it, return zero. If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
- int result = 0;
-
- might_sleep();
- if (unlikely(atomic_dec_return(&sem->count) < 0))
- result = __down_interruptible(sem);
-
- return result;
-}
-
-/*
- * Non-blockingly attempt to down() a semaphore.
- * Returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore * sem)
-{
- unsigned long flags;
- long count;
- int result = 0;
-
- local_irq_save(flags);
- __asm__ __volatile__ (
- "# down_trylock \n\t"
- DCACHE_CLEAR("%0", "r4", "%1")
- M32R_LOCK" %0, @%1; \n\t"
- "addi %0, #-1; \n\t"
- M32R_UNLOCK" %0, @%1; \n\t"
- : "=&r" (count)
- : "r" (&sem->count)
- : "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
- , "r4"
-#endif /* CONFIG_CHIP_M32700_TS1 */
- );
- local_irq_restore(flags);
-
- if (unlikely(count < 0))
- result = __down_trylock(sem);
-
- return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
- if (unlikely(atomic_inc_return(&sem->count) <= 0))
- __up(sem);
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_M32R_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-m68k/semaphore-helper.h b/include/asm-m68k/semaphore-helper.h
deleted file mode 100644
index eef30ba0b499..000000000000
--- a/include/asm-m68k/semaphore-helper.h
+++ /dev/null
@@ -1,142 +0,0 @@
-#ifndef _M68K_SEMAPHORE_HELPER_H
-#define _M68K_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * m68k version by Andreas Schwab
- */
-
-#include <linux/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
- atomic_inc(&sem->waking);
-}
-
-#ifndef CONFIG_RMW_INSNS
-extern spinlock_t semaphore_wake_lock;
-#endif
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
- int ret;
-#ifndef CONFIG_RMW_INSNS
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 0;
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-#else
- int tmp1, tmp2;
-
- __asm__ __volatile__
- ("1: movel %1,%2\n"
- " jle 2f\n"
- " subql #1,%2\n"
- " casl %1,%2,%3\n"
- " jne 1b\n"
- " moveq #1,%0\n"
- "2:"
- : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
- : "m" (sem->waking), "0" (0), "1" (sem->waking));
-#endif
-
- return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- int ret;
-#ifndef CONFIG_RMW_INSNS
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 0;
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 1;
- } else if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-#else
- int tmp1, tmp2;
-
- __asm__ __volatile__
- ("1: movel %1,%2\n"
- " jle 2f\n"
- " subql #1,%2\n"
- " casl %1,%2,%3\n"
- " jne 1b\n"
- " moveq #1,%0\n"
- " jra %a4\n"
- "2:"
- : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
- : "m" (sem->waking), "i" (&&next), "0" (0), "1" (sem->waking));
- if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
-next:
-#endif
-
- return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
- int ret;
-#ifndef CONFIG_RMW_INSNS
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 1;
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 0;
- } else
- atomic_inc(&sem->count);
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-#else
- int tmp1, tmp2;
-
- __asm__ __volatile__
- ("1: movel %1,%2\n"
- " jle 2f\n"
- " subql #1,%2\n"
- " casl %1,%2,%3\n"
- " jne 1b\n"
- " moveq #0,%0\n"
- "2:"
- : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
- : "m" (sem->waking), "0" (1), "1" (sem->waking));
- if (ret)
- atomic_inc(&sem->count);
-#endif
- return ret;
-}
-
-#endif
diff --git a/include/asm-m68k/semaphore.h b/include/asm-m68k/semaphore.h
index 64d6b119bb0a..d9b2034ed1d2 100644
--- a/include/asm-m68k/semaphore.h
+++ b/include/asm-m68k/semaphore.h
@@ -1,163 +1 @@
-#ifndef _M68K_SEMAPHORE_H
-#define _M68K_SEMAPHORE_H
-
-#define RW_LOCK_BIAS 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-#include <linux/stringify.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * m68k version by Andreas Schwab
- */
-
-
-struct semaphore {
- atomic_t count;
- atomic_t waking;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .waking = ATOMIC_INIT(0), \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
- *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/m68k/lib/semaphore.S
- */
-static inline void down(struct semaphore *sem)
-{
- register struct semaphore *sem1 __asm__ ("%a1") = sem;
-
- might_sleep();
- __asm__ __volatile__(
- "| atomic down operation\n\t"
- "subql #1,%0@\n\t"
- "jmi 2f\n\t"
- "1:\n"
- LOCK_SECTION_START(".even\n\t")
- "2:\tpea 1b\n\t"
- "jbra __down_failed\n"
- LOCK_SECTION_END
- : /* no outputs */
- : "a" (sem1)
- : "memory");
-}
-
-static inline int down_interruptible(struct semaphore *sem)
-{
- register struct semaphore *sem1 __asm__ ("%a1") = sem;
- register int result __asm__ ("%d0");
-
- might_sleep();
- __asm__ __volatile__(
- "| atomic interruptible down operation\n\t"
- "subql #1,%1@\n\t"
- "jmi 2f\n\t"
- "clrl %0\n"
- "1:\n"
- LOCK_SECTION_START(".even\n\t")
- "2:\tpea 1b\n\t"
- "jbra __down_failed_interruptible\n"
- LOCK_SECTION_END
- : "=d" (result)
- : "a" (sem1)
- : "memory");
- return result;
-}
-
-static inline int down_trylock(struct semaphore *sem)
-{
- register struct semaphore *sem1 __asm__ ("%a1") = sem;
- register int result __asm__ ("%d0");
-
- __asm__ __volatile__(
- "| atomic down trylock operation\n\t"
- "subql #1,%1@\n\t"
- "jmi 2f\n\t"
- "clrl %0\n"
- "1:\n"
- LOCK_SECTION_START(".even\n\t")
- "2:\tpea 1b\n\t"
- "jbra __down_failed_trylock\n"
- LOCK_SECTION_END
- : "=d" (result)
- : "a" (sem1)
- : "memory");
- return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore *sem)
-{
- register struct semaphore *sem1 __asm__ ("%a1") = sem;
-
- __asm__ __volatile__(
- "| atomic up operation\n\t"
- "addql #1,%0@\n\t"
- "jle 2f\n"
- "1:\n"
- LOCK_SECTION_START(".even\n\t")
- "2:\t"
- "pea 1b\n\t"
- "jbra __up_wakeup\n"
- LOCK_SECTION_END
- : /* no outputs */
- : "a" (sem1)
- : "memory");
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-m68knommu/semaphore-helper.h b/include/asm-m68knommu/semaphore-helper.h
deleted file mode 100644
index 43da7bc483c7..000000000000
--- a/include/asm-m68knommu/semaphore-helper.h
+++ /dev/null
@@ -1,82 +0,0 @@
-#ifndef _M68K_SEMAPHORE_HELPER_H
-#define _M68K_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * m68k version by Andreas Schwab
- */
-
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
- atomic_inc(&sem->waking);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 0;
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 0;
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 1;
- } else if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 1;
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 0;
- } else
- atomic_inc(&sem->count);
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-#endif
diff --git a/include/asm-m68knommu/semaphore.h b/include/asm-m68knommu/semaphore.h
index 5779eb6c0689..d9b2034ed1d2 100644
--- a/include/asm-m68knommu/semaphore.h
+++ b/include/asm-m68knommu/semaphore.h
@@ -1,153 +1 @@
-#ifndef _M68K_SEMAPHORE_H
-#define _M68K_SEMAPHORE_H
-
-#define RW_LOCK_BIAS 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * m68k version by Andreas Schwab
- */
-
-
-struct semaphore {
- atomic_t count;
- atomic_t waking;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .waking = ATOMIC_INIT(0), \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/m68k/lib/semaphore.S
- */
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
- __asm__ __volatile__(
- "| atomic down operation\n\t"
- "movel %0, %%a1\n\t"
- "lea %%pc@(1f), %%a0\n\t"
- "subql #1, %%a1@\n\t"
- "jmi __down_failed\n"
- "1:"
- : /* no outputs */
- : "g" (sem)
- : "cc", "%a0", "%a1", "memory");
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- int ret;
-
- might_sleep();
- __asm__ __volatile__(
- "| atomic down operation\n\t"
- "movel %1, %%a1\n\t"
- "lea %%pc@(1f), %%a0\n\t"
- "subql #1, %%a1@\n\t"
- "jmi __down_failed_interruptible\n\t"
- "clrl %%d0\n"
- "1: movel %%d0, %0\n"
- : "=d" (ret)
- : "g" (sem)
- : "cc", "%d0", "%a0", "%a1", "memory");
- return(ret);
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
- register struct semaphore *sem1 __asm__ ("%a1") = sem;
- register int result __asm__ ("%d0");
-
- __asm__ __volatile__(
- "| atomic down trylock operation\n\t"
- "subql #1,%1@\n\t"
- "jmi 2f\n\t"
- "clrl %0\n"
- "1:\n"
- ".section .text.lock,\"ax\"\n"
- ".even\n"
- "2:\tpea 1b\n\t"
- "jbra __down_failed_trylock\n"
- ".previous"
- : "=d" (result)
- : "a" (sem1)
- : "memory");
- return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
- __asm__ __volatile__(
- "| atomic up operation\n\t"
- "movel %0, %%a1\n\t"
- "lea %%pc@(1f), %%a0\n\t"
- "addql #1, %%a1@\n\t"
- "jle __up_wakeup\n"
- "1:"
- : /* no outputs */
- : "g" (sem)
- : "cc", "%a0", "%a1", "memory");
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-mips/mach-au1x00/au1xxx_ide.h b/include/asm-mips/mach-au1x00/au1xxx_ide.h
index e4fe26c160ba..89655c0cdcd6 100644
--- a/include/asm-mips/mach-au1x00/au1xxx_ide.h
+++ b/include/asm-mips/mach-au1x00/au1xxx_ide.h
@@ -122,24 +122,6 @@ static const struct drive_list_entry dma_black_list [] = {
};
#endif
-/* function prototyping */
-u8 auide_inb(unsigned long port);
-u16 auide_inw(unsigned long port);
-u32 auide_inl(unsigned long port);
-void auide_insw(unsigned long port, void *addr, u32 count);
-void auide_insl(unsigned long port, void *addr, u32 count);
-void auide_outb(u8 addr, unsigned long port);
-void auide_outbsync(ide_drive_t *drive, u8 addr, unsigned long port);
-void auide_outw(u16 addr, unsigned long port);
-void auide_outl(u32 addr, unsigned long port);
-void auide_outsw(unsigned long port, void *addr, u32 count);
-void auide_outsl(unsigned long port, void *addr, u32 count);
-static void auide_tune_drive(ide_drive_t *drive, byte pio);
-static int auide_tune_chipset(ide_drive_t *drive, u8 speed);
-static int auide_ddma_init( _auide_hwif *auide );
-static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif);
-int __init auide_probe(void);
-
/*******************************************************************************
* PIO Mode timing calculation : *
* *
diff --git a/include/asm-mips/mach-db1x00/db1200.h b/include/asm-mips/mach-db1x00/db1200.h
index a6bdac61ab49..d2e28e64932e 100644
--- a/include/asm-mips/mach-db1x00/db1200.h
+++ b/include/asm-mips/mach-db1x00/db1200.h
@@ -173,8 +173,8 @@ static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
#define AU1XXX_SMC91111_IRQ DB1200_ETH_INT
#define AU1XXX_ATA_PHYS_ADDR (0x18800000)
-#define AU1XXX_ATA_PHYS_LEN (0x100)
-#define AU1XXX_ATA_REG_OFFSET (5)
+#define AU1XXX_ATA_REG_OFFSET (5)
+#define AU1XXX_ATA_PHYS_LEN (16 << AU1XXX_ATA_REG_OFFSET)
#define AU1XXX_ATA_INT DB1200_IDE_INT
#define AU1XXX_ATA_DDMA_REQ DSCR_CMD0_DMA_REQ1;
#define AU1XXX_ATA_RQSIZE 128
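[Editor's note: the ATA window length is now derived from the register stride instead of being hard-coded: 16 registers spaced 1 << 5 bytes apart give 16 << 5 = 0x200 bytes, twice the old 0x100. The identical change appears in pb1200.h below. A one-line check:

#include <stdio.h>

#define AU1XXX_ATA_REG_OFFSET (5)
#define AU1XXX_ATA_PHYS_LEN   (16 << AU1XXX_ATA_REG_OFFSET)

int main(void)
{
        printf("0x%x\n", AU1XXX_ATA_PHYS_LEN);  /* prints 0x200 */
        return 0;
}
]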
diff --git a/include/asm-mips/mach-generic/ide.h b/include/asm-mips/mach-generic/ide.h
index 4ec2b930dfbb..0f6c251f5fec 100644
--- a/include/asm-mips/mach-generic/ide.h
+++ b/include/asm-mips/mach-generic/ide.h
@@ -27,8 +27,6 @@
# endif
#endif
-#define IDE_ARCH_OBSOLETE_DEFAULTS
-
static __inline__ int ide_probe_legacy(void)
{
#ifdef CONFIG_PCI
@@ -98,14 +96,6 @@ static __inline__ unsigned long ide_default_io_base(int index)
}
}
-#define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */
-
-#ifdef CONFIG_BLK_DEV_IDEPCI
-#define ide_init_default_irq(base) (0)
-#else
-#define ide_init_default_irq(base) ide_default_irq(base)
-#endif
-
/* MIPS port and memory-mapped I/O string operations. */
static inline void __ide_flush_prologue(void)
{
diff --git a/include/asm-mips/mach-pb1x00/pb1200.h b/include/asm-mips/mach-pb1x00/pb1200.h
index 72213e3d02c7..edaa489b58f1 100644
--- a/include/asm-mips/mach-pb1x00/pb1200.h
+++ b/include/asm-mips/mach-pb1x00/pb1200.h
@@ -186,8 +186,8 @@ static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
#define AU1XXX_SMC91111_IRQ PB1200_ETH_INT
#define AU1XXX_ATA_PHYS_ADDR (0x0C800000)
-#define AU1XXX_ATA_PHYS_LEN (0x100)
-#define AU1XXX_ATA_REG_OFFSET (5)
+#define AU1XXX_ATA_REG_OFFSET (5)
+#define AU1XXX_ATA_PHYS_LEN (16 << AU1XXX_ATA_REG_OFFSET)
#define AU1XXX_ATA_INT PB1200_IDE_INT
#define AU1XXX_ATA_DDMA_REQ DSCR_CMD0_DMA_REQ1;
#define AU1XXX_ATA_RQSIZE 128
diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h
index fdf8042b784b..d9b2034ed1d2 100644
--- a/include/asm-mips/semaphore.h
+++ b/include/asm-mips/semaphore.h
@@ -1,108 +1 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1996 Linus Torvalds
- * Copyright (C) 1998, 99, 2000, 01, 04 Ralf Baechle
- * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
- * Copyright (C) 2000, 01 MIPS Technologies, Inc.
- *
- * In all honesty, little of the old MIPS code left - the PPC64 variant was
- * just looking nice and portable so I ripped it. Credits to whoever wrote
- * it.
- */
-#ifndef __ASM_SEMAPHORE_H
-#define __ASM_SEMAPHORE_H
-
-/*
- * Remove spinlock-based RW semaphores; RW semaphore definitions are
- * now in rwsem.h and we use the generic lib/rwsem.c implementation.
- * Rework semaphores to use atomic_dec_if_positive.
- * -- Paul Mackerras (paulus@samba.org)
- */
-
-#ifdef __KERNEL__
-
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
- /*
- * Note that any negative value of count is equivalent to 0,
- * but additionally indicates that some process(es) might be
- * sleeping on `wait'.
- */
- atomic_t count;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
- atomic_set(&sem->count, val);
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX(struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED(struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
-
- /*
- * Try to get the semaphore, take the slow path if we fail.
- */
- if (unlikely(atomic_dec_return(&sem->count) < 0))
- __down(sem);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- int ret = 0;
-
- might_sleep();
-
- if (unlikely(atomic_dec_return(&sem->count) < 0))
- ret = __down_interruptible(sem);
- return ret;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
- return atomic_dec_if_positive(&sem->count) < 0;
-}
-
-static inline void up(struct semaphore * sem)
-{
- if (unlikely(atomic_inc_return(&sem->count) <= 0))
- __up(sem);
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASM_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-mn10300/semaphore.h b/include/asm-mn10300/semaphore.h
index 5a9e1ad0b253..d9b2034ed1d2 100644
--- a/include/asm-mn10300/semaphore.h
+++ b/include/asm-mn10300/semaphore.h
@@ -1,169 +1 @@
-/* MN10300 Semaphores
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#ifndef _ASM_SEMAPHORE_H
-#define _ASM_SEMAPHORE_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-#define SEMAPHORE_DEBUG 0
-
-/*
- * the semaphore definition
- * - if count is >0 then there are tokens available on the semaphore for down
- * to collect
- * - if count is <=0 then there are no spare tokens, and anyone that wants one
- * must wait
- * - if wait_list is not empty, then there are processes waiting for the
- * semaphore
- */
-struct semaphore {
- atomic_t count; /* it's not really atomic, it's
- * just that certain modules
- * expect to be able to access
- * it directly */
- spinlock_t wait_lock;
- struct list_head wait_list;
-#if SEMAPHORE_DEBUG
- unsigned __magic;
-#endif
-};
-
-#if SEMAPHORE_DEBUG
-# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
-#else
-# define __SEM_DEBUG_INIT(name)
-#endif
-
-
-#define __SEMAPHORE_INITIALIZER(name, init_count) \
-{ \
- .count = ATOMIC_INIT(init_count), \
- .wait_lock = __SPIN_LOCK_UNLOCKED((name).wait_lock), \
- .wait_list = LIST_HEAD_INIT((name).wait_list) \
- __SEM_DEBUG_INIT(name) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
-#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
- *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX(struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED(struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore *sem, unsigned long flags);
-extern int __down_interruptible(struct semaphore *sem, unsigned long flags);
-extern void __up(struct semaphore *sem);
-
-static inline void down(struct semaphore *sem)
-{
- unsigned long flags;
- int count;
-
-#if SEMAPHORE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- spin_lock_irqsave(&sem->wait_lock, flags);
- count = atomic_read(&sem->count);
- if (likely(count > 0)) {
- atomic_set(&sem->count, count - 1);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
- } else {
- __down(sem, flags);
- }
-}
-
-static inline int down_interruptible(struct semaphore *sem)
-{
- unsigned long flags;
- int count, ret = 0;
-
-#if SEMAPHORE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- spin_lock_irqsave(&sem->wait_lock, flags);
- count = atomic_read(&sem->count);
- if (likely(count > 0)) {
- atomic_set(&sem->count, count - 1);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
- } else {
- ret = __down_interruptible(sem, flags);
- }
- return ret;
-}
-
-/*
- * non-blockingly attempt to down() a semaphore.
- * - returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore *sem)
-{
- unsigned long flags;
- int count, success = 0;
-
-#if SEMAPHORE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- spin_lock_irqsave(&sem->wait_lock, flags);
- count = atomic_read(&sem->count);
- if (likely(count > 0)) {
- atomic_set(&sem->count, count - 1);
- success = 1;
- }
- spin_unlock_irqrestore(&sem->wait_lock, flags);
- return !success;
-}
-
-static inline void up(struct semaphore *sem)
-{
- unsigned long flags;
-
-#if SEMAPHORE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- spin_lock_irqsave(&sem->wait_lock, flags);
- if (!list_empty(&sem->wait_list))
- __up(sem);
- else
- atomic_set(&sem->count, atomic_read(&sem->count) + 1);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
-static inline int sem_getcount(struct semaphore *sem)
-{
- return atomic_read(&sem->count);
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-parisc/ide.h b/include/asm-parisc/ide.h
index be8760fbc8ee..db0c94410095 100644
--- a/include/asm-parisc/ide.h
+++ b/include/asm-parisc/ide.h
@@ -17,8 +17,6 @@
#define MAX_HWIFS 2
#endif
-#define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */
-
#define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id))
#define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id))
#define ide_request_region(from,extent,name) request_region((from), (extent), (name))
diff --git a/include/asm-parisc/semaphore-helper.h b/include/asm-parisc/semaphore-helper.h
deleted file mode 100644
index 387f7c1277a2..000000000000
--- a/include/asm-parisc/semaphore-helper.h
+++ /dev/null
@@ -1,89 +0,0 @@
-#ifndef _ASM_PARISC_SEMAPHORE_HELPER_H
-#define _ASM_PARISC_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999 Andrea Arcangeli
- */
-
-/*
- * These two _must_ execute atomically wrt each other.
- *
- * This is trivially done with load_locked/store_cond,
- * which we have. Let the rest of the losers suck eggs.
- */
-static __inline__ void wake_one_more(struct semaphore * sem)
-{
- atomic_inc((atomic_t *)&sem->waking);
-}
-
-static __inline__ int waking_non_zero(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking > 0) {
- sem->waking--;
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking > 0) {
- sem->waking--;
- ret = 1;
- } else if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- *
- * We must undo the sem->count down_trylock() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 1;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking <= 0)
- atomic_inc(&sem->count);
- else {
- sem->waking--;
- ret = 0;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-#endif /* _ASM_PARISC_SEMAPHORE_HELPER_H */
diff --git a/include/asm-parisc/semaphore.h b/include/asm-parisc/semaphore.h
index a16271cdc748..d9b2034ed1d2 100644
--- a/include/asm-parisc/semaphore.h
+++ b/include/asm-parisc/semaphore.h
@@ -1,145 +1 @@
-/* SMP- and interrupt-safe semaphores.
- * PA-RISC version by Matthew Wilcox
- *
- * Linux/PA-RISC Project (http://www.parisc-linux.org/)
- * Copyright (C) 1996 Linus Torvalds
- * Copyright (C) 1999-2001 Matthew Wilcox < willy at debian d0T org >
- * Copyright (C) 2000 Grant Grundler < grundler a debian org >
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _ASM_PARISC_SEMAPHORE_H
-#define _ASM_PARISC_SEMAPHORE_H
-
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/system.h>
-
-/*
- * The `count' is initialised to the number of people who are allowed to
- * take the lock. (Normally we want a mutex, so this is `1'). if
- * `count' is positive, the lock can be taken. if it's 0, no-one is
- * waiting on it. if it's -1, at least one task is waiting.
- */
-struct semaphore {
- spinlock_t sentry;
- int count;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .sentry = SPIN_LOCK_UNLOCKED, \
- .count = n, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-static inline int sem_getcount(struct semaphore *sem)
-{
- return sem->count;
-}
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-/* Semaphores can be `tried' from irq context. So we have to disable
- * interrupts while we're messing with the semaphore. Sorry.
- */
-
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
- spin_lock_irq(&sem->sentry);
- if (sem->count > 0) {
- sem->count--;
- } else {
- __down(sem);
- }
- spin_unlock_irq(&sem->sentry);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- int ret = 0;
- might_sleep();
- spin_lock_irq(&sem->sentry);
- if (sem->count > 0) {
- sem->count--;
- } else {
- ret = __down_interruptible(sem);
- }
- spin_unlock_irq(&sem->sentry);
- return ret;
-}
-
-/*
- * down_trylock returns 0 on success, 1 if we failed to get the lock.
- * May not sleep, but must preserve irq state
- */
-static inline int down_trylock(struct semaphore * sem)
-{
- unsigned long flags;
- int count;
-
- spin_lock_irqsave(&sem->sentry, flags);
- count = sem->count - 1;
- if (count >= 0)
- sem->count = count;
- spin_unlock_irqrestore(&sem->sentry, flags);
- return (count < 0);
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- */
-static inline void up(struct semaphore * sem)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sem->sentry, flags);
- if (sem->count < 0) {
- __up(sem);
- } else {
- sem->count++;
- }
- spin_unlock_irqrestore(&sem->sentry, flags);
-}
-
-#endif /* _ASM_PARISC_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-powerpc/ide.h b/include/asm-powerpc/ide.h
index 6d50310ecaea..3d90bf7d3d73 100644
--- a/include/asm-powerpc/ide.h
+++ b/include/asm-powerpc/ide.h
@@ -31,39 +31,48 @@
#include <linux/hdreg.h>
#include <linux/ioport.h>
-struct ide_machdep_calls {
- int (*default_irq)(unsigned long base);
- unsigned long (*default_io_base)(int index);
- void (*ide_init_hwif)(hw_regs_t *hw,
- unsigned long data_port,
- unsigned long ctrl_port,
- int *irq);
-};
-
-extern struct ide_machdep_calls ppc_ide_md;
-
-#define IDE_ARCH_OBSOLETE_DEFAULTS
-
+/* FIXME: use ide_platform host driver */
static __inline__ int ide_default_irq(unsigned long base)
{
- if (ppc_ide_md.default_irq)
- return ppc_ide_md.default_irq(base);
+#ifdef CONFIG_PPLUS
+ switch (base) {
+ case 0x1f0: return 14;
+ case 0x170: return 15;
+ }
+#endif
+#ifdef CONFIG_PPC_PREP
+ switch (base) {
+ case 0x1f0: return 13;
+ case 0x170: return 13;
+ case 0x1e8: return 11;
+ case 0x168: return 10;
+ case 0xfff0: return 14; /* MCP(N)750 ide0 */
+ case 0xffe0: return 15; /* MCP(N)750 ide1 */
+ }
+#endif
return 0;
}
+/* FIXME: use ide_platform host driver */
static __inline__ unsigned long ide_default_io_base(int index)
{
- if (ppc_ide_md.default_io_base)
- return ppc_ide_md.default_io_base(index);
+#ifdef CONFIG_PPLUS
+ switch (index) {
+ case 0: return 0x1f0;
+ case 1: return 0x170;
+ }
+#endif
+#ifdef CONFIG_PPC_PREP
+ switch (index) {
+ case 0: return 0x1f0;
+ case 1: return 0x170;
+ case 2: return 0x1e8;
+ case 3: return 0x168;
+ }
+#endif
return 0;
}
-#ifdef CONFIG_PCI
-#define ide_init_default_irq(base) (0)
-#else
-#define ide_init_default_irq(base) ide_default_irq(base)
-#endif
-
#ifdef CONFIG_BLK_DEV_MPC8xx_IDE
#define IDE_ARCH_ACK_INTR 1
#define ide_ack_intr(hwif) ((hwif)->ack_intr ? (hwif)->ack_intr(hwif) : 1)
@@ -71,8 +80,6 @@ static __inline__ unsigned long ide_default_io_base(int index)
#endif /* __powerpc64__ */
-#define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_IDE_H */
diff --git a/include/asm-powerpc/mediabay.h b/include/asm-powerpc/mediabay.h
index de83fe196309..df111c362a7f 100644
--- a/include/asm-powerpc/mediabay.h
+++ b/include/asm-powerpc/mediabay.h
@@ -22,10 +22,14 @@ int check_media_bay(struct device_node *which_bay, int what);
/* Number of bays in the machine or 0 */
extern int media_bay_count;
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
+#include <linux/ide.h>
+
int check_media_bay_by_base(unsigned long base, int what);
/* called by IDE PMAC host driver to register IDE controller for media bay */
int media_bay_set_ide_infos(struct device_node *which_bay, unsigned long base,
- int irq, int index);
+ int irq, ide_hwif_t *hwif);
+#endif
#endif /* __KERNEL__ */
#endif /* _PPC_MEDIABAY_H */
diff --git a/include/asm-powerpc/semaphore.h b/include/asm-powerpc/semaphore.h
index 48dd32e07749..d9b2034ed1d2 100644
--- a/include/asm-powerpc/semaphore.h
+++ b/include/asm-powerpc/semaphore.h
@@ -1,94 +1 @@
-#ifndef _ASM_POWERPC_SEMAPHORE_H
-#define _ASM_POWERPC_SEMAPHORE_H
-
-/*
- * Remove spinlock-based RW semaphores; RW semaphore definitions are
- * now in rwsem.h and we use the generic lib/rwsem.c implementation.
- * Rework semaphores to use atomic_dec_if_positive.
- * -- Paul Mackerras (paulus@samba.org)
- */
-
-#ifdef __KERNEL__
-
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
- /*
- * Note that any negative value of count is equivalent to 0,
- * but additionally indicates that some process(es) might be
- * sleeping on `wait'.
- */
- atomic_t count;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- atomic_set(&sem->count, val);
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
-
- /*
- * Try to get the semaphore, take the slow path if we fail.
- */
- if (unlikely(atomic_dec_return(&sem->count) < 0))
- __down(sem);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- int ret = 0;
-
- might_sleep();
-
- if (unlikely(atomic_dec_return(&sem->count) < 0))
- ret = __down_interruptible(sem);
- return ret;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
- return atomic_dec_if_positive(&sem->count) < 0;
-}
-
-static inline void up(struct semaphore * sem)
-{
- if (unlikely(atomic_inc_return(&sem->count) <= 0))
- __up(sem);
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_POWERPC_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h
index 123b557c3ff4..0818ecd30ca6 100644
--- a/include/asm-s390/cio.h
+++ b/include/asm-s390/cio.h
@@ -397,6 +397,10 @@ struct cio_iplinfo {
extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo);
+/* Function from drivers/s390/cio/chsc.c */
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl);
+int chsc_sstpi(void *page, void *result, size_t size);
+
#endif
#endif
diff --git a/include/asm-s390/cpu.h b/include/asm-s390/cpu.h
index 352dde194f3c..e5a6a9ba3adf 100644
--- a/include/asm-s390/cpu.h
+++ b/include/asm-s390/cpu.h
@@ -22,4 +22,12 @@ struct s390_idle_data {
DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
+void s390_idle_leave(void);
+
+static inline void s390_idle_check(void)
+{
+ if ((&__get_cpu_var(s390_idle))->in_idle)
+ s390_idle_leave();
+}
+
#endif /* _ASM_S390_CPU_H_ */
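
s390_idle_check() is the classic fast-path/slow-path split: a cheap inline per-cpu test on every interrupt, with the out-of-line s390_idle_leave() taken only when an idle CPU was actually interrupted. A sketch of a typical call site (the surrounding handler is illustrative):

/* Sketch: run the cheap idle test on interrupt entry. */
static void ext_int_handler_sketch(void)
{
	s390_idle_check();	/* slow path only if we interrupted idle */
	/* ... handle the external interrupt ... */
}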
diff --git a/include/asm-s390/debug.h b/include/asm-s390/debug.h
index c00dd2b3dc50..335baf4fc64f 100644
--- a/include/asm-s390/debug.h
+++ b/include/asm-s390/debug.h
@@ -73,6 +73,7 @@ typedef struct debug_info {
struct dentry* debugfs_entries[DEBUG_MAX_VIEWS];
struct debug_view* views[DEBUG_MAX_VIEWS];
char name[DEBUG_MAX_NAME_LEN];
+ mode_t mode;
} debug_info_t;
typedef int (debug_header_proc_t) (debug_info_t* id,
@@ -122,6 +123,10 @@ debug_entry_t* debug_exception_common(debug_info_t* id, int level,
debug_info_t* debug_register(char* name, int pages, int nr_areas,
int buf_size);
+debug_info_t *debug_register_mode(char *name, int pages, int nr_areas,
+ int buf_size, mode_t mode, uid_t uid,
+ gid_t gid);
+
void debug_unregister(debug_info_t* id);
void debug_set_level(debug_info_t* id, int new_level);
diff --git a/include/asm-s390/extmem.h b/include/asm-s390/extmem.h
index c8802c934b74..33837d756184 100644
--- a/include/asm-s390/extmem.h
+++ b/include/asm-s390/extmem.h
@@ -22,11 +22,12 @@
#define SEGMENT_SHARED 0
#define SEGMENT_EXCLUSIVE 1
-extern int segment_load (char *name,int segtype,unsigned long *addr,unsigned long *length);
-extern void segment_unload(char *name);
-extern void segment_save(char *name);
-extern int segment_type (char* name);
-extern int segment_modify_shared (char *name, int do_nonshared);
+int segment_load (char *name, int segtype, unsigned long *addr, unsigned long *length);
+void segment_unload(char *name);
+void segment_save(char *name);
+int segment_type (char* name);
+int segment_modify_shared (char *name, int do_nonshared);
+void segment_warning(int rc, char *seg_name);
#endif
#endif
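
Together with the new segment_warning() helper, a DCSS user can load a segment and turn a failure code into a console diagnosis. A hedged usage sketch — the segment name is illustrative, and it assumes segment_load() returns the segment type on success and a negative code on failure:

/* Hypothetical DCSS caller built on the prototypes above. */
unsigned long addr, length;
int rc;

rc = segment_load("MYDCSS", SEGMENT_SHARED, &addr, &length);
if (rc < 0)
	segment_warning(rc, "MYDCSS");	/* explain the failure on the console */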
diff --git a/include/asm-s390/hardirq.h b/include/asm-s390/hardirq.h
index 31beb18cb3d1..4b7cb964ff35 100644
--- a/include/asm-s390/hardirq.h
+++ b/include/asm-s390/hardirq.h
@@ -32,6 +32,6 @@ typedef struct {
#define HARDIRQ_BITS 8
-extern void account_ticks(u64 time);
+void clock_comparator_work(void);
#endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 801a6fd35b5b..5de3efb31445 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -56,6 +56,8 @@
#define __LC_IO_INT_WORD 0x0C0
#define __LC_MCCK_CODE 0x0E8
+#define __LC_LAST_BREAK 0x110
+
#define __LC_RETURN_PSW 0x200
#define __LC_SAVE_AREA 0xC00
@@ -80,7 +82,6 @@
#define __LC_CPUID 0xC60
#define __LC_CPUADDR 0xC68
#define __LC_IPLDEV 0xC7C
-#define __LC_JIFFY_TIMER 0xC80
#define __LC_CURRENT 0xC90
#define __LC_INT_CLOCK 0xC98
#else /* __s390x__ */
@@ -103,7 +104,6 @@
#define __LC_CPUID 0xD80
#define __LC_CPUADDR 0xD88
#define __LC_IPLDEV 0xDB8
-#define __LC_JIFFY_TIMER 0xDC0
#define __LC_CURRENT 0xDD8
#define __LC_INT_CLOCK 0xDE8
#endif /* __s390x__ */
@@ -276,7 +276,7 @@ struct _lowcore
/* entry.S sensitive area end */
/* SMP info area: defined by DJB */
- __u64 jiffy_timer; /* 0xc80 */
+ __u64 clock_comparator; /* 0xc80 */
__u32 ext_call_fast; /* 0xc88 */
__u32 percpu_offset; /* 0xc8c */
__u32 current_task; /* 0xc90 */
@@ -368,11 +368,12 @@ struct _lowcore
/* entry.S sensitive area end */
/* SMP info area: defined by DJB */
- __u64 jiffy_timer; /* 0xdc0 */
+ __u64 clock_comparator; /* 0xdc0 */
__u64 ext_call_fast; /* 0xdc8 */
__u64 percpu_offset; /* 0xdd0 */
__u64 current_task; /* 0xdd8 */
- __u64 softirq_pending; /* 0xde0 */
+ __u32 softirq_pending; /* 0xde0 */
+ __u32 pad_0x0de4; /* 0xde4 */
__u64 int_clock; /* 0xde8 */
__u8 pad12[0xe00-0xdf0]; /* 0xdf0 */
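
Narrowing softirq_pending from __u64 to __u32 would shift every later field by four bytes, which is why the explicit pad_0x0de4 follows it: the lowcore layout is fixed by hardware and by entry-code offsets. A compile-time check makes the invariant explicit (a sketch, assuming the 64-bit layout commented above):

/* Sketch: assert that the pad keeps int_clock at its documented offset. */
static inline void lowcore_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct _lowcore, int_clock) != 0xde8);
}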
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 51d88912aa20..8eaf343a12a8 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -175,6 +175,13 @@ extern void task_show_regs(struct seq_file *m, struct task_struct *task);
extern void show_registers(struct pt_regs *regs);
extern void show_code(struct pt_regs *regs);
extern void show_trace(struct task_struct *task, unsigned long *sp);
+#ifdef CONFIG_64BIT
+extern void show_last_breaking_event(struct pt_regs *regs);
+#else
+static inline void show_last_breaking_event(struct pt_regs *regs)
+{
+}
+#endif
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h
index 0e7001ad8392..d9b2034ed1d2 100644
--- a/include/asm-s390/semaphore.h
+++ b/include/asm-s390/semaphore.h
@@ -1,107 +1 @@
-/*
- * include/asm-s390/semaphore.h
- *
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *
- * Derived from "include/asm-i386/semaphore.h"
- * (C) Copyright 1996 Linus Torvalds
- */
-
-#ifndef _S390_SEMAPHORE_H
-#define _S390_SEMAPHORE_H
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
- /*
- * Note that any negative value of count is equivalent to 0,
- * but additionally indicates that some process(es) might be
- * sleeping on `wait'.
- */
- atomic_t count;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name,count) \
- { ATOMIC_INIT(count), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- atomic_set(&sem->count, val);
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
- if (atomic_dec_return(&sem->count) < 0)
- __down(sem);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- int ret = 0;
-
- might_sleep();
- if (atomic_dec_return(&sem->count) < 0)
- ret = __down_interruptible(sem);
- return ret;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
- int old_val, new_val;
-
- /*
- * This inline assembly atomically implements the equivalent
- * to the following C code:
- * old_val = sem->count.counter;
- * if ((new_val = old_val) > 0)
- * sem->count.counter = --new_val;
- * In the ppc code this is called atomic_dec_if_positive.
- */
- asm volatile(
- " l %0,0(%3)\n"
- "0: ltr %1,%0\n"
- " jle 1f\n"
- " ahi %1,-1\n"
- " cs %0,%1,0(%3)\n"
- " jl 0b\n"
- "1:"
- : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count.counter)
- : "a" (&sem->count.counter), "m" (sem->count.counter)
- : "cc", "memory");
- return old_val <= 0;
-}
-
-static inline void up(struct semaphore * sem)
-{
- if (atomic_inc_return(&sem->count) <= 0)
- __up(sem);
-}
-
-#endif
+#include <linux/semaphore.h>
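
The deleted comment notes that the assembly implements atomic_dec_if_positive(). For reference, one common C formulation of that operation over cmpxchg looks like this (a sketch, not the kernel's code):

/* Sketch: decrement v only while it stays non-negative. */
static inline int dec_if_positive_sketch(atomic_t *v)
{
	int old, new;

	do {
		old = atomic_read(v);
		if (old <= 0)
			return old;	/* leave zero/negative counts alone */
		new = old - 1;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}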
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index c7b74326a527..6f3821a6a902 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -90,6 +90,9 @@ extern void __cpu_die (unsigned int cpu);
extern void cpu_die (void) __attribute__ ((noreturn));
extern int __cpu_up (unsigned int cpu);
+extern struct mutex smp_cpu_state_mutex;
+extern int smp_cpu_polarization[];
+
extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
void *info, int wait);
#endif
diff --git a/include/asm-s390/sysinfo.h b/include/asm-s390/sysinfo.h
new file mode 100644
index 000000000000..abe10ae15e46
--- /dev/null
+++ b/include/asm-s390/sysinfo.h
@@ -0,0 +1,116 @@
+/*
+ * Definitions for the store system information (stsi) instruction
+ *
+ * Copyright IBM Corp. 2001,2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Ulrich Weigand <weigand@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+struct sysinfo_1_1_1 {
+ char reserved_0[32];
+ char manufacturer[16];
+ char type[4];
+ char reserved_1[12];
+ char model_capacity[16];
+ char sequence[16];
+ char plant[4];
+ char model[16];
+ char model_perm_cap[16];
+ char model_temp_cap[16];
+ char model_cap_rating[4];
+ char model_perm_cap_rating[4];
+ char model_temp_cap_rating[4];
+};
+
+struct sysinfo_1_2_1 {
+ char reserved_0[80];
+ char sequence[16];
+ char plant[4];
+ char reserved_1[2];
+ unsigned short cpu_address;
+};
+
+struct sysinfo_1_2_2 {
+ char format;
+ char reserved_0[1];
+ unsigned short acc_offset;
+ char reserved_1[24];
+ unsigned int secondary_capability;
+ unsigned int capability;
+ unsigned short cpus_total;
+ unsigned short cpus_configured;
+ unsigned short cpus_standby;
+ unsigned short cpus_reserved;
+ unsigned short adjustment[0];
+};
+
+struct sysinfo_1_2_2_extension {
+ unsigned int alt_capability;
+ unsigned short alt_adjustment[0];
+};
+
+struct sysinfo_2_2_1 {
+ char reserved_0[80];
+ char sequence[16];
+ char plant[4];
+ unsigned short cpu_id;
+ unsigned short cpu_address;
+};
+
+struct sysinfo_2_2_2 {
+ char reserved_0[32];
+ unsigned short lpar_number;
+ char reserved_1;
+ unsigned char characteristics;
+ unsigned short cpus_total;
+ unsigned short cpus_configured;
+ unsigned short cpus_standby;
+ unsigned short cpus_reserved;
+ char name[8];
+ unsigned int caf;
+ char reserved_2[16];
+ unsigned short cpus_dedicated;
+ unsigned short cpus_shared;
+};
+
+#define LPAR_CHAR_DEDICATED (1 << 7)
+#define LPAR_CHAR_SHARED (1 << 6)
+#define LPAR_CHAR_LIMITED (1 << 5)
+
+struct sysinfo_3_2_2 {
+ char reserved_0[31];
+ unsigned char count;
+ struct {
+ char reserved_0[4];
+ unsigned short cpus_total;
+ unsigned short cpus_configured;
+ unsigned short cpus_standby;
+ unsigned short cpus_reserved;
+ char name[8];
+ unsigned int caf;
+ char cpi[16];
+ char reserved_1[24];
+
+ } vm[8];
+};
+
+static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
+{
+ register int r0 asm("0") = (fc << 28) | sel1;
+ register int r1 asm("1") = sel2;
+
+ asm volatile(
+ " stsi 0(%2)\n"
+ "0: jz 2f\n"
+ "1: lhi %0,%3\n"
+ "2:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
+ : "cc", "memory");
+ return r0;
+}
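
stsi() packs the function code and first selector into r0 and the second selector into r1, and returns -ENOSYS via the exception table when the requested block is unavailable. A hypothetical caller querying basic machine data (STSI 1.1.1; the page allocation details are illustrative — STSI just needs a 4K-aligned block):

/* Hypothetical: read the machine manufacturer via STSI 1.1.1. */
struct sysinfo_1_1_1 *info = (void *)get_zeroed_page(GFP_KERNEL);

if (info) {
	if (stsi(info, 1, 1, 1) != -ENOSYS)
		printk(KERN_INFO "manufacturer: %.16s\n", info->manufacturer);
	free_page((unsigned long)info);
}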
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 15aba30601a3..92098df4d6e3 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -406,6 +406,8 @@ __set_psw_mask(unsigned long mask)
#define local_mcck_enable() __set_psw_mask(psw_kernel_bits)
#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
+int stfle(unsigned long long *list, int doublewords);
+
#ifdef CONFIG_SMP
extern void smp_ctl_set_bit(int cr, int bit);
diff --git a/include/asm-s390/timex.h b/include/asm-s390/timex.h
index 98229db24314..d744c3d62de5 100644
--- a/include/asm-s390/timex.h
+++ b/include/asm-s390/timex.h
@@ -62,16 +62,18 @@ static inline unsigned long long get_clock (void)
return clk;
}
-static inline void get_clock_extended(void *dest)
+static inline unsigned long long get_clock_xt(void)
{
- typedef struct { unsigned long long clk[2]; } __clock_t;
+ unsigned char clk[16];
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
- asm volatile("stcke %0" : "=Q" (*((__clock_t *)dest)) : : "cc");
+ asm volatile("stcke %0" : "=Q" (clk) : : "cc");
#else /* __GNUC__ */
- asm volatile("stcke 0(%1)" : "=m" (*((__clock_t *)dest))
- : "a" ((__clock_t *)dest) : "cc");
+ asm volatile("stcke 0(%1)" : "=m" (clk)
+ : "a" (clk) : "cc");
#endif /* __GNUC__ */
+
+ return *((unsigned long long *)&clk[1]);
}
static inline cycles_t get_cycles(void)
@@ -81,5 +83,6 @@ static inline cycles_t get_cycles(void)
int get_sync_clock(unsigned long long *clock);
void init_cpu_timer(void);
+unsigned long long monotonic_clock(void);
#endif
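
get_clock_xt() returns bytes 1..8 of the 16-byte STCKE value, aligning it with the 8-byte STCK format in which bit 51 ticks once per microsecond. Under that assumption (documented in the s390 Principles of Operation, not in this header), converting a TOD value to microseconds is a single shift:

/* Sketch: TOD bit 51 = 1 microsecond, so drop the low 12 bits. */
static inline unsigned long long tod_to_us(unsigned long long clk)
{
	return clk >> 12;
}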
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 35fb4f9127b2..9e57a93d7de1 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -13,12 +13,14 @@ static inline void __tlb_flush_local(void)
asm volatile("ptlb" : : : "memory");
}
+#ifdef CONFIG_SMP
/*
* Flush all tlb entries on all cpus.
*/
+void smp_ptlb_all(void);
+
static inline void __tlb_flush_global(void)
{
- extern void smp_ptlb_all(void);
register unsigned long reg2 asm("2");
register unsigned long reg3 asm("3");
register unsigned long reg4 asm("4");
@@ -39,6 +41,25 @@ static inline void __tlb_flush_global(void)
: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
}
+static inline void __tlb_flush_full(struct mm_struct *mm)
+{
+ cpumask_t local_cpumask;
+
+ preempt_disable();
+ /*
+ * If the process only ran on the local cpu, do a local flush.
+ */
+ local_cpumask = cpumask_of_cpu(smp_processor_id());
+ if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
+ __tlb_flush_local();
+ else
+ __tlb_flush_global();
+ preempt_enable();
+}
+#else
+#define __tlb_flush_full(mm) __tlb_flush_local()
+#endif
+
/*
* Flush all tlb entries of a page table on all cpus.
*/
@@ -51,8 +72,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
static inline void __tlb_flush_mm(struct mm_struct * mm)
{
- cpumask_t local_cpumask;
-
if (unlikely(cpus_empty(mm->cpu_vm_mask)))
return;
/*
@@ -69,16 +88,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
mm->context.asce_bits);
return;
}
- preempt_disable();
- /*
- * If the process only ran on the local cpu, do a local flush.
- */
- local_cpumask = cpumask_of_cpu(smp_processor_id());
- if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
- __tlb_flush_local();
- else
- __tlb_flush_global();
- preempt_enable();
+ __tlb_flush_full(mm);
}
static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
diff --git a/include/asm-s390/topology.h b/include/asm-s390/topology.h
index 613aa64019da..8e97b06f298a 100644
--- a/include/asm-s390/topology.h
+++ b/include/asm-s390/topology.h
@@ -1,6 +1,29 @@
#ifndef _ASM_S390_TOPOLOGY_H
#define _ASM_S390_TOPOLOGY_H
+#include <linux/cpumask.h>
+
+#define mc_capable() (1)
+
+cpumask_t cpu_coregroup_map(unsigned int cpu);
+
+int topology_set_cpu_management(int fc);
+void topology_schedule_update(void);
+
+#define POLARIZATION_UNKNWN (-1)
+#define POLARIZATION_HRZ (0)
+#define POLARIZATION_VL (1)
+#define POLARIZATION_VM (2)
+#define POLARIZATION_VH (3)
+
+#ifdef CONFIG_SMP
+void s390_init_cpu_topology(void);
+#else
+static inline void s390_init_cpu_topology(void)
+{
+}
+#endif
+
#include <asm-generic/topology.h>
#endif /* _ASM_S390_TOPOLOGY_H */
diff --git a/include/asm-sh/ide.h b/include/asm-sh/ide.h
index 9f8e9142dc33..58e0bdd52be4 100644
--- a/include/asm-sh/ide.h
+++ b/include/asm-sh/ide.h
@@ -14,9 +14,6 @@
#ifdef __KERNEL__
-
-#define ide_default_io_ctl(base) (0)
-
#include <asm-generic/ide_iops.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-sh/semaphore-helper.h b/include/asm-sh/semaphore-helper.h
deleted file mode 100644
index bd8230c369ca..000000000000
--- a/include/asm-sh/semaphore-helper.h
+++ /dev/null
@@ -1,89 +0,0 @@
-#ifndef __ASM_SH_SEMAPHORE_HELPER_H
-#define __ASM_SH_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999 Andrea Arcangeli
- */
-
-/*
- * These two _must_ execute atomically wrt each other.
- *
- * This is trivially done with load_locked/store_cond,
- * which we have. Let the rest of the losers suck eggs.
- */
-static __inline__ void wake_one_more(struct semaphore * sem)
-{
- atomic_inc((atomic_t *)&sem->sleepers);
-}
-
-static __inline__ int waking_non_zero(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->sleepers > 0) {
- sem->sleepers--;
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- *
- * We must undo down_interruptible()'s decrement of sem->count while we are
- * still protected by the spinlock, so that this atomic_inc() is atomic with
- * respect to the atomic_read() in wake_one_more(); otherwise we can race. -arca
- */
-static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->sleepers > 0) {
- sem->sleepers--;
- ret = 1;
- } else if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- *
- * We must undo down_trylock()'s decrement of sem->count while we are
- * still protected by the spinlock, so that this atomic_inc() is atomic with
- * respect to the atomic_read() in wake_one_more(); otherwise we can race. -arca
- */
-static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 1;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->sleepers <= 0)
- atomic_inc(&sem->count);
- else {
- sem->sleepers--;
- ret = 0;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-#endif /* __ASM_SH_SEMAPHORE_HELPER_H */
diff --git a/include/asm-sh/semaphore.h b/include/asm-sh/semaphore.h
index 9e5a37c4dce2..d9b2034ed1d2 100644
--- a/include/asm-sh/semaphore.h
+++ b/include/asm-sh/semaphore.h
@@ -1,115 +1 @@
-#ifndef __ASM_SH_SEMAPHORE_H
-#define __ASM_SH_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#ifdef __KERNEL__
-/*
- * SMP- and interrupt-safe semaphores.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * SuperH version by Niibe Yutaka
- * (Currently no asm implementation but generic C code...)
- */
-
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-#include <linux/wait.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .sleepers = 0, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-/*
- * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * I'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
- */
- atomic_set(&sem->count, val);
- sem->sleepers = 0;
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-#if 0
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-#endif
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
- if (atomic_dec_return(&sem->count) < 0)
- __down(sem);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- int ret = 0;
-
- might_sleep();
- if (atomic_dec_return(&sem->count) < 0)
- ret = __down_interruptible(sem);
- return ret;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
- int ret = 0;
-
- if (atomic_dec_return(&sem->count) < 0)
- ret = __down_trylock(sem);
- return ret;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- */
-static inline void up(struct semaphore * sem)
-{
- if (atomic_inc_return(&sem->count) <= 0)
- __up(sem);
-}
-
-#endif
-#endif /* __ASM_SH_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-sparc/ide.h b/include/asm-sparc/ide.h
index 4076cb5d1581..afd1736ed480 100644
--- a/include/asm-sparc/ide.h
+++ b/include/asm-sparc/ide.h
@@ -17,8 +17,6 @@
#undef MAX_HWIFS
#define MAX_HWIFS 2
-#define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */
-
#define __ide_insl(data_reg, buffer, wcount) \
__ide_insw(data_reg, buffer, (wcount)<<1)
#define __ide_outsl(data_reg, buffer, wcount) \
diff --git a/include/asm-sparc/semaphore.h b/include/asm-sparc/semaphore.h
index 8018f9f4d497..d9b2034ed1d2 100644
--- a/include/asm-sparc/semaphore.h
+++ b/include/asm-sparc/semaphore.h
@@ -1,192 +1 @@
-#ifndef _SPARC_SEMAPHORE_H
-#define _SPARC_SEMAPHORE_H
-
-/* Dinky, good for nothing, just barely irq safe, Sparc semaphores. */
-
-#ifdef __KERNEL__
-
-#include <asm/atomic.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
- atomic24_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC24_INIT(n), \
- .sleepers = 0, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- atomic24_set(&sem->count, val);
- sem->sleepers = 0;
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern int __down_trylock(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-static inline void down(struct semaphore * sem)
-{
- register volatile int *ptr asm("g1");
- register int increment asm("g2");
-
- might_sleep();
-
- ptr = &(sem->count.counter);
- increment = 1;
-
- __asm__ __volatile__(
- "mov %%o7, %%g4\n\t"
- "call ___atomic24_sub\n\t"
- " add %%o7, 8, %%o7\n\t"
- "tst %%g2\n\t"
- "bl 2f\n\t"
- " nop\n"
- "1:\n\t"
- ".subsection 2\n"
- "2:\n\t"
- "save %%sp, -64, %%sp\n\t"
- "mov %%g1, %%l1\n\t"
- "mov %%g5, %%l5\n\t"
- "call %3\n\t"
- " mov %%g1, %%o0\n\t"
- "mov %%l1, %%g1\n\t"
- "ba 1b\n\t"
- " restore %%l5, %%g0, %%g5\n\t"
- ".previous\n"
- : "=&r" (increment)
- : "0" (increment), "r" (ptr), "i" (__down)
- : "g3", "g4", "g7", "memory", "cc");
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- register volatile int *ptr asm("g1");
- register int increment asm("g2");
-
- might_sleep();
-
- ptr = &(sem->count.counter);
- increment = 1;
-
- __asm__ __volatile__(
- "mov %%o7, %%g4\n\t"
- "call ___atomic24_sub\n\t"
- " add %%o7, 8, %%o7\n\t"
- "tst %%g2\n\t"
- "bl 2f\n\t"
- " clr %%g2\n"
- "1:\n\t"
- ".subsection 2\n"
- "2:\n\t"
- "save %%sp, -64, %%sp\n\t"
- "mov %%g1, %%l1\n\t"
- "mov %%g5, %%l5\n\t"
- "call %3\n\t"
- " mov %%g1, %%o0\n\t"
- "mov %%l1, %%g1\n\t"
- "mov %%l5, %%g5\n\t"
- "ba 1b\n\t"
- " restore %%o0, %%g0, %%g2\n\t"
- ".previous\n"
- : "=&r" (increment)
- : "0" (increment), "r" (ptr), "i" (__down_interruptible)
- : "g3", "g4", "g7", "memory", "cc");
-
- return increment;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
- register volatile int *ptr asm("g1");
- register int increment asm("g2");
-
- ptr = &(sem->count.counter);
- increment = 1;
-
- __asm__ __volatile__(
- "mov %%o7, %%g4\n\t"
- "call ___atomic24_sub\n\t"
- " add %%o7, 8, %%o7\n\t"
- "tst %%g2\n\t"
- "bl 2f\n\t"
- " clr %%g2\n"
- "1:\n\t"
- ".subsection 2\n"
- "2:\n\t"
- "save %%sp, -64, %%sp\n\t"
- "mov %%g1, %%l1\n\t"
- "mov %%g5, %%l5\n\t"
- "call %3\n\t"
- " mov %%g1, %%o0\n\t"
- "mov %%l1, %%g1\n\t"
- "mov %%l5, %%g5\n\t"
- "ba 1b\n\t"
- " restore %%o0, %%g0, %%g2\n\t"
- ".previous\n"
- : "=&r" (increment)
- : "0" (increment), "r" (ptr), "i" (__down_trylock)
- : "g3", "g4", "g7", "memory", "cc");
-
- return increment;
-}
-
-static inline void up(struct semaphore * sem)
-{
- register volatile int *ptr asm("g1");
- register int increment asm("g2");
-
- ptr = &(sem->count.counter);
- increment = 1;
-
- __asm__ __volatile__(
- "mov %%o7, %%g4\n\t"
- "call ___atomic24_add\n\t"
- " add %%o7, 8, %%o7\n\t"
- "tst %%g2\n\t"
- "ble 2f\n\t"
- " nop\n"
- "1:\n\t"
- ".subsection 2\n"
- "2:\n\t"
- "save %%sp, -64, %%sp\n\t"
- "mov %%g1, %%l1\n\t"
- "mov %%g5, %%l5\n\t"
- "call %3\n\t"
- " mov %%g1, %%o0\n\t"
- "mov %%l1, %%g1\n\t"
- "ba 1b\n\t"
- " restore %%l5, %%g0, %%g5\n\t"
- ".previous\n"
- : "=&r" (increment)
- : "0" (increment), "r" (ptr), "i" (__up)
- : "g3", "g4", "g7", "memory", "cc");
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* !(_SPARC_SEMAPHORE_H) */
+#include <linux/semaphore.h>
diff --git a/include/asm-sparc64/ide.h b/include/asm-sparc64/ide.h
index ac7eb210b941..c5fdabe0b42d 100644
--- a/include/asm-sparc64/ide.h
+++ b/include/asm-sparc64/ide.h
@@ -24,8 +24,6 @@
# endif
#endif
-#define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */
-
#define __ide_insl(data_reg, buffer, wcount) \
__ide_insw(data_reg, buffer, (wcount)<<1)
#define __ide_outsl(data_reg, buffer, wcount) \
diff --git a/include/asm-sparc64/semaphore.h b/include/asm-sparc64/semaphore.h
index 7f7c0c4e024f..d9b2034ed1d2 100644
--- a/include/asm-sparc64/semaphore.h
+++ b/include/asm-sparc64/semaphore.h
@@ -1,53 +1 @@
-#ifndef _SPARC64_SEMAPHORE_H
-#define _SPARC64_SEMAPHORE_H
-
-/* These are actually reasonable on the V9.
- *
- * See asm-ppc/semaphore.h for implementation commentary,
- * only sparc64 specific issues are commented here.
- */
-#ifdef __KERNEL__
-
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
- atomic_t count;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, count) \
- { ATOMIC_INIT(count), \
- __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
-
-#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- atomic_set(&sem->count, val);
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void up(struct semaphore *sem);
-extern void down(struct semaphore *sem);
-extern int down_trylock(struct semaphore *sem);
-extern int down_interruptible(struct semaphore *sem);
-
-#endif /* __KERNEL__ */
-
-#endif /* !(_SPARC64_SEMAPHORE_H) */
+#include <linux/semaphore.h>
diff --git a/include/asm-um/semaphore.h b/include/asm-um/semaphore.h
index ff13c34de421..d9b2034ed1d2 100644
--- a/include/asm-um/semaphore.h
+++ b/include/asm-um/semaphore.h
@@ -1,6 +1 @@
-#ifndef __UM_SEMAPHORE_H
-#define __UM_SEMAPHORE_H
-
-#include "asm/arch/semaphore.h"
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-um/tlb.h b/include/asm-um/tlb.h
index 39fc475df6c9..5240fa1c5e08 100644
--- a/include/asm-um/tlb.h
+++ b/include/asm-um/tlb.h
@@ -1,6 +1,7 @@
#ifndef __UM_TLB_H
#define __UM_TLB_H
+#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
diff --git a/include/asm-v850/semaphore.h b/include/asm-v850/semaphore.h
index 10ed0ccf37df..d9b2034ed1d2 100644
--- a/include/asm-v850/semaphore.h
+++ b/include/asm-v850/semaphore.h
@@ -1,84 +1 @@
-#ifndef __V850_SEMAPHORE_H__
-#define __V850_SEMAPHORE_H__
-
-#include <linux/linkage.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/atomic.h>
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name,count) \
- { ATOMIC_INIT (count), 0, \
- __WAIT_QUEUE_HEAD_INITIALIZER ((name).wait) }
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER (name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC (name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init (sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init (sem, 0);
-}
-
-/*
- * special register calling convention
- */
-asmlinkage void __down_failed (void);
-asmlinkage int __down_interruptible_failed (void);
-asmlinkage int __down_trylock_failed (void);
-asmlinkage void __up_wakeup (void);
-
-extern void __down (struct semaphore * sem);
-extern int __down_interruptible (struct semaphore * sem);
-extern int __down_trylock (struct semaphore * sem);
-extern void __up (struct semaphore * sem);
-
-static inline void down (struct semaphore * sem)
-{
- might_sleep();
- if (atomic_dec_return (&sem->count) < 0)
- __down (sem);
-}
-
-static inline int down_interruptible (struct semaphore * sem)
-{
- int ret = 0;
- might_sleep();
- if (atomic_dec_return (&sem->count) < 0)
- ret = __down_interruptible (sem);
- return ret;
-}
-
-static inline int down_trylock (struct semaphore *sem)
-{
- int ret = 0;
- if (atomic_dec_return (&sem->count) < 0)
- ret = __down_trylock (sem);
- return ret;
-}
-
-static inline void up (struct semaphore * sem)
-{
- if (atomic_inc_return (&sem->count) <= 0)
- __up (sem);
-}
-
-#endif /* __V850_SEMAPHORE_H__ */
+#include <linux/semaphore.h>
diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild
index 3b8160a2b47e..1e3554596f72 100644
--- a/include/asm-x86/Kbuild
+++ b/include/asm-x86/Kbuild
@@ -10,6 +10,7 @@ header-y += prctl.h
header-y += ptrace-abi.h
header-y += sigcontext32.h
header-y += ucontext.h
+header-y += processor-flags.h
unifdef-y += e820.h
unifdef-y += ist.h
diff --git a/include/asm-x86/a.out-core.h b/include/asm-x86/a.out-core.h
index d2b6e11d3e97..714207a1c387 100644
--- a/include/asm-x86/a.out-core.h
+++ b/include/asm-x86/a.out-core.h
@@ -29,8 +29,9 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
dump->magic = CMAGIC;
dump->start_code = 0;
dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
- dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
- dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
+ dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT;
+ dump->u_dsize = ((unsigned long)(current->mm->brk + (PAGE_SIZE - 1)))
+ >> PAGE_SHIFT;
dump->u_dsize -= dump->u_tsize;
dump->u_ssize = 0;
dump->u_debugreg[0] = current->thread.debugreg0;
@@ -43,7 +44,8 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
dump->u_debugreg[7] = current->thread.debugreg7;
if (dump->start_stack < TASK_SIZE)
- dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
+ dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack))
+ >> PAGE_SHIFT;
dump->regs.bx = regs->bx;
dump->regs.cx = regs->cx;
@@ -55,7 +57,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
dump->regs.ds = (u16)regs->ds;
dump->regs.es = (u16)regs->es;
dump->regs.fs = (u16)regs->fs;
- savesegment(gs,gs);
+ savesegment(gs, gs);
dump->regs.orig_ax = regs->orig_ax;
dump->regs.ip = regs->ip;
dump->regs.cs = (u16)regs->cs;
@@ -63,7 +65,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
dump->regs.sp = regs->sp;
dump->regs.ss = (u16)regs->ss;
- dump->u_fpvalid = dump_fpu (regs, &dump->i387);
+ dump->u_fpvalid = dump_fpu(regs, &dump->i387);
}
#endif /* CONFIG_X86_32 */
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h
index 7a72d6aa50be..14411c9de46f 100644
--- a/include/asm-x86/acpi.h
+++ b/include/asm-x86/acpi.h
@@ -67,16 +67,16 @@ int __acpi_release_global_lock(unsigned int *lock);
*/
#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
asm("divl %2;" \
- :"=a"(q32), "=d"(r32) \
- :"r"(d32), \
+ : "=a"(q32), "=d"(r32) \
+ : "r"(d32), \
"0"(n_lo), "1"(n_hi))
#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
asm("shrl $1,%2 ;" \
"rcrl $1,%3;" \
- :"=r"(n_hi), "=r"(n_lo) \
- :"0"(n_hi), "1"(n_lo))
+ : "=r"(n_hi), "=r"(n_lo) \
+ : "0"(n_hi), "1"(n_lo))
#ifdef CONFIG_ACPI
extern int acpi_lapic;
diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h
index d8bacf3c4b08..1f6a9ca10126 100644
--- a/include/asm-x86/alternative.h
+++ b/include/asm-x86/alternative.h
@@ -66,8 +66,8 @@ extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_smp_switch(int smp);
#else
static inline void alternatives_smp_module_add(struct module *mod, char *name,
- void *locks, void *locks_end,
- void *text, void *text_end) {}
+ void *locks, void *locks_end,
+ void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
#endif /* CONFIG_SMP */
@@ -148,14 +148,34 @@ struct paravirt_patch_site;
void apply_paravirt(struct paravirt_patch_site *start,
struct paravirt_patch_site *end);
#else
-static inline void
-apply_paravirt(struct paravirt_patch_site *start,
- struct paravirt_patch_site *end)
+static inline void apply_paravirt(struct paravirt_patch_site *start,
+ struct paravirt_patch_site *end)
{}
#define __parainstructions NULL
#define __parainstructions_end NULL
#endif
-extern void text_poke(void *addr, unsigned char *opcode, int len);
+extern void add_nops(void *insns, unsigned int len);
+
+/*
+ * Clear and restore the kernel write-protection flag on the local CPU.
+ * Allows the kernel to edit read-only pages.
+ * Side-effect: any interrupt handler running between save and restore will have
+ * the ability to write to read-only pages.
+ *
+ * Warning:
+ * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
+ * no thread can be preempted in the instructions being modified (no iret to an
+ * invalid instruction possible) or if the instructions are changed from a
+ * consistent state to another consistent state atomically.
+ * More care must be taken when modifying code in the SMP case because of
+ * Intel's errata.
+ * On the local CPU you need to be protected against NMI or MCE handlers seeing an
+ * inconsistent instruction while you patch.
+ * The _early version expects the memory to already be RW.
+ */
+
+extern void *text_poke(void *addr, const void *opcode, size_t len);
+extern void *text_poke_early(void *addr, const void *opcode, size_t len);
#endif /* _ASM_X86_ALTERNATIVE_H */
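
Using the two helpers declared above, patching a site down to NOPs is a two-step recipe: fill a scratch buffer with the CPU's preferred NOPs, then copy it over the (read-only) text. A hedged sketch — the patch site `addr` and the length are illustrative, and the call sequence is an example, not a documented recipe:

/* Sketch: overwrite an instruction sequence with NOPs via text_poke(). */
unsigned char nops[8];

add_nops(nops, sizeof(nops));		/* fill with optimal NOPs */
text_poke(addr, nops, sizeof(nops));	/* safely copy over read-only text */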
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index bcfc07fd3661..be9639a9a186 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -44,7 +44,6 @@ extern int apic_runs_main_timer;
extern int ioapic_force;
extern int disable_apic;
extern int disable_apic_timer;
-extern unsigned boot_cpu_id;
/*
* Basic functions accessing APICs.
@@ -59,6 +58,8 @@ extern unsigned boot_cpu_id;
#define setup_secondary_clock setup_secondary_APIC_clock
#endif
+extern int is_vsmp_box(void);
+
static inline void native_apic_write(unsigned long reg, u32 v)
{
*((volatile u32 *)(APIC_BASE + reg)) = v;
@@ -66,7 +67,7 @@ static inline void native_apic_write(unsigned long reg, u32 v)
static inline void native_apic_write_atomic(unsigned long reg, u32 v)
{
- (void) xchg((u32*)(APIC_BASE + reg), v);
+ (void)xchg((u32 *)(APIC_BASE + reg), v);
}
static inline u32 native_apic_read(unsigned long reg)
@@ -123,7 +124,7 @@ extern void enable_NMI_through_LVT0(void);
* On 32bit this is mach-xxx local
*/
#ifdef CONFIG_X86_64
-extern void setup_apic_routing(void);
+extern void early_init_lapic_mapping(void);
#endif
extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask);
diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h
index 550af7a6f88e..6b9008c78731 100644
--- a/include/asm-x86/apicdef.h
+++ b/include/asm-x86/apicdef.h
@@ -12,17 +12,15 @@
#define APIC_ID 0x20
-#ifdef CONFIG_X86_64
-# define APIC_ID_MASK (0xFFu<<24)
-# define GET_APIC_ID(x) (((x)>>24)&0xFFu)
-# define SET_APIC_ID(x) (((x)<<24))
-#endif
-
#define APIC_LVR 0x30
#define APIC_LVR_MASK 0xFF00FF
-#define GET_APIC_VERSION(x) ((x)&0xFFu)
-#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu)
-#define APIC_INTEGRATED(x) ((x)&0xF0u)
+#define GET_APIC_VERSION(x) ((x) & 0xFFu)
+#define GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu)
+#ifdef CONFIG_X86_32
+# define APIC_INTEGRATED(x) ((x) & 0xF0u)
+#else
+# define APIC_INTEGRATED(x) (1)
+#endif
#define APIC_XAPIC(x) ((x) >= 0x14)
#define APIC_TASKPRI 0x80
#define APIC_TPRI_MASK 0xFFu
@@ -33,16 +31,16 @@
#define APIC_EIO_ACK 0x0
#define APIC_RRR 0xC0
#define APIC_LDR 0xD0
-#define APIC_LDR_MASK (0xFFu<<24)
-#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFFu)
-#define SET_APIC_LOGICAL_ID(x) (((x)<<24))
+#define APIC_LDR_MASK (0xFFu << 24)
+#define GET_APIC_LOGICAL_ID(x) (((x) >> 24) & 0xFFu)
+#define SET_APIC_LOGICAL_ID(x) (((x) << 24))
#define APIC_ALL_CPUS 0xFFu
#define APIC_DFR 0xE0
#define APIC_DFR_CLUSTER 0x0FFFFFFFul
#define APIC_DFR_FLAT 0xFFFFFFFFul
#define APIC_SPIV 0xF0
-#define APIC_SPIV_FOCUS_DISABLED (1<<9)
-#define APIC_SPIV_APIC_ENABLED (1<<8)
+#define APIC_SPIV_FOCUS_DISABLED (1 << 9)
+#define APIC_SPIV_APIC_ENABLED (1 << 8)
#define APIC_ISR 0x100
#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */
#define APIC_TMR 0x180
@@ -78,27 +76,27 @@
#define APIC_DM_EXTINT 0x00700
#define APIC_VECTOR_MASK 0x000FF
#define APIC_ICR2 0x310
-#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF)
-#define SET_APIC_DEST_FIELD(x) ((x)<<24)
+#define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFF)
+#define SET_APIC_DEST_FIELD(x) ((x) << 24)
#define APIC_LVTT 0x320
#define APIC_LVTTHMR 0x330
#define APIC_LVTPC 0x340
#define APIC_LVT0 0x350
-#define APIC_LVT_TIMER_BASE_MASK (0x3<<18)
-#define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3)
-#define SET_APIC_TIMER_BASE(x) (((x)<<18))
+#define APIC_LVT_TIMER_BASE_MASK (0x3 << 18)
+#define GET_APIC_TIMER_BASE(x) (((x) >> 18) & 0x3)
+#define SET_APIC_TIMER_BASE(x) (((x) << 18))
#define APIC_TIMER_BASE_CLKIN 0x0
#define APIC_TIMER_BASE_TMBASE 0x1
#define APIC_TIMER_BASE_DIV 0x2
-#define APIC_LVT_TIMER_PERIODIC (1<<17)
-#define APIC_LVT_MASKED (1<<16)
-#define APIC_LVT_LEVEL_TRIGGER (1<<15)
-#define APIC_LVT_REMOTE_IRR (1<<14)
-#define APIC_INPUT_POLARITY (1<<13)
-#define APIC_SEND_PENDING (1<<12)
+#define APIC_LVT_TIMER_PERIODIC (1 << 17)
+#define APIC_LVT_MASKED (1 << 16)
+#define APIC_LVT_LEVEL_TRIGGER (1 << 15)
+#define APIC_LVT_REMOTE_IRR (1 << 14)
+#define APIC_INPUT_POLARITY (1 << 13)
+#define APIC_SEND_PENDING (1 << 12)
#define APIC_MODE_MASK 0x700
-#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7)
-#define SET_APIC_DELIVERY_MODE(x, y) (((x)&~0x700)|((y)<<8))
+#define GET_APIC_DELIVERY_MODE(x) (((x) >> 8) & 0x7)
+#define SET_APIC_DELIVERY_MODE(x, y) (((x) & ~0x700) | ((y) << 8))
#define APIC_MODE_FIXED 0x0
#define APIC_MODE_NMI 0x4
#define APIC_MODE_EXTINT 0x7
@@ -107,7 +105,7 @@
#define APIC_TMICT 0x380
#define APIC_TMCCT 0x390
#define APIC_TDCR 0x3E0
-#define APIC_TDR_DIV_TMBASE (1<<2)
+#define APIC_TDR_DIV_TMBASE (1 << 2)
#define APIC_TDR_DIV_1 0xB
#define APIC_TDR_DIV_2 0x0
#define APIC_TDR_DIV_4 0x1
@@ -117,14 +115,14 @@
#define APIC_TDR_DIV_64 0x9
#define APIC_TDR_DIV_128 0xA
#define APIC_EILVT0 0x500
-#define APIC_EILVT_NR_AMD_K8 1 /* Number of extended interrupts */
+#define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */
#define APIC_EILVT_NR_AMD_10H 4
-#define APIC_EILVT_LVTOFF(x) (((x)>>4)&0xF)
+#define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF)
#define APIC_EILVT_MSG_FIX 0x0
#define APIC_EILVT_MSG_SMI 0x2
#define APIC_EILVT_MSG_NMI 0x4
#define APIC_EILVT_MSG_EXT 0x7
-#define APIC_EILVT_MASKED (1<<16)
+#define APIC_EILVT_MASKED (1 << 16)
#define APIC_EILVT1 0x510
#define APIC_EILVT2 0x520
#define APIC_EILVT3 0x530
@@ -135,7 +133,7 @@
# define MAX_IO_APICS 64
#else
# define MAX_IO_APICS 128
-# define MAX_LOCAL_APIC 256
+# define MAX_LOCAL_APIC 32768
#endif
/*
@@ -408,6 +406,9 @@ struct local_apic {
#undef u32
-#define BAD_APICID 0xFFu
-
+#ifdef CONFIG_X86_32
+ #define BAD_APICID 0xFFu
+#else
+ #define BAD_APICID 0xFFFFu
+#endif
#endif
diff --git a/include/asm-x86/atomic_32.h b/include/asm-x86/atomic_32.h
index 437aac801711..21a4825148c0 100644
--- a/include/asm-x86/atomic_32.h
+++ b/include/asm-x86/atomic_32.h
@@ -15,138 +15,133 @@
* on us. We need to use _exactly_ the address the user gave us,
* not some alias that contains the same information.
*/
-typedef struct { int counter; } atomic_t;
+typedef struct {
+ int counter;
+} atomic_t;
#define ATOMIC_INIT(i) { (i) }
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
- *
+ *
* Atomically reads the value of @v.
- */
+ */
#define atomic_read(v) ((v)->counter)
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
- *
+ *
* Atomically sets the value of @v to @i.
- */
-#define atomic_set(v,i) (((v)->counter) = (i))
+ */
+#define atomic_set(v, i) (((v)->counter) = (i))
/**
* atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
- *
+ *
* Atomically adds @i to @v.
*/
-static __inline__ void atomic_add(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "addl %1,%0"
- :"+m" (v->counter)
- :"ir" (i));
+ asm volatile(LOCK_PREFIX "addl %1,%0"
+ : "+m" (v->counter)
+ : "ir" (i));
}
/**
* atomic_sub - subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
- *
+ *
* Atomically subtracts @i from @v.
*/
-static __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "subl %1,%0"
- :"+m" (v->counter)
- :"ir" (i));
+ asm volatile(LOCK_PREFIX "subl %1,%0"
+ : "+m" (v->counter)
+ : "ir" (i));
}
/**
* atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
- *
+ *
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
-static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
+static inline int atomic_sub_and_test(int i, atomic_t *v)
{
unsigned char c;
- __asm__ __volatile__(
- LOCK_PREFIX "subl %2,%0; sete %1"
- :"+m" (v->counter), "=qm" (c)
- :"ir" (i) : "memory");
+ asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
return c;
}
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
- *
+ *
* Atomically increments @v by 1.
- */
-static __inline__ void atomic_inc(atomic_t *v)
+ */
+static inline void atomic_inc(atomic_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "incl %0"
- :"+m" (v->counter));
+ asm volatile(LOCK_PREFIX "incl %0"
+ : "+m" (v->counter));
}
/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
- *
+ *
* Atomically decrements @v by 1.
- */
-static __inline__ void atomic_dec(atomic_t *v)
+ */
+static inline void atomic_dec(atomic_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "decl %0"
- :"+m" (v->counter));
+ asm volatile(LOCK_PREFIX "decl %0"
+ : "+m" (v->counter));
}
/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
- *
+ *
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
- */
-static __inline__ int atomic_dec_and_test(atomic_t *v)
+ */
+static inline int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
- __asm__ __volatile__(
- LOCK_PREFIX "decl %0; sete %1"
- :"+m" (v->counter), "=qm" (c)
- : : "memory");
+ asm volatile(LOCK_PREFIX "decl %0; sete %1"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
return c != 0;
}
/**
- * atomic_inc_and_test - increment and test
+ * atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
- *
+ *
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
- */
-static __inline__ int atomic_inc_and_test(atomic_t *v)
+ */
+static inline int atomic_inc_and_test(atomic_t *v)
{
unsigned char c;
- __asm__ __volatile__(
- LOCK_PREFIX "incl %0; sete %1"
- :"+m" (v->counter), "=qm" (c)
- : : "memory");
+ asm volatile(LOCK_PREFIX "incl %0; sete %1"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
return c != 0;
}
@@ -154,19 +149,18 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
* atomic_add_negative - add and test if negative
* @v: pointer of type atomic_t
* @i: integer value to add
- *
+ *
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
- */
-static __inline__ int atomic_add_negative(int i, atomic_t *v)
+ */
+static inline int atomic_add_negative(int i, atomic_t *v)
{
unsigned char c;
- __asm__ __volatile__(
- LOCK_PREFIX "addl %2,%0; sets %1"
- :"+m" (v->counter), "=qm" (c)
- :"ir" (i) : "memory");
+ asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
return c;
}
@@ -177,20 +171,19 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
*
* Atomically adds @i to @v and returns @i + @v
*/
-static __inline__ int atomic_add_return(int i, atomic_t *v)
+static inline int atomic_add_return(int i, atomic_t *v)
{
int __i;
#ifdef CONFIG_M386
unsigned long flags;
- if(unlikely(boot_cpu_data.x86 <= 3))
+ if (unlikely(boot_cpu_data.x86 <= 3))
goto no_xadd;
#endif
/* Modern 486+ processor */
__i = i;
- __asm__ __volatile__(
- LOCK_PREFIX "xaddl %0, %1"
- :"+r" (i), "+m" (v->counter)
- : : "memory");
+ asm volatile(LOCK_PREFIX "xaddl %0, %1"
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
return i + __i;
#ifdef CONFIG_M386
@@ -210,9 +203,9 @@ no_xadd: /* Legacy 386 processor */
*
* Atomically subtracts @i from @v and returns @v - @i
*/
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
+static inline int atomic_sub_return(int i, atomic_t *v)
{
- return atomic_add_return(-i,v);
+ return atomic_add_return(-i, v);
}
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
@@ -227,7 +220,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
@@ -244,17 +237,17 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-#define atomic_inc_return(v) (atomic_add_return(1,v))
-#define atomic_dec_return(v) (atomic_sub_return(1,v))
+#define atomic_inc_return(v) (atomic_add_return(1, v))
+#define atomic_dec_return(v) (atomic_sub_return(1, v))
/* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
-: : "r" (~(mask)),"m" (*addr) : "memory")
+#define atomic_clear_mask(mask, addr) \
+ asm volatile(LOCK_PREFIX "andl %0,%1" \
+ : : "r" (~(mask)), "m" (*(addr)) : "memory")
-#define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
-: : "r" (mask),"m" (*(addr)) : "memory")
+#define atomic_set_mask(mask, addr) \
+ asm volatile(LOCK_PREFIX "orl %0,%1" \
+ : : "r" (mask), "m" (*(addr)) : "memory")
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
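
The hunk above shows only the head of atomic_add_unless(); its body is the standard cmpxchg retry loop. A sketch of that loop (names are illustrative; the real function is unchanged by this patch):

/* Sketch: add a to v unless v == u; returns non-zero if the add happened. */
static inline int add_unless_sketch(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	for (;;) {
		int old;

		if (unlikely(c == u))
			break;			/* excluded value reached */
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;			/* we installed c + a */
		c = old;			/* lost a race; retry */
	}
	return c != u;
}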
diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h
index 2d20a7a19f62..3e0cd7d38335 100644
--- a/include/asm-x86/atomic_64.h
+++ b/include/asm-x86/atomic_64.h
@@ -22,140 +22,135 @@
* on us. We need to use _exactly_ the address the user gave us,
* not some alias that contains the same information.
*/
-typedef struct { int counter; } atomic_t;
+typedef struct {
+ int counter;
+} atomic_t;
#define ATOMIC_INIT(i) { (i) }
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
- *
+ *
* Atomically reads the value of @v.
- */
+ */
#define atomic_read(v) ((v)->counter)
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
- *
+ *
* Atomically sets the value of @v to @i.
- */
-#define atomic_set(v,i) (((v)->counter) = (i))
+ */
+#define atomic_set(v, i) (((v)->counter) = (i))
/**
* atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
- *
+ *
* Atomically adds @i to @v.
*/
-static __inline__ void atomic_add(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "addl %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ asm volatile(LOCK_PREFIX "addl %1,%0"
+ : "=m" (v->counter)
+ : "ir" (i), "m" (v->counter));
}
/**
* atomic_sub - subtract the atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
- *
+ *
* Atomically subtracts @i from @v.
*/
-static __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "subl %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ asm volatile(LOCK_PREFIX "subl %1,%0"
+ : "=m" (v->counter)
+ : "ir" (i), "m" (v->counter));
}
/**
* atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
- *
+ *
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
-static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
+static inline int atomic_sub_and_test(int i, atomic_t *v)
{
unsigned char c;
- __asm__ __volatile__(
- LOCK_PREFIX "subl %2,%0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
+ asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
+ : "=m" (v->counter), "=qm" (c)
+ : "ir" (i), "m" (v->counter) : "memory");
return c;
}
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
- *
+ *
* Atomically increments @v by 1.
- */
-static __inline__ void atomic_inc(atomic_t *v)
+ */
+static inline void atomic_inc(atomic_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "incl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ asm volatile(LOCK_PREFIX "incl %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
}
/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
- *
+ *
* Atomically decrements @v by 1.
- */
-static __inline__ void atomic_dec(atomic_t *v)
+ */
+static inline void atomic_dec(atomic_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "decl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ asm volatile(LOCK_PREFIX "decl %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
}
/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
- *
+ *
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
- */
-static __inline__ int atomic_dec_and_test(atomic_t *v)
+ */
+static inline int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
- __asm__ __volatile__(
- LOCK_PREFIX "decl %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
+ asm volatile(LOCK_PREFIX "decl %0; sete %1"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
return c != 0;
}
/**
- * atomic_inc_and_test - increment and test
+ * atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
- *
+ *
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
- */
-static __inline__ int atomic_inc_and_test(atomic_t *v)
+ */
+static inline int atomic_inc_and_test(atomic_t *v)
{
unsigned char c;
- __asm__ __volatile__(
- LOCK_PREFIX "incl %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
+ asm volatile(LOCK_PREFIX "incl %0; sete %1"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
return c != 0;
}
@@ -163,19 +158,18 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
* atomic_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer of type atomic_t
- *
+ *
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
- */
-static __inline__ int atomic_add_negative(int i, atomic_t *v)
+ */
+static inline int atomic_add_negative(int i, atomic_t *v)
{
unsigned char c;
- __asm__ __volatile__(
- LOCK_PREFIX "addl %2,%0; sets %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
+ asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
+ : "=m" (v->counter), "=qm" (c)
+ : "ir" (i), "m" (v->counter) : "memory");
return c;
}
@@ -186,27 +180,28 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
*
* Atomically adds @i to @v and returns @i + @v
*/
-static __inline__ int atomic_add_return(int i, atomic_t *v)
+static inline int atomic_add_return(int i, atomic_t *v)
{
int __i = i;
- __asm__ __volatile__(
- LOCK_PREFIX "xaddl %0, %1"
- :"+r" (i), "+m" (v->counter)
- : : "memory");
+ asm volatile(LOCK_PREFIX "xaddl %0, %1"
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
return i + __i;
}
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
+static inline int atomic_sub_return(int i, atomic_t *v)
{
- return atomic_add_return(-i,v);
+ return atomic_add_return(-i, v);
}
-#define atomic_inc_return(v) (atomic_add_return(1,v))
-#define atomic_dec_return(v) (atomic_sub_return(1,v))
+#define atomic_inc_return(v) (atomic_add_return(1, v))
+#define atomic_dec_return(v) (atomic_sub_return(1, v))
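The xadd in atomic_add_return() above exchanges-and-adds: the old value of the counter comes back in the source register, so the new value is reconstructed as old value + addend (the "i + __i" idiom, where the "+r" (i) operand has been rewritten in place). A minimal user-space sketch of the same pattern, assuming x86 and gcc inline asm (an analogue, not the kernel's atomic_t):

#include <stdio.h>

static inline int fetch_add_return(int i, volatile int *counter)
{
        int old = i;

        /* xadd swaps old with *counter and adds; old receives the
         * previous counter value, so old + i is the new value. */
        asm volatile("lock; xaddl %0, %1"
                     : "+r" (old), "+m" (*counter)
                     : : "memory");
        return old + i;
}

int main(void)
{
        volatile int c = 40;

        printf("%d\n", fetch_add_return(2, &c));        /* prints 42 */
        return 0;
}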
/* A 64bit atomic type */
-typedef struct { long counter; } atomic64_t;
+typedef struct {
+ long counter;
+} atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
@@ -226,7 +221,7 @@ typedef struct { long counter; } atomic64_t;
*
* Atomically sets the value of @v to @i.
*/
-#define atomic64_set(v,i) (((v)->counter) = (i))
+#define atomic64_set(v, i) (((v)->counter) = (i))
/**
* atomic64_add - add integer to atomic64 variable
@@ -235,12 +230,11 @@ typedef struct { long counter; } atomic64_t;
*
* Atomically adds @i to @v.
*/
-static __inline__ void atomic64_add(long i, atomic64_t *v)
+static inline void atomic64_add(long i, atomic64_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "addq %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ asm volatile(LOCK_PREFIX "addq %1,%0"
+ : "=m" (v->counter)
+ : "ir" (i), "m" (v->counter));
}
/**
@@ -250,12 +244,11 @@ static __inline__ void atomic64_add(long i, atomic64_t *v)
*
* Atomically subtracts @i from @v.
*/
-static __inline__ void atomic64_sub(long i, atomic64_t *v)
+static inline void atomic64_sub(long i, atomic64_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "subq %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ asm volatile(LOCK_PREFIX "subq %1,%0"
+ : "=m" (v->counter)
+ : "ir" (i), "m" (v->counter));
}
/**
@@ -267,14 +260,13 @@ static __inline__ void atomic64_sub(long i, atomic64_t *v)
* true if the result is zero, or false for all
* other cases.
*/
-static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
+static inline int atomic64_sub_and_test(long i, atomic64_t *v)
{
unsigned char c;
- __asm__ __volatile__(
- LOCK_PREFIX "subq %2,%0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
+ asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
+ : "=m" (v->counter), "=qm" (c)
+ : "ir" (i), "m" (v->counter) : "memory");
return c;
}
@@ -284,12 +276,11 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
*
* Atomically increments @v by 1.
*/
-static __inline__ void atomic64_inc(atomic64_t *v)
+static inline void atomic64_inc(atomic64_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "incq %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ asm volatile(LOCK_PREFIX "incq %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
}
/**
@@ -298,12 +289,11 @@ static __inline__ void atomic64_inc(atomic64_t *v)
*
* Atomically decrements @v by 1.
*/
-static __inline__ void atomic64_dec(atomic64_t *v)
+static inline void atomic64_dec(atomic64_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "decq %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ asm volatile(LOCK_PREFIX "decq %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
}
/**
@@ -314,14 +304,13 @@ static __inline__ void atomic64_dec(atomic64_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
-static __inline__ int atomic64_dec_and_test(atomic64_t *v)
+static inline int atomic64_dec_and_test(atomic64_t *v)
{
unsigned char c;
- __asm__ __volatile__(
- LOCK_PREFIX "decq %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
+ asm volatile(LOCK_PREFIX "decq %0; sete %1"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
return c != 0;
}
@@ -333,14 +322,13 @@ static __inline__ int atomic64_dec_and_test(atomic64_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
-static __inline__ int atomic64_inc_and_test(atomic64_t *v)
+static inline int atomic64_inc_and_test(atomic64_t *v)
{
unsigned char c;
- __asm__ __volatile__(
- LOCK_PREFIX "incq %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
+ asm volatile(LOCK_PREFIX "incq %0; sete %1"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
return c != 0;
}
@@ -353,14 +341,13 @@ static __inline__ int atomic64_inc_and_test(atomic64_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
+static inline int atomic64_add_negative(long i, atomic64_t *v)
{
unsigned char c;
- __asm__ __volatile__(
- LOCK_PREFIX "addq %2,%0; sets %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
+ asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
+ : "=m" (v->counter), "=qm" (c)
+ : "ir" (i), "m" (v->counter) : "memory");
return c;
}
@@ -371,29 +358,28 @@ static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
*
* Atomically adds @i to @v and returns @i + @v
*/
-static __inline__ long atomic64_add_return(long i, atomic64_t *v)
+static inline long atomic64_add_return(long i, atomic64_t *v)
{
long __i = i;
- __asm__ __volatile__(
- LOCK_PREFIX "xaddq %0, %1;"
- :"+r" (i), "+m" (v->counter)
- : : "memory");
+ asm volatile(LOCK_PREFIX "xaddq %0, %1;"
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
return i + __i;
}
-static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
+static inline long atomic64_sub_return(long i, atomic64_t *v)
{
- return atomic64_add_return(-i,v);
+ return atomic64_add_return(-i, v);
}
-#define atomic64_inc_return(v) (atomic64_add_return(1,v))
-#define atomic64_dec_return(v) (atomic64_sub_return(1,v))
+#define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
+#define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
/**
* atomic_add_unless - add unless the number is a given value
@@ -404,7 +390,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
@@ -430,7 +416,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
long c, old;
c = atomic64_read(v);
@@ -448,13 +434,14 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
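atomic_add_unless() is the classic cmpxchg retry loop (only its declaration changes in this patch; the body is context): re-read the counter until either the excluded value @u is observed or the compare-and-swap lands. The same loop in user space, with the gcc __sync builtin standing in for atomic_cmpxchg() (an analogue, not the kernel code):

#include <stdio.h>

static int add_unless(volatile int *v, int a, int u)
{
        int c = *v;

        for (;;) {
                int old;

                if (c == u)
                        return 0;               /* hit the excluded value */
                old = __sync_val_compare_and_swap(v, c, c + a);
                if (old == c)
                        return 1;               /* our swap landed */
                c = old;                        /* lost a race; retry */
        }
}

int main(void)
{
        volatile int v = 1;

        printf("%d %d\n", add_unless(&v, 1, 0), (int)v);        /* 1 2 */
        return 0;
}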
/* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
-: : "r" (~(mask)),"m" (*addr) : "memory")
-
-#define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
-: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
+#define atomic_clear_mask(mask, addr) \
+ asm volatile(LOCK_PREFIX "andl %0,%1" \
+ : : "r" (~(mask)), "m" (*(addr)) : "memory")
+
+#define atomic_set_mask(mask, addr) \
+ asm volatile(LOCK_PREFIX "orl %0,%1" \
+ : : "r" ((unsigned)(mask)), "m" (*(addr)) \
+ : "memory")
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
diff --git a/include/asm-x86/mach-default/bios_ebda.h b/include/asm-x86/bios_ebda.h
index 9cbd9a668af8..9cbd9a668af8 100644
--- a/include/asm-x86/mach-default/bios_ebda.h
+++ b/include/asm-x86/bios_ebda.h
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index 1a23ce1a5697..1ae7b270a1ef 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -23,10 +23,13 @@
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
versions. */
-#define ADDR "=m" (*(volatile long *) addr)
+#define ADDR "=m" (*(volatile long *)addr)
+#define BIT_ADDR "=m" (((volatile int *)addr)[nr >> 5])
#else
#define ADDR "+m" (*(volatile long *) addr)
+#define BIT_ADDR "+m" (((volatile int *)addr)[nr >> 5])
#endif
+#define BASE_ADDR "m" (*(volatile int *)addr)
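BIT_ADDR names the exact 32-bit word holding bit nr: nr >> 5 is the word index and nr & 31 the position within it, so gcc sees a precise memory operand for the btr/btc forms below rather than a single long at addr. The arithmetic in plain C (illustration only):

#include <stdio.h>

int main(void)
{
        unsigned bits[4] = { 0 };
        int nr = 70;                            /* bit 70 of the array */

        bits[nr >> 5] |= 1u << (nr & 31);       /* word 2, bit 6 */
        printf("word %d = %#x\n", nr >> 5, bits[nr >> 5]);  /* word 2 = 0x40 */
        return 0;
}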
/**
* set_bit - Atomically set a bit in memory
@@ -45,9 +48,7 @@
*/
static inline void set_bit(int nr, volatile void *addr)
{
- asm volatile(LOCK_PREFIX "bts %1,%0"
- : ADDR
- : "Ir" (nr) : "memory");
+ asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
/**
@@ -79,9 +80,7 @@ static inline void __set_bit(int nr, volatile void *addr)
*/
static inline void clear_bit(int nr, volatile void *addr)
{
- asm volatile(LOCK_PREFIX "btr %1,%0"
- : ADDR
- : "Ir" (nr));
+ asm volatile(LOCK_PREFIX "btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
}
/*
@@ -100,7 +99,7 @@ static inline void clear_bit_unlock(unsigned nr, volatile void *addr)
static inline void __clear_bit(int nr, volatile void *addr)
{
- asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
+ asm volatile("btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
}
/*
@@ -135,7 +134,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
*/
static inline void __change_bit(int nr, volatile void *addr)
{
- asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
+ asm volatile("btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
}
/**
@@ -149,8 +148,7 @@ static inline void __change_bit(int nr, volatile void *addr)
*/
static inline void change_bit(int nr, volatile void *addr)
{
- asm volatile(LOCK_PREFIX "btc %1,%0"
- : ADDR : "Ir" (nr));
+ asm volatile(LOCK_PREFIX "btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
}
/**
@@ -166,9 +164,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
int oldbit;
asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
- "sbb %0,%0"
- : "=r" (oldbit), ADDR
- : "Ir" (nr) : "memory");
+ "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
return oldbit;
}
@@ -198,10 +194,9 @@ static inline int __test_and_set_bit(int nr, volatile void *addr)
{
int oldbit;
- asm("bts %2,%1\n\t"
- "sbb %0,%0"
- : "=r" (oldbit), ADDR
- : "Ir" (nr));
+ asm volatile("bts %2,%3\n\t"
+ "sbb %0,%0"
+ : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
return oldbit;
}
@@ -219,8 +214,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
"sbb %0,%0"
- : "=r" (oldbit), ADDR
- : "Ir" (nr) : "memory");
+ : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
return oldbit;
}
@@ -238,10 +232,9 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr)
{
int oldbit;
- asm volatile("btr %2,%1\n\t"
+ asm volatile("btr %2,%3\n\t"
"sbb %0,%0"
- : "=r" (oldbit), ADDR
- : "Ir" (nr));
+ : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
return oldbit;
}
@@ -250,10 +243,9 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
{
int oldbit;
- asm volatile("btc %2,%1\n\t"
+ asm volatile("btc %2,%3\n\t"
"sbb %0,%0"
- : "=r" (oldbit), ADDR
- : "Ir" (nr) : "memory");
+ : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
return oldbit;
}
@@ -272,8 +264,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
"sbb %0,%0"
- : "=r" (oldbit), ADDR
- : "Ir" (nr) : "memory");
+ : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
return oldbit;
}
@@ -288,10 +279,11 @@ static inline int variable_test_bit(int nr, volatile const void *addr)
{
int oldbit;
- asm volatile("bt %2,%1\n\t"
+ asm volatile("bt %2,%3\n\t"
"sbb %0,%0"
: "=r" (oldbit)
- : "m" (*(unsigned long *)addr), "Ir" (nr));
+ : "m" (((volatile const int *)addr)[nr >> 5]),
+ "Ir" (nr), BASE_ADDR);
return oldbit;
}
@@ -310,6 +302,8 @@ static int test_bit(int nr, const volatile unsigned long *addr);
constant_test_bit((nr),(addr)) : \
variable_test_bit((nr),(addr)))
+#undef BASE_ADDR
+#undef BIT_ADDR
#undef ADDR
#ifdef CONFIG_X86_32
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
index e4d75fcf9c03..2513a81f82aa 100644
--- a/include/asm-x86/bitops_32.h
+++ b/include/asm-x86/bitops_32.h
@@ -20,20 +20,22 @@ static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
if (!size)
return 0;
- /* This looks at memory. Mark it volatile to tell gcc not to move it around */
- __asm__ __volatile__(
- "movl $-1,%%eax\n\t"
- "xorl %%edx,%%edx\n\t"
- "repe; scasl\n\t"
- "je 1f\n\t"
- "xorl -4(%%edi),%%eax\n\t"
- "subl $4,%%edi\n\t"
- "bsfl %%eax,%%edx\n"
- "1:\tsubl %%ebx,%%edi\n\t"
- "shll $3,%%edi\n\t"
- "addl %%edi,%%edx"
- :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
- :"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
+ /* This looks at memory.
+ * Mark it volatile to tell gcc not to move it around.
+ */
+ asm volatile("movl $-1,%%eax\n\t"
+ "xorl %%edx,%%edx\n\t"
+ "repe; scasl\n\t"
+ "je 1f\n\t"
+ "xorl -4(%%edi),%%eax\n\t"
+ "subl $4,%%edi\n\t"
+ "bsfl %%eax,%%edx\n"
+ "1:\tsubl %%ebx,%%edi\n\t"
+ "shll $3,%%edi\n\t"
+ "addl %%edi,%%edx"
+ : "=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
+ : "1" ((size + 31) >> 5), "2" (addr),
+ "b" (addr) : "memory");
return res;
}
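The asm above uses repe; scasl to skip all-ones dwords, then bsfl on the inverted mismatching dword to locate the first zero bit. A portable rendering of the same scan, assuming 32-bit words as on i386 (a sketch, not the kernel implementation; like the asm, it can report a position >= size when every bit is set):

#include <stdio.h>

static unsigned find_first_zero(const unsigned *addr, unsigned size)
{
        unsigned i, words = (size + 31) >> 5;   /* dwords scanned, as in the asm */

        for (i = 0; i < words; i++)
                if (addr[i] != ~0u)             /* repe; scasl stops here */
                        return i * 32 + __builtin_ctz(~addr[i]);  /* bsfl */
        return words * 32;
}

int main(void)
{
        unsigned map[2] = { ~0u, 0xffffff7fu }; /* first zero is bit 39 */

        printf("%u\n", find_first_zero(map, 64));
        return 0;
}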
@@ -75,7 +77,7 @@ static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
unsigned long val = *addr++;
if (val)
return __ffs(val) + x;
- x += (sizeof(*addr)<<3);
+ x += sizeof(*addr) << 3;
}
return x;
}
@@ -152,10 +154,10 @@ static inline int fls(int x)
#include <asm-generic/bitops/ext2-non-atomic.h>
-#define ext2_set_bit_atomic(lock, nr, addr) \
- test_and_set_bit((nr), (unsigned long *)addr)
-#define ext2_clear_bit_atomic(lock, nr, addr) \
- test_and_clear_bit((nr), (unsigned long *)addr)
+#define ext2_set_bit_atomic(lock, nr, addr) \
+ test_and_set_bit((nr), (unsigned long *)(addr))
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+ test_and_clear_bit((nr), (unsigned long *)(addr))
#include <asm-generic/bitops/minix.h>
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
index aaf15194d536..365f8207ea59 100644
--- a/include/asm-x86/bitops_64.h
+++ b/include/asm-x86/bitops_64.h
@@ -17,35 +17,35 @@ static inline long __scanbit(unsigned long val, unsigned long max)
return val;
}
-#define find_first_bit(addr,size) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
- (__scanbit(*(unsigned long *)addr,(size))) : \
- find_first_bit(addr,size)))
-
#define find_next_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
find_next_bit(addr,size,off)))
-#define find_first_zero_bit(addr,size) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
- (__scanbit(~*(unsigned long *)addr,(size))) : \
- find_first_zero_bit(addr,size)))
-
#define find_next_zero_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
find_next_zero_bit(addr,size,off)))
-static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
- int len)
-{
- unsigned long end = i + len;
+#define find_first_bit(addr, size) \
+ ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \
+ ? (__scanbit(*(unsigned long *)(addr), (size))) \
+ : find_first_bit((addr), (size))))
+
+#define find_first_zero_bit(addr, size) \
+ ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \
+ ? (__scanbit(~*(unsigned long *)(addr), (size))) \
+ : find_first_zero_bit((addr), (size))))
+
+static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
+ int len)
+{
+ unsigned long end = i + len;
while (i < end) {
- __set_bit(i, bitmap);
+ __set_bit(i, bitmap);
i++;
}
-}
+}
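__scanbit() (defined earlier in this file) exists so that, when size is a compile-time constant no larger than BITS_PER_LONG, the find_first_bit()/find_first_zero_bit() wrappers above collapse to a single bsf over one word, with max substituted when the word is empty, since bsf leaves its output undefined on zero input. The same idea in portable C (a sketch):

#include <stdio.h>

static long scanbit(unsigned long val, unsigned long max)
{
        return val ? __builtin_ctzl(val) : max; /* bsf, with a zero fallback */
}

int main(void)
{
        printf("%ld %ld\n", scanbit(0x50, 64), scanbit(0, 64)); /* 4 64 */
        return 0;
}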
/**
* ffz - find first zero in word.
@@ -150,10 +150,10 @@ static inline int fls(int x)
#include <asm-generic/bitops/ext2-non-atomic.h>
-#define ext2_set_bit_atomic(lock,nr,addr) \
- test_and_set_bit((nr),(unsigned long*)addr)
-#define ext2_clear_bit_atomic(lock,nr,addr) \
- test_and_clear_bit((nr),(unsigned long*)addr)
+#define ext2_set_bit_atomic(lock, nr, addr) \
+ test_and_set_bit((nr), (unsigned long *)(addr))
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+ test_and_clear_bit((nr), (unsigned long *)(addr))
#include <asm-generic/bitops/minix.h>
diff --git a/include/asm-x86/bug.h b/include/asm-x86/bug.h
index 8d477a201392..b69aa64b82a4 100644
--- a/include/asm-x86/bug.h
+++ b/include/asm-x86/bug.h
@@ -12,25 +12,25 @@
# define __BUG_C0 "2:\t.quad 1b, %c0\n"
#endif
-#define BUG() \
- do { \
- asm volatile("1:\tud2\n" \
- ".pushsection __bug_table,\"a\"\n" \
- __BUG_C0 \
- "\t.word %c1, 0\n" \
- "\t.org 2b+%c2\n" \
- ".popsection" \
- : : "i" (__FILE__), "i" (__LINE__), \
- "i" (sizeof(struct bug_entry))); \
- for(;;) ; \
- } while(0)
+#define BUG() \
+do { \
+ asm volatile("1:\tud2\n" \
+ ".pushsection __bug_table,\"a\"\n" \
+ __BUG_C0 \
+ "\t.word %c1, 0\n" \
+ "\t.org 2b+%c2\n" \
+ ".popsection" \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (sizeof(struct bug_entry))); \
+ for (;;) ; \
+} while (0)
#else
-#define BUG() \
- do { \
- asm volatile("ud2"); \
- for(;;) ; \
- } while(0)
+#define BUG() \
+do { \
+ asm volatile("ud2"); \
+ for (;;) ; \
+} while (0)
#endif
#endif /* !CONFIG_BUG */
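In the first variant above, each BUG() site emits a ud2 (an instruction guaranteed to fault) plus one record in the __bug_table section; the trap handler looks the faulting address up there to report the file and line. The record layout implied by the .quad/.word directives, as a sketch (inferred from the macro; the kernel's struct bug_entry is the authoritative definition):

struct bug_entry {
        unsigned long bug_addr;         /* 1b: address of the ud2 */
        const char *file;               /* %c0: __FILE__ */
        unsigned short line;            /* %c1: __LINE__ */
        unsigned short flags;           /* the literal 0 */
};

The trailing ".org 2b+%c2" pads each record to sizeof(struct bug_entry), keeping the section a well-formed array.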
diff --git a/include/asm-x86/byteorder.h b/include/asm-x86/byteorder.h
index fe2f2e5d51ba..e02ae2d89acf 100644
--- a/include/asm-x86/byteorder.h
+++ b/include/asm-x86/byteorder.h
@@ -8,50 +8,59 @@
#ifdef __i386__
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
+static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
#ifdef CONFIG_X86_BSWAP
- __asm__("bswap %0" : "=r" (x) : "0" (x));
+ asm("bswap %0" : "=r" (x) : "0" (x));
#else
- __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */
- "rorl $16,%0\n\t" /* swap words */
- "xchgb %b0,%h0" /* swap higher bytes */
- :"=q" (x)
- : "0" (x));
+ asm("xchgb %b0,%h0\n\t" /* swap lower bytes */
+ "rorl $16,%0\n\t" /* swap words */
+ "xchgb %b0,%h0" /* swap higher bytes */
+ : "=q" (x)
+ : "0" (x));
#endif
return x;
}
-static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val)
+static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
{
union {
- struct { __u32 a,b; } s;
+ struct {
+ __u32 a;
+ __u32 b;
+ } s;
__u64 u;
} v;
v.u = val;
#ifdef CONFIG_X86_BSWAP
- __asm__("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
+ asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
: "=r" (v.s.a), "=r" (v.s.b)
: "0" (v.s.a), "1" (v.s.b));
#else
v.s.a = ___arch__swab32(v.s.a);
v.s.b = ___arch__swab32(v.s.b);
- __asm__("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
+ asm("xchgl %0,%1"
+ : "=r" (v.s.a), "=r" (v.s.b)
+ : "0" (v.s.a), "1" (v.s.b));
#endif
return v.u;
}
#else /* __i386__ */
-static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
+static inline __attribute_const__ __u64 ___arch__swab64(__u64 x)
{
- __asm__("bswapq %0" : "=r" (x) : "0" (x));
+ asm("bswapq %0"
+ : "=r" (x)
+ : "0" (x));
return x;
}
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
+static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
- __asm__("bswapl %0" : "=r" (x) : "0" (x));
+ asm("bswapl %0"
+ : "=r" (x)
+ : "0" (x));
return x;
}
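bswap reverses the four bytes of a register in one instruction; the xchgb/rorl fallback does the same on 386-class CPUs that lack it. The shift-and-mask equivalent in portable C, for reference (a sketch, not how the kernel does it):

#include <stdio.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
        return (x >> 24) | ((x >> 8) & 0x0000ff00) |
               ((x << 8) & 0x00ff0000) | (x << 24);
}

int main(void)
{
        printf("%#x\n", swab32(0x12345678));    /* prints 0x78563412 */
        return 0;
}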
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index 5396c212d8c0..f4c0ab50d2c2 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -14,33 +14,85 @@
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
-#define flush_icache_page(vma,pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
+#define flush_icache_page(vma, pg) do { } while (0)
+#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy((dst), (src), (len))
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy((dst), (src), (len))
-int __deprecated_for_modules change_page_attr(struct page *page, int numpages,
- pgprot_t prot);
-int set_pages_uc(struct page *page, int numpages);
-int set_pages_wb(struct page *page, int numpages);
-int set_pages_x(struct page *page, int numpages);
-int set_pages_nx(struct page *page, int numpages);
-int set_pages_ro(struct page *page, int numpages);
-int set_pages_rw(struct page *page, int numpages);
+/*
+ * The set_memory_* API can be used to change various attributes of a virtual
+ * address range. The attributes include:
+ * Cacheability : UnCached, WriteCombining, WriteBack
+ * Executability : eXecutable, NoteXecutable
+ * Read/Write : ReadOnly, ReadWrite
+ * Presence : NotPresent
+ *
+ * Within a category, the attributes are mutually exclusive.
+ *
+ * The implementation of this API will take care of various aspects that
+ * are associated with changing such attributes, such as:
+ * - Flushing TLBs
+ * - Flushing CPU caches
+ * - Making sure aliases of the memory behind the mapping don't violate
+ * coherency rules as defined by the CPU in the system.
+ *
+ * What this API does not do:
+ * - Provide exclusion between various callers - including callers that
+ * operate on other mappings of the same physical page
+ * - Restore default attributes when a page is freed
+ * - Guarantee anything about mappings other than the requested one,
+ * beyond that they do not violate the coherency rules of the CPU
+ * you are running on. Do not depend on any effect on other mappings;
+ * CPUs other than yours may have more relaxed rules.
+ * The caller is required to take care of these.
+ */
+int _set_memory_uc(unsigned long addr, int numpages);
+int _set_memory_wc(unsigned long addr, int numpages);
+int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
+int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
+int set_memory_4k(unsigned long addr, int numpages);
+
+/*
+ * For legacy compatibility with the old APIs, a few functions
+ * are provided that work on a "struct page".
+ * These functions operate ONLY on the 1:1 kernel mapping of the
+ * memory that the struct page represents, and internally just
+ * call the set_memory_* function. See the description of the
+ * set_memory_* function for more details on conventions.
+ *
+ * These APIs should be considered *deprecated* and are likely going to
+ * be removed in the future.
+ * The reason is that they implicitly operate only on the 1:1 mapping,
+ * which makes them not generally useful.
+ *
+ * Specifically, many users of the old APIs had a virtual address,
+ * called virt_to_page() or vmalloc_to_page() on that address to
+ * get a struct page* that the old API required.
+ * To convert these cases, use set_memory_*() on the original
+ * virtual address; do not use these functions.
+ */
+
+int set_pages_uc(struct page *page, int numpages);
+int set_pages_wb(struct page *page, int numpages);
+int set_pages_x(struct page *page, int numpages);
+int set_pages_nx(struct page *page, int numpages);
+int set_pages_ro(struct page *page, int numpages);
+int set_pages_rw(struct page *page, int numpages);
+
void clflush_cache_range(void *addr, unsigned int size);
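A usage sketch of the API documented above (kernel-side code, so not compilable stand-alone; error handling trimmed, and addr is assumed to be a kernel virtual address in the 1:1 mapping):

static int with_uncached_range(unsigned long addr, int numpages)
{
        int ret = set_memory_uc(addr, numpages);        /* flushes TLBs/caches */

        if (ret)
                return ret;
        /* ... touch the range with uncached semantics ... */
        return set_memory_wb(addr, numpages);           /* restore write-back */
}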
diff --git a/include/asm-x86/checksum_32.h b/include/asm-x86/checksum_32.h
index 75194abbe8ee..52bbb0d8c4c1 100644
--- a/include/asm-x86/checksum_32.h
+++ b/include/asm-x86/checksum_32.h
@@ -28,7 +28,8 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
*/
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
- int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr);
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
/*
* Note: when you get a NULL pointer exception here this means someone
@@ -37,20 +38,20 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
* If you use these functions directly please don't forget the
* access_ok().
*/
-static __inline__
-__wsum csum_partial_copy_nocheck (const void *src, void *dst,
- int len, __wsum sum)
+static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum)
{
- return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
+ return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}
-static __inline__
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+static inline __wsum csum_partial_copy_from_user(const void __user *src,
+ void *dst,
+ int len, __wsum sum,
+ int *err_ptr)
{
might_sleep();
return csum_partial_copy_generic((__force void *)src, dst,
- len, sum, err_ptr, NULL);
+ len, sum, err_ptr, NULL);
}
/*
@@ -64,30 +65,29 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;
- __asm__ __volatile__(
- "movl (%1), %0 ;\n"
- "subl $4, %2 ;\n"
- "jbe 2f ;\n"
- "addl 4(%1), %0 ;\n"
- "adcl 8(%1), %0 ;\n"
- "adcl 12(%1), %0 ;\n"
-"1: adcl 16(%1), %0 ;\n"
- "lea 4(%1), %1 ;\n"
- "decl %2 ;\n"
- "jne 1b ;\n"
- "adcl $0, %0 ;\n"
- "movl %0, %2 ;\n"
- "shrl $16, %0 ;\n"
- "addw %w2, %w0 ;\n"
- "adcl $0, %0 ;\n"
- "notl %0 ;\n"
-"2: ;\n"
+ asm volatile("movl (%1), %0 ;\n"
+ "subl $4, %2 ;\n"
+ "jbe 2f ;\n"
+ "addl 4(%1), %0 ;\n"
+ "adcl 8(%1), %0 ;\n"
+ "adcl 12(%1), %0;\n"
+ "1: adcl 16(%1), %0 ;\n"
+ "lea 4(%1), %1 ;\n"
+ "decl %2 ;\n"
+ "jne 1b ;\n"
+ "adcl $0, %0 ;\n"
+ "movl %0, %2 ;\n"
+ "shrl $16, %0 ;\n"
+ "addw %w2, %w0 ;\n"
+ "adcl $0, %0 ;\n"
+ "notl %0 ;\n"
+ "2: ;\n"
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
- : "=r" (sum), "=r" (iph), "=r" (ihl)
- : "1" (iph), "2" (ihl)
- : "memory");
+ : "=r" (sum), "=r" (iph), "=r" (ihl)
+ : "1" (iph), "2" (ihl)
+ : "memory");
return (__force __sum16)sum;
}
@@ -97,29 +97,27 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
static inline __sum16 csum_fold(__wsum sum)
{
- __asm__(
- "addl %1, %0 ;\n"
- "adcl $0xffff, %0 ;\n"
- : "=r" (sum)
- : "r" ((__force u32)sum << 16),
- "0" ((__force u32)sum & 0xffff0000)
- );
+ asm("addl %1, %0 ;\n"
+ "adcl $0xffff, %0 ;\n"
+ : "=r" (sum)
+ : "r" ((__force u32)sum << 16),
+ "0" ((__force u32)sum & 0xffff0000));
return (__force __sum16)(~(__force u32)sum >> 16);
}
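csum_fold() adds the two 16-bit halves of the running sum with end-around carry and complements the result; the asm reaches the same value by combining the halves in the upper 16 bits of a register and extracting the top of the complement. The equivalent portable computation (a sketch):

#include <stdio.h>
#include <stdint.h>

static uint16_t fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* fold the upper half in */
        sum += sum >> 16;                       /* absorb the carry-out */
        return (uint16_t)~sum;
}

int main(void)
{
        printf("%#x\n", fold(0x1a2b3c4d));      /* prints 0xa987 */
        return 0;
}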
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
{
- __asm__(
- "addl %1, %0 ;\n"
- "adcl %2, %0 ;\n"
- "adcl %3, %0 ;\n"
- "adcl $0, %0 ;\n"
- : "=r" (sum)
- : "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum));
- return sum;
+ asm("addl %1, %0 ;\n"
+ "adcl %2, %0 ;\n"
+ "adcl %3, %0 ;\n"
+ "adcl $0, %0 ;\n"
+ : "=r" (sum)
+ : "g" (daddr), "g"(saddr),
+ "g" ((len + proto) << 8), "0" (sum));
+ return sum;
}
/*
@@ -127,11 +125,11 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
{
- return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
/*
@@ -141,30 +139,29 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
- return csum_fold (csum_partial(buff, len, 0));
+ return csum_fold(csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
-static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
- const struct in6_addr *daddr,
- __u32 len, unsigned short proto,
- __wsum sum)
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, unsigned short proto,
+ __wsum sum)
{
- __asm__(
- "addl 0(%1), %0 ;\n"
- "adcl 4(%1), %0 ;\n"
- "adcl 8(%1), %0 ;\n"
- "adcl 12(%1), %0 ;\n"
- "adcl 0(%2), %0 ;\n"
- "adcl 4(%2), %0 ;\n"
- "adcl 8(%2), %0 ;\n"
- "adcl 12(%2), %0 ;\n"
- "adcl %3, %0 ;\n"
- "adcl %4, %0 ;\n"
- "adcl $0, %0 ;\n"
- : "=&r" (sum)
- : "r" (saddr), "r" (daddr),
- "r"(htonl(len)), "r"(htonl(proto)), "0"(sum));
+ asm("addl 0(%1), %0 ;\n"
+ "adcl 4(%1), %0 ;\n"
+ "adcl 8(%1), %0 ;\n"
+ "adcl 12(%1), %0 ;\n"
+ "adcl 0(%2), %0 ;\n"
+ "adcl 4(%2), %0 ;\n"
+ "adcl 8(%2), %0 ;\n"
+ "adcl 12(%2), %0 ;\n"
+ "adcl %3, %0 ;\n"
+ "adcl %4, %0 ;\n"
+ "adcl $0, %0 ;\n"
+ : "=&r" (sum)
+ : "r" (saddr), "r" (daddr),
+ "r" (htonl(len)), "r" (htonl(proto)), "0" (sum));
return csum_fold(sum);
}
@@ -173,14 +170,15 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user(const void *src,
- void __user *dst,
- int len, __wsum sum,
- int *err_ptr)
+static inline __wsum csum_and_copy_to_user(const void *src,
+ void __user *dst,
+ int len, __wsum sum,
+ int *err_ptr)
{
might_sleep();
if (access_ok(VERIFY_WRITE, dst, len))
- return csum_partial_copy_generic(src, (__force void *)dst, len, sum, NULL, err_ptr);
+ return csum_partial_copy_generic(src, (__force void *)dst,
+ len, sum, NULL, err_ptr);
if (len)
*err_ptr = -EFAULT;
diff --git a/include/asm-x86/checksum_64.h b/include/asm-x86/checksum_64.h
index e5f79997decc..8bd861cc5267 100644
--- a/include/asm-x86/checksum_64.h
+++ b/include/asm-x86/checksum_64.h
@@ -1,33 +1,31 @@
#ifndef _X86_64_CHECKSUM_H
#define _X86_64_CHECKSUM_H
-/*
- * Checksums for x86-64
- * Copyright 2002 by Andi Kleen, SuSE Labs
+/*
+ * Checksums for x86-64
+ * Copyright 2002 by Andi Kleen, SuSE Labs
* with some code from asm-x86/checksum.h
- */
+ */
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
-/**
+/**
* csum_fold - Fold and invert a 32bit checksum.
* sum: 32bit unfolded sum
- *
+ *
* Fold a 32bit running checksum to 16bit and invert it. This is usually
* the last step before putting a checksum into a packet.
* Make sure not to mix with 64bit checksums.
*/
static inline __sum16 csum_fold(__wsum sum)
{
- __asm__(
- " addl %1,%0\n"
- " adcl $0xffff,%0"
- : "=r" (sum)
- : "r" ((__force u32)sum << 16),
- "0" ((__force u32)sum & 0xffff0000)
- );
+ asm(" addl %1,%0\n"
+ " adcl $0xffff,%0"
+ : "=r" (sum)
+ : "r" ((__force u32)sum << 16),
+ "0" ((__force u32)sum & 0xffff0000));
return (__force __sum16)(~(__force u32)sum >> 16);
}
@@ -43,46 +41,46 @@ static inline __sum16 csum_fold(__wsum sum)
* ip_fast_csum - Compute the IPv4 header checksum efficiently.
* iph: ipv4 header
* ihl: length of header / 4
- */
+ */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;
- asm( " movl (%1), %0\n"
- " subl $4, %2\n"
- " jbe 2f\n"
- " addl 4(%1), %0\n"
- " adcl 8(%1), %0\n"
- " adcl 12(%1), %0\n"
- "1: adcl 16(%1), %0\n"
- " lea 4(%1), %1\n"
- " decl %2\n"
- " jne 1b\n"
- " adcl $0, %0\n"
- " movl %0, %2\n"
- " shrl $16, %0\n"
- " addw %w2, %w0\n"
- " adcl $0, %0\n"
- " notl %0\n"
- "2:"
+ asm(" movl (%1), %0\n"
+ " subl $4, %2\n"
+ " jbe 2f\n"
+ " addl 4(%1), %0\n"
+ " adcl 8(%1), %0\n"
+ " adcl 12(%1), %0\n"
+ "1: adcl 16(%1), %0\n"
+ " lea 4(%1), %1\n"
+ " decl %2\n"
+ " jne 1b\n"
+ " adcl $0, %0\n"
+ " movl %0, %2\n"
+ " shrl $16, %0\n"
+ " addw %w2, %w0\n"
+ " adcl $0, %0\n"
+ " notl %0\n"
+ "2:"
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
- : "=r" (sum), "=r" (iph), "=r" (ihl)
- : "1" (iph), "2" (ihl)
- : "memory");
+ : "=r" (sum), "=r" (iph), "=r" (ihl)
+ : "1" (iph), "2" (ihl)
+ : "memory");
return (__force __sum16)sum;
}
-/**
+/**
* csum_tcpudp_nofold - Compute an IPv4 pseudo header checksum.
* @saddr: source address
* @daddr: destination address
* @len: length of packet
* @proto: ip protocol of packet
- * @sum: initial sum to be added in (32bit unfolded)
- *
- * Returns the pseudo header checksum the input data. Result is
+ * @sum: initial sum to be added in (32bit unfolded)
+ *
+ * Returns the pseudo header checksum of the input data. Result is
* 32bit unfolded.
*/
static inline __wsum
@@ -93,32 +91,32 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
" adcl %2, %0\n"
" adcl %3, %0\n"
" adcl $0, %0\n"
- : "=r" (sum)
+ : "=r" (sum)
: "g" (daddr), "g" (saddr),
"g" ((len + proto)<<8), "0" (sum));
- return sum;
+ return sum;
}
-/**
+/**
* csum_tcpudp_magic - Compute an IPv4 pseudo header checksum.
* @saddr: source address
* @daddr: destination address
* @len: length of packet
* @proto: ip protocol of packet
- * @sum: initial sum to be added in (32bit unfolded)
- *
+ * @sum: initial sum to be added in (32bit unfolded)
+ *
* Returns the 16bit pseudo header checksum of the input data, already
* complemented and ready to be filled in.
*/
-static inline __sum16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr,
- unsigned short len, unsigned short proto, __wsum sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto, __wsum sum)
{
- return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
-/**
+/**
* csum_partial - Compute an internet checksum.
* @buff: buffer to be checksummed
* @len: length of buffer.
@@ -127,7 +125,7 @@ csum_tcpudp_magic(__be32 saddr, __be32 daddr,
* Returns the 32bit unfolded internet checksum of the buffer.
* Before filling it in it needs to be csum_fold()'ed.
* buff should be aligned to a 64bit boundary if possible.
- */
+ */
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
@@ -136,23 +134,22 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/* Do not call this directly. Use the wrappers below */
extern __wsum csum_partial_copy_generic(const void *src, const void *dst,
- int len,
- __wsum sum,
- int *src_err_ptr, int *dst_err_ptr);
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
- int len, __wsum isum, int *errp);
+ int len, __wsum isum, int *errp);
extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst,
- int len, __wsum isum, int *errp);
-extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
- __wsum sum);
+ int len, __wsum isum, int *errp);
+extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum);
/* Old names. To be removed. */
#define csum_and_copy_to_user csum_partial_copy_to_user
#define csum_and_copy_from_user csum_partial_copy_from_user
-/**
+/**
* ip_compute_csum - Compute a 16bit IP checksum.
* @buff: buffer address.
* @len: length of buffer.
@@ -170,7 +167,7 @@ extern __sum16 ip_compute_csum(const void *buff, int len);
* @proto: protocol of packet
* @sum: initial sum (32bit unfolded) to be added in
*
- * Computes an IPv6 pseudo header checksum. This sum is added the checksum
+ * Computes an IPv6 pseudo header checksum. This sum is added to the checksum
* of UDP/TCP packets and contains some link layer information.
* Returns the unfolded 32bit checksum.
*/
@@ -185,11 +182,10 @@ csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
static inline unsigned add32_with_carry(unsigned a, unsigned b)
{
asm("addl %2,%0\n\t"
- "adcl $0,%0"
- : "=r" (a)
+ "adcl $0,%0"
+ : "=r" (a)
: "0" (a), "r" (b));
return a;
}
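add32_with_carry() is the building block of the 64-bit checksum code: a 32-bit add whose carry-out is fed straight back in by adcl $0, which preserves the ones-complement sum. Portable equivalent (a sketch):

#include <stdio.h>
#include <stdint.h>

static uint32_t add32_carry(uint32_t a, uint32_t b)
{
        uint32_t s = a + b;

        return s + (s < a);     /* s < a exactly when the add wrapped */
}

int main(void)
{
        printf("%#x\n", add32_carry(0xffffffffu, 2));   /* prints 0x2 */
        return 0;
}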
#endif
-
diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h
index 959fad00dff5..bf5a69d1329e 100644
--- a/include/asm-x86/cmpxchg_32.h
+++ b/include/asm-x86/cmpxchg_32.h
@@ -8,9 +8,12 @@
* you need to test for the feature in boot_cpu_data.
*/
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+#define xchg(ptr, v) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
-struct __xchg_dummy { unsigned long a[100]; };
+struct __xchg_dummy {
+ unsigned long a[100];
+};
#define __xg(x) ((struct __xchg_dummy *)(x))
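The cast through struct __xchg_dummy is a width trick: making the "m" operand *__xg(ptr) an aggregate of 100 longs tells gcc the asm may read or write a large region at ptr, which a plain *(long *)ptr operand would not. The idiom in isolation (a sketch with made-up names):

struct wide {
        unsigned long a[100];
};
#define WIDE(x) (*(volatile struct wide *)(x))

/* Forces the compiler to treat the whole region at p as read and
 * written by the (empty) asm - a targeted compiler barrier. */
static inline void touch(void *p)
{
        asm volatile("" : "+m" (WIDE(p)));
}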
/*
@@ -27,72 +30,74 @@ struct __xchg_dummy { unsigned long a[100]; };
* of the instruction set reference 24319102.pdf. We need
* the reader side to see the coherent 64bit value.
*/
-static inline void __set_64bit (unsigned long long * ptr,
- unsigned int low, unsigned int high)
+static inline void __set_64bit(unsigned long long *ptr,
+ unsigned int low, unsigned int high)
{
- __asm__ __volatile__ (
- "\n1:\t"
- "movl (%0), %%eax\n\t"
- "movl 4(%0), %%edx\n\t"
- LOCK_PREFIX "cmpxchg8b (%0)\n\t"
- "jnz 1b"
- : /* no outputs */
- : "D"(ptr),
- "b"(low),
- "c"(high)
- : "ax","dx","memory");
+ asm volatile("\n1:\t"
+ "movl (%0), %%eax\n\t"
+ "movl 4(%0), %%edx\n\t"
+ LOCK_PREFIX "cmpxchg8b (%0)\n\t"
+ "jnz 1b"
+ : /* no outputs */
+ : "D"(ptr),
+ "b"(low),
+ "c"(high)
+ : "ax", "dx", "memory");
}
-static inline void __set_64bit_constant (unsigned long long *ptr,
- unsigned long long value)
+static inline void __set_64bit_constant(unsigned long long *ptr,
+ unsigned long long value)
{
- __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
+ __set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
}
-#define ll_low(x) *(((unsigned int*)&(x))+0)
-#define ll_high(x) *(((unsigned int*)&(x))+1)
-static inline void __set_64bit_var (unsigned long long *ptr,
- unsigned long long value)
+#define ll_low(x) *(((unsigned int *)&(x)) + 0)
+#define ll_high(x) *(((unsigned int *)&(x)) + 1)
+
+static inline void __set_64bit_var(unsigned long long *ptr,
+ unsigned long long value)
{
- __set_64bit(ptr,ll_low(value), ll_high(value));
+ __set_64bit(ptr, ll_low(value), ll_high(value));
}
-#define set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit_constant(ptr, value) : \
- __set_64bit_var(ptr, value) )
+#define set_64bit(ptr, value) \
+ (__builtin_constant_p((value)) \
+ ? __set_64bit_constant((ptr), (value)) \
+ : __set_64bit_var((ptr), (value)))
-#define _set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
- __set_64bit(ptr, ll_low(value), ll_high(value)) )
+#define _set_64bit(ptr, value) \
+ (__builtin_constant_p(value) \
+ ? __set_64bit(ptr, (unsigned int)(value), \
+ (unsigned int)((value) >> 32)) \
+ : __set_64bit(ptr, ll_low((value)), ll_high((value))))
/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway
* Note 2: xchg has a side effect, so the volatile attribute is necessary;
* strictly speaking the primitive is invalid as written, since *ptr is
* really an output argument. --ANK
*/
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+ int size)
{
switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
+ case 1:
+ asm volatile("xchgb %b0,%1"
+ : "=q" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ case 2:
+ asm volatile("xchgw %w0,%1"
+ : "=r" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ case 4:
+ asm volatile("xchgl %0,%1"
+ : "=r" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
}
return x;
}
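As the note above says, xchg with a memory operand asserts the bus lock by itself, so no LOCK prefix is needed - which is also what makes it the classic test-and-set primitive. A user-space sketch (the __sync builtins below compile to xchg and a plain store on x86; an analogue, not the kernel's spinlock):

static volatile int lock;

static void spin_acquire(void)
{
        while (__sync_lock_test_and_set(&lock, 1))
                ;                               /* spin until we store the 1 */
}

static void spin_release(void)
{
        __sync_lock_release(&lock);             /* store 0, release semantics */
}

int main(void)
{
        spin_acquire();
        /* critical section */
        spin_release();
        return 0;
}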
@@ -105,24 +110,27 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
-#define cmpxchg(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
-#define sync_cmpxchg(ptr, o, n) \
- ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))))
+#define sync_cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))))
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))))
#endif
#ifdef CONFIG_X86_CMPXCHG64
-#define cmpxchg64(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
- (unsigned long long)(n)))
-#define cmpxchg64_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o),\
- (unsigned long long)(n)))
+#define cmpxchg64(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
+ (unsigned long long)(n)))
+#define cmpxchg64_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
+ (unsigned long long)(n)))
#endif
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
@@ -131,22 +139,22 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long prev;
switch (size) {
case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
}
return old;
@@ -158,85 +166,88 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
* isn't.
*/
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
- unsigned long old,
- unsigned long new, int size)
+ unsigned long old,
+ unsigned long new, int size)
{
unsigned long prev;
switch (size) {
case 1:
- __asm__ __volatile__("lock; cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("lock; cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 2:
- __asm__ __volatile__("lock; cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("lock; cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 4:
- __asm__ __volatile__("lock; cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("lock; cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
}
return old;
}
static inline unsigned long __cmpxchg_local(volatile void *ptr,
- unsigned long old, unsigned long new, int size)
+ unsigned long old,
+ unsigned long new, int size)
{
unsigned long prev;
switch (size) {
case 1:
- __asm__ __volatile__("cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 2:
- __asm__ __volatile__("cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 4:
- __asm__ __volatile__("cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
}
return old;
}
static inline unsigned long long __cmpxchg64(volatile void *ptr,
- unsigned long long old, unsigned long long new)
+ unsigned long long old,
+ unsigned long long new)
{
unsigned long long prev;
- __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
- : "=A"(prev)
- : "b"((unsigned long)new),
- "c"((unsigned long)(new >> 32)),
- "m"(*__xg(ptr)),
- "0"(old)
- : "memory");
+ asm volatile(LOCK_PREFIX "cmpxchg8b %3"
+ : "=A"(prev)
+ : "b"((unsigned long)new),
+ "c"((unsigned long)(new >> 32)),
+ "m"(*__xg(ptr)),
+ "0"(old)
+ : "memory");
return prev;
}
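cmpxchg8b compares edx:eax with the 8-byte memory operand and, on match, stores ecx:ebx, which is why old and the result travel through the "A" (edx:eax) constraint while new is split across "b" and "c". A user-space check of the semantics via the gcc builtin, which lowers to lock cmpxchg8b on 32-bit x86 (an analogue):

#include <stdio.h>

int main(void)
{
        volatile unsigned long long v = 5;
        unsigned long long old = __sync_val_compare_and_swap(&v, 5ULL, 9ULL);

        printf("old=%llu new=%llu\n", old, (unsigned long long)v);  /* 5 9 */
        return 0;
}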
static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
- unsigned long long old, unsigned long long new)
+ unsigned long long old,
+ unsigned long long new)
{
unsigned long long prev;
- __asm__ __volatile__("cmpxchg8b %3"
- : "=A"(prev)
- : "b"((unsigned long)new),
- "c"((unsigned long)(new >> 32)),
- "m"(*__xg(ptr)),
- "0"(old)
- : "memory");
+ asm volatile("cmpxchg8b %3"
+ : "=A"(prev)
+ : "b"((unsigned long)new),
+ "c"((unsigned long)(new >> 32)),
+ "m"(*__xg(ptr)),
+ "0"(old)
+ : "memory");
return prev;
}
@@ -252,7 +263,7 @@ extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
+ unsigned long new, int size)
{
switch (size) {
case 1:
diff --git a/include/asm-x86/cmpxchg_64.h b/include/asm-x86/cmpxchg_64.h
index 56f5b41e071c..d9b26b9a28cf 100644
--- a/include/asm-x86/cmpxchg_64.h
+++ b/include/asm-x86/cmpxchg_64.h
@@ -3,7 +3,8 @@
#include <asm/alternative.h> /* Provides LOCK_PREFIX */
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \
+ (ptr), sizeof(*(ptr))))
#define __xg(x) ((volatile long *)(x))
@@ -19,33 +20,34 @@ static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
* Note 2: xchg has a side effect, so the volatile attribute is necessary;
* strictly speaking the primitive is invalid as written, since *ptr is
* really an output argument. --ANK
*/
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+ int size)
{
switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %k0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 8:
- __asm__ __volatile__("xchgq %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
+ case 1:
+ asm volatile("xchgb %b0,%1"
+ : "=q" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ case 2:
+ asm volatile("xchgw %w0,%1"
+ : "=r" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ case 4:
+ asm volatile("xchgl %k0,%1"
+ : "=r" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ case 8:
+ asm volatile("xchgq %0,%1"
+ : "=r" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
}
return x;
}
@@ -64,61 +66,62 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long prev;
switch (size) {
case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 8:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
}
return old;
}
static inline unsigned long __cmpxchg_local(volatile void *ptr,
- unsigned long old, unsigned long new, int size)
+ unsigned long old,
+ unsigned long new, int size)
{
unsigned long prev;
switch (size) {
case 1:
- __asm__ __volatile__("cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 2:
- __asm__ __volatile__("cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 4:
- __asm__ __volatile__("cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("cmpxchgl %k1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 8:
- __asm__ __volatile__("cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("cmpxchgq %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
}
return old;
@@ -126,19 +129,20 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
#define cmpxchg(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
+ (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64(ptr, o, n) \
- ({ \
+({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg((ptr), (o), (n)); \
- })
+})
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
+ (unsigned long)(n), \
+ sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) \
- ({ \
+({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
- })
+})
#endif
diff --git a/include/asm-x86/compat.h b/include/asm-x86/compat.h
index d3e8f3e87ee8..1793ac317a30 100644
--- a/include/asm-x86/compat.h
+++ b/include/asm-x86/compat.h
@@ -204,7 +204,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
}
-static __inline__ void __user *compat_alloc_user_space(long len)
+static inline void __user *compat_alloc_user_space(long len)
{
struct pt_regs *regs = task_pt_regs(current);
return (void __user *)regs->sp - len;
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 065e92966c7c..0d609c837a41 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -120,6 +120,9 @@
extern const char * const x86_cap_flags[NCAPINTS*32];
extern const char * const x86_power_flags[32];
+#define test_cpu_cap(c, bit) \
+ test_bit(bit, (unsigned long *)((c)->x86_capability))
+
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && \
( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
@@ -131,7 +134,8 @@ extern const char * const x86_power_flags[32];
(((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
(((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \
? 1 : \
- test_bit(bit, (unsigned long *)((c)->x86_capability)))
+ test_cpu_cap(c, bit))
+
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability))
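cpu_has() folds to a compile-time 1 whenever bit is a constant covered by one of the REQUIRED_MASKn words (features the kernel build already requires), and only otherwise falls back to the runtime bit test that the new test_cpu_cap() helper names. The dispatch idea in miniature (illustrative only, with a single made-up mask word):

#include <stdio.h>

#define REQUIRED_MASK 0x1u              /* pretend feature 0 is always built in */
#define has_feature(caps, bit) \
        ((__builtin_constant_p(bit) && ((1u << (bit)) & REQUIRED_MASK)) \
         ? 1 : (((caps) >> (bit)) & 1u))

int main(void)
{
        unsigned caps = 0x4;            /* only feature 2 set at runtime */

        /* feature 0 folds to 1 at compile time; feature 2 is tested. */
        printf("%d %d\n", has_feature(caps, 0), has_feature(caps, 2)); /* 1 1 */
        return 0;
}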
@@ -181,6 +185,8 @@ extern const char * const x86_power_flags[32];
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
+#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
+#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
# define cpu_has_invlpg 1
diff --git a/include/asm-x86/current_32.h b/include/asm-x86/current_32.h
index d35248539912..5af9bdb97a16 100644
--- a/include/asm-x86/current_32.h
+++ b/include/asm-x86/current_32.h
@@ -11,7 +11,7 @@ static __always_inline struct task_struct *get_current(void)
{
return x86_read_percpu(current_task);
}
-
+
#define current get_current()
#endif /* !(_I386_CURRENT_H) */
diff --git a/include/asm-x86/current_64.h b/include/asm-x86/current_64.h
index bc8adecee66d..2d368ede2fc1 100644
--- a/include/asm-x86/current_64.h
+++ b/include/asm-x86/current_64.h
@@ -1,23 +1,23 @@
#ifndef _X86_64_CURRENT_H
#define _X86_64_CURRENT_H
-#if !defined(__ASSEMBLY__)
+#if !defined(__ASSEMBLY__)
struct task_struct;
#include <asm/pda.h>
-static inline struct task_struct *get_current(void)
-{
- struct task_struct *t = read_pda(pcurrent);
+static inline struct task_struct *get_current(void)
+{
+ struct task_struct *t = read_pda(pcurrent);
return t;
-}
+}
#define current get_current()
#else
#ifndef ASM_OFFSET_H
-#include <asm/asm-offsets.h>
+#include <asm/asm-offsets.h>
#endif
#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h
index 5b6a05d3a771..268a012bcd79 100644
--- a/include/asm-x86/desc.h
+++ b/include/asm-x86/desc.h
@@ -62,8 +62,8 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
}
static inline void pack_gate(gate_desc *gate, unsigned char type,
- unsigned long base, unsigned dpl, unsigned flags, unsigned short seg)
-
+ unsigned long base, unsigned dpl, unsigned flags,
+ unsigned short seg)
{
gate->a = (seg << 16) | (base & 0xffff);
gate->b = (base & 0xffff0000) |
@@ -84,22 +84,23 @@ static inline int desc_empty(const void *ptr)
#define load_TR_desc() native_load_tr_desc()
#define load_gdt(dtr) native_load_gdt(dtr)
#define load_idt(dtr) native_load_idt(dtr)
-#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
-#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
+#define load_tr(tr) asm volatile("ltr %0"::"m" (tr))
+#define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt))
#define store_gdt(dtr) native_store_gdt(dtr)
#define store_idt(dtr) native_store_idt(dtr)
#define store_tr(tr) (tr = native_store_tr())
-#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
+#define store_ldt(ldt) asm("sldt %0":"=m" (ldt))
#define load_TLS(t, cpu) native_load_tls(t, cpu)
#define set_ldt native_set_ldt
-#define write_ldt_entry(dt, entry, desc) \
- native_write_ldt_entry(dt, entry, desc)
-#define write_gdt_entry(dt, entry, desc, type) \
- native_write_gdt_entry(dt, entry, desc, type)
-#define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g)
+#define write_ldt_entry(dt, entry, desc) \
+ native_write_ldt_entry(dt, entry, desc)
+#define write_gdt_entry(dt, entry, desc, type) \
+ native_write_gdt_entry(dt, entry, desc, type)
+#define write_idt_entry(dt, entry, g) \
+ native_write_idt_entry(dt, entry, g)
#endif
static inline void native_write_idt_entry(gate_desc *idt, int entry,
@@ -138,8 +139,8 @@ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
{
desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
- (limit & 0x000f0000) | ((type & 0xff) << 8) |
- ((flags & 0xf) << 20);
+ (limit & 0x000f0000) | ((type & 0xff) << 8) |
+ ((flags & 0xf) << 20);
desc->p = 1;
}
@@ -159,7 +160,6 @@ static inline void set_tssldt_descriptor(void *d, unsigned long addr,
desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF;
desc->base3 = PTR_HIGH(addr);
#else
-
pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
#endif
}
@@ -177,7 +177,8 @@ static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
* last valid byte
*/
set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
- IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
+ IO_BITMAP_OFFSET + IO_BITMAP_BYTES +
+ sizeof(unsigned long) - 1);
write_gdt_entry(d, entry, &tss, DESC_TSS);
}
@@ -186,7 +187,7 @@ static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
static inline void native_set_ldt(const void *addr, unsigned int entries)
{
if (likely(entries == 0))
- __asm__ __volatile__("lldt %w0"::"q" (0));
+ asm volatile("lldt %w0"::"q" (0));
else {
unsigned cpu = smp_processor_id();
ldt_desc ldt;
@@ -195,7 +196,7 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
DESC_LDT, entries * sizeof(ldt) - 1);
write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
&ldt, DESC_LDT);
- __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
+ asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
}
}
@@ -240,15 +241,15 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
}
-#define _LDT_empty(info) (\
- (info)->base_addr == 0 && \
- (info)->limit == 0 && \
- (info)->contents == 0 && \
- (info)->read_exec_only == 1 && \
- (info)->seg_32bit == 0 && \
- (info)->limit_in_pages == 0 && \
- (info)->seg_not_present == 1 && \
- (info)->useable == 0)
+#define _LDT_empty(info) \
+ ((info)->base_addr == 0 && \
+ (info)->limit == 0 && \
+ (info)->contents == 0 && \
+ (info)->read_exec_only == 1 && \
+ (info)->seg_32bit == 0 && \
+ (info)->limit_in_pages == 0 && \
+ (info)->seg_not_present == 1 && \
+ (info)->useable == 0)
#ifdef CONFIG_X86_64
#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
@@ -287,7 +288,7 @@ static inline unsigned long get_desc_limit(const struct desc_struct *desc)
}
static inline void _set_gate(int gate, unsigned type, void *addr,
- unsigned dpl, unsigned ist, unsigned seg)
+ unsigned dpl, unsigned ist, unsigned seg)
{
gate_desc s;
pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
@@ -370,10 +371,10 @@ static inline void set_system_gate_ist(int n, void *addr, unsigned ist)
* Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax.
*/
#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \
- movb idx*8+4(gdt), lo_b; \
- movb idx*8+7(gdt), hi_b; \
- shll $16, base; \
- movw idx*8+2(gdt), lo_w;
+ movb idx * 8 + 4(gdt), lo_b; \
+ movb idx * 8 + 7(gdt), hi_b; \
+ shll $16, base; \
+ movw idx * 8 + 2(gdt), lo_w;
#endif /* __ASSEMBLY__ */
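Since pack_descriptor() is touched above only for line wrapping, here is the same two-word packing pulled out into a standalone user-space sketch so the bit layout can be inspected; the kernel version additionally sets the present bit through desc->p, while this example folds it into the type byte:

#include <stdio.h>
#include <stdint.h>

static void pack_descriptor(uint32_t *a, uint32_t *b, unsigned long base,
                            unsigned long limit, unsigned char type,
                            unsigned flags)
{
        *a = ((base & 0xffff) << 16) | (limit & 0xffff);
        *b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
             (limit & 0x000f0000) | ((type & 0xff) << 8) |
             ((flags & 0xf) << 20);
}

int main(void)
{
        uint32_t a, b;

        /* flat 4GB code segment: base 0, limit 0xfffff, present+code */
        pack_descriptor(&a, &b, 0, 0xfffff, 0x9a, 0xc);
        printf("a=%08x b=%08x\n", a, b);        /* a=0000ffff b=00cf9a00 */
        return 0;
}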
diff --git a/include/asm-x86/desc_defs.h b/include/asm-x86/desc_defs.h
index e33f078b3e54..eccb4ea1f918 100644
--- a/include/asm-x86/desc_defs.h
+++ b/include/asm-x86/desc_defs.h
@@ -18,17 +18,19 @@
* incrementally. We keep the signature as a struct, rather than a union,
* so we can get rid of it transparently in the future -- glommer
*/
-// 8 byte segment descriptor
+/* 8 byte segment descriptor */
struct desc_struct {
union {
- struct { unsigned int a, b; };
+ struct {
+ unsigned int a;
+ unsigned int b;
+ };
struct {
u16 limit0;
u16 base0;
unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
};
-
};
} __attribute__((packed));
@@ -39,7 +41,7 @@ enum {
GATE_TASK = 0x5,
};
-// 16byte gate
+/* 16byte gate */
struct gate_struct64 {
u16 offset_low;
u16 segment;
@@ -56,10 +58,10 @@ struct gate_struct64 {
enum {
DESC_TSS = 0x9,
DESC_LDT = 0x2,
- DESCTYPE_S = 0x10, /* !system */
+ DESCTYPE_S = 0x10, /* !system */
};
-// LDT or TSS descriptor in the GDT. 16 bytes.
+/* LDT or TSS descriptor in the GDT. 16 bytes. */
struct ldttss_desc64 {
u16 limit0;
u16 base0;
@@ -84,7 +86,6 @@ struct desc_ptr {
unsigned long address;
} __attribute__((packed)) ;
-
#endif /* !__ASSEMBLY__ */
#endif
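The anonymous union this hunk reshapes is doing real work: the (a, b) word view and the bitfield view alias the same eight bytes of a segment descriptor. A hedged sketch, assuming GCC's little-endian bitfield layout (which is what the kernel relies on):

#include <stdio.h>
#include <stdint.h>

typedef uint16_t u16;

struct desc_struct {
        union {
                struct {
                        unsigned int a;
                        unsigned int b;
                };
                struct {
                        u16 limit0;
                        u16 base0;
                        unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
                        unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
                };
        };
} __attribute__((packed));

int main(void)
{
        struct desc_struct d = { .a = 0x0000ffff, .b = 0x00cf9a00 };

        /* the word view above decodes through the bitfield view */
        printf("type=%x dpl=%u p=%u g=%u\n", d.type, d.dpl, d.p, d.g);
        return 0;
}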
diff --git a/include/asm-x86/div64.h b/include/asm-x86/div64.h
index e98d16e7a37a..0dbf8bf3ef0a 100644
--- a/include/asm-x86/div64.h
+++ b/include/asm-x86/div64.h
@@ -17,18 +17,20 @@
* This ends up being the most efficient "calling
* convention" on x86.
*/
-#define do_div(n,base) ({ \
- unsigned long __upper, __low, __high, __mod, __base; \
- __base = (base); \
- asm("":"=a" (__low), "=d" (__high):"A" (n)); \
- __upper = __high; \
- if (__high) { \
- __upper = __high % (__base); \
- __high = __high / (__base); \
- } \
- asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \
- asm("":"=A" (n):"a" (__low),"d" (__high)); \
- __mod; \
+#define do_div(n, base) \
+({ \
+ unsigned long __upper, __low, __high, __mod, __base; \
+ __base = (base); \
+ asm("":"=a" (__low), "=d" (__high) : "A" (n)); \
+ __upper = __high; \
+ if (__high) { \
+ __upper = __high % (__base); \
+ __high = __high / (__base); \
+ } \
+ asm("divl %2":"=a" (__low), "=d" (__mod) \
+ : "rm" (__base), "0" (__low), "1" (__upper)); \
+ asm("":"=A" (n) : "a" (__low), "d" (__high)); \
+ __mod; \
})
/*
@@ -37,14 +39,13 @@
*
* Warning, this will do an exception if X overflows.
*/
-#define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c)
+#define div_long_long_rem(a, b, c) div_ll_X_l_rem(a, b, c)
-static inline long
-div_ll_X_l_rem(long long divs, long div, long *rem)
+static inline long div_ll_X_l_rem(long long divs, long div, long *rem)
{
long dum2;
- __asm__("divl %2":"=a"(dum2), "=d"(*rem)
- : "rm"(div), "A"(divs));
+ asm("divl %2":"=a"(dum2), "=d"(*rem)
+ : "rm"(div), "A"(divs));
return dum2;
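The do_div() statement expression above divides the 64-bit n in place and evaluates to the 32-bit remainder. A portable sketch of those semantics without the inline asm (the real macro likewise requires a 32-bit base):

#include <stdio.h>
#include <stdint.h>

#define do_div(n, base)                                 \
({                                                      \
        uint32_t __base = (base);                       \
        uint32_t __rem = (n) % __base;                  \
        (n) = (n) / __base;                             \
        __rem;                                          \
})

int main(void)
{
        uint64_t ns = 1000000123ULL;
        uint32_t rem = do_div(ns, 1000000000U);

        /* ns is now whole seconds, rem the leftover nanoseconds */
        printf("sec=%llu nsec=%u\n", (unsigned long long)ns, rem);
        return 0;
}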
diff --git a/include/asm-x86/dma.h b/include/asm-x86/dma.h
index e9733ce89880..ca1098a7e580 100644
--- a/include/asm-x86/dma.h
+++ b/include/asm-x86/dma.h
@@ -12,7 +12,6 @@
#include <asm/io.h> /* need byte IO */
#include <linux/delay.h>
-
#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
#define dma_outb outb_p
#else
@@ -74,15 +73,15 @@
#ifdef CONFIG_X86_32
/* The maximum address that we can perform a DMA transfer to on this platform */
-#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000)
+#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x1000000)
#else
/* 16MB ISA DMA zone */
-#define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT)
+#define MAX_DMA_PFN ((16 * 1024 * 1024) >> PAGE_SHIFT)
/* 4GB broken PCI/AGP hardware bus master zone */
-#define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT)
+#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
/* Compat define for old dma zone */
#define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
@@ -154,20 +153,20 @@
extern spinlock_t dma_spin_lock;
-static __inline__ unsigned long claim_dma_lock(void)
+static inline unsigned long claim_dma_lock(void)
{
unsigned long flags;
spin_lock_irqsave(&dma_spin_lock, flags);
return flags;
}
-static __inline__ void release_dma_lock(unsigned long flags)
+static inline void release_dma_lock(unsigned long flags)
{
spin_unlock_irqrestore(&dma_spin_lock, flags);
}
/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
+static inline void enable_dma(unsigned int dmanr)
{
if (dmanr <= 3)
dma_outb(dmanr, DMA1_MASK_REG);
@@ -175,7 +174,7 @@ static __inline__ void enable_dma(unsigned int dmanr)
dma_outb(dmanr & 3, DMA2_MASK_REG);
}
-static __inline__ void disable_dma(unsigned int dmanr)
+static inline void disable_dma(unsigned int dmanr)
{
if (dmanr <= 3)
dma_outb(dmanr | 4, DMA1_MASK_REG);
@@ -190,7 +189,7 @@ static __inline__ void disable_dma(unsigned int dmanr)
* --- In order to do that, the DMA routines below should ---
* --- only be used while holding the DMA lock ! ---
*/
-static __inline__ void clear_dma_ff(unsigned int dmanr)
+static inline void clear_dma_ff(unsigned int dmanr)
{
if (dmanr <= 3)
dma_outb(0, DMA1_CLEAR_FF_REG);
@@ -199,7 +198,7 @@ static __inline__ void clear_dma_ff(unsigned int dmanr)
}
/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+static inline void set_dma_mode(unsigned int dmanr, char mode)
{
if (dmanr <= 3)
dma_outb(mode | dmanr, DMA1_MODE_REG);
@@ -212,7 +211,7 @@ static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
* the lower 16 bits of the DMA current address register, but a 64k boundary
* may have been crossed.
*/
-static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
+static inline void set_dma_page(unsigned int dmanr, char pagenr)
{
switch (dmanr) {
case 0:
@@ -243,15 +242,15 @@ static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
/* Set transfer address & page bits for specific DMA channel.
* Assumes dma flipflop is clear.
*/
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+static inline void set_dma_addr(unsigned int dmanr, unsigned int a)
{
set_dma_page(dmanr, a>>16);
if (dmanr <= 3) {
dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
} else {
- dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
- dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
+ dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
+ dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
}
}
@@ -264,18 +263,18 @@ static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
* Assumes dma flip-flop is clear.
* NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
*/
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+static inline void set_dma_count(unsigned int dmanr, unsigned int count)
{
count--;
if (dmanr <= 3) {
- dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
- dma_outb((count >> 8) & 0xff,
- ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+ dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+ dma_outb((count >> 8) & 0xff,
+ ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
} else {
- dma_outb((count >> 1) & 0xff,
- ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
- dma_outb((count >> 9) & 0xff,
- ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+ dma_outb((count >> 1) & 0xff,
+ ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+ dma_outb((count >> 9) & 0xff,
+ ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
}
}
@@ -288,7 +287,7 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
*
* Assumes DMA flip-flop is clear.
*/
-static __inline__ int get_dma_residue(unsigned int dmanr)
+static inline int get_dma_residue(unsigned int dmanr)
{
unsigned int io_port;
/* using short to get 16-bit wrap around */
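The comments in this header insist the i8237 helpers only run while holding the DMA lock. A kernel-side sketch of the canonical sequence, using only the helpers declared above — program_dma() itself is a hypothetical caller, not kernel code:

static void program_dma(unsigned int chan, unsigned int addr,
                        unsigned int len, char mode)
{
        unsigned long flags = claim_dma_lock();

        disable_dma(chan);
        clear_dma_ff(chan);     /* reset the address/count flip-flop */
        set_dma_mode(chan, mode);
        set_dma_addr(chan, addr);
        set_dma_count(chan, len);
        enable_dma(chan);

        release_dma_lock(flags);
}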
diff --git a/include/asm-x86/dwarf2_64.h b/include/asm-x86/dwarf2_64.h
index eedc08526b0b..c950519a264d 100644
--- a/include/asm-x86/dwarf2_64.h
+++ b/include/asm-x86/dwarf2_64.h
@@ -1,16 +1,15 @@
#ifndef _DWARF2_H
#define _DWARF2_H 1
-
#ifndef __ASSEMBLY__
#warning "asm/dwarf2.h should be only included in pure assembly files"
#endif
-/*
+/*
Macros for dwarf2 CFI unwind table entries.
- See "as.info" for details on these pseudo ops. Unfortunately
- they are only supported in very new binutils, so define them
- away for older version.
+ See "as.info" for details on these pseudo ops. Unfortunately
+ they are only supported in very new binutils, so define them
+ away for older versions.
*/
#ifdef CONFIG_AS_CFI
diff --git a/include/asm-x86/e820_32.h b/include/asm-x86/e820_32.h
index e7207a6de3e0..43b1a8bd4b34 100644
--- a/include/asm-x86/e820_32.h
+++ b/include/asm-x86/e820_32.h
@@ -34,8 +34,8 @@ extern void e820_register_memory(void);
extern void limit_regions(unsigned long long size);
extern void print_memory_map(char *who);
extern void init_iomem_resources(struct resource *code_resource,
- struct resource *data_resource,
- struct resource *bss_resource);
+ struct resource *data_resource,
+ struct resource *bss_resource);
#if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION)
extern void e820_mark_nosave_regions(void);
diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h
index 22ede73ae724..f478c57eb060 100644
--- a/include/asm-x86/e820_64.h
+++ b/include/asm-x86/e820_64.h
@@ -14,20 +14,24 @@
#include <linux/ioport.h>
#ifndef __ASSEMBLY__
-extern unsigned long find_e820_area(unsigned long start, unsigned long end,
- unsigned size, unsigned long align);
-extern void add_memory_region(unsigned long start, unsigned long size,
+extern unsigned long find_e820_area(unsigned long start, unsigned long end,
+ unsigned long size, unsigned long align);
+extern unsigned long find_e820_area_size(unsigned long start,
+ unsigned long *sizep,
+ unsigned long align);
+extern void add_memory_region(unsigned long start, unsigned long size,
int type);
extern void update_memory_range(u64 start, u64 size, unsigned old_type,
unsigned new_type);
extern void setup_memory_region(void);
-extern void contig_e820_setup(void);
+extern void contig_e820_setup(void);
extern unsigned long e820_end_of_ram(void);
-extern void e820_reserve_resources(struct resource *code_resource,
- struct resource *data_resource, struct resource *bss_resource);
+extern void e820_reserve_resources(void);
extern void e820_mark_nosave_regions(void);
-extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
-extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
+extern int e820_any_mapped(unsigned long start, unsigned long end,
+ unsigned type);
+extern int e820_all_mapped(unsigned long start, unsigned long end,
+ unsigned type);
extern int e820_any_non_reserved(unsigned long start, unsigned long end);
extern int is_memory_any_valid(unsigned long start, unsigned long end);
extern int e820_all_non_reserved(unsigned long start, unsigned long end);
@@ -35,8 +39,8 @@ extern int is_memory_all_valid(unsigned long start, unsigned long end);
extern unsigned long e820_hole_size(unsigned long start, unsigned long end);
extern void e820_setup_gap(void);
-extern void e820_register_active_regions(int nid,
- unsigned long start_pfn, unsigned long end_pfn);
+extern void e820_register_active_regions(int nid, unsigned long start_pfn,
+ unsigned long end_pfn);
extern void finish_e820_parsing(void);
diff --git a/include/asm-x86/edac.h b/include/asm-x86/edac.h
index cf3200a745ad..a8088f63a30e 100644
--- a/include/asm-x86/edac.h
+++ b/include/asm-x86/edac.h
@@ -3,7 +3,7 @@
/* ECC atomic, DMA, SMP and interrupt safe scrub function */
-static __inline__ void atomic_scrub(void *va, u32 size)
+static inline void atomic_scrub(void *va, u32 size)
{
u32 i, *virt_addr = va;
@@ -12,7 +12,7 @@ static __inline__ void atomic_scrub(void *va, u32 size)
* are interrupt, DMA and SMP safe.
*/
for (i = 0; i < size / 4; i++, virt_addr++)
- __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+ asm volatile("lock; addl $0, %0"::"m" (*virt_addr));
}
#endif
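atomic_scrub()'s lock-prefixed addl of zero forces a full atomic read-modify-write, rewriting each word (and regenerating its ECC bits) without changing the value. The same idea expressed with portable GCC atomics, as a sketch rather than the kernel's asm:

#include <stdint.h>

static inline void atomic_scrub_portable(void *va, uint32_t size)
{
        uint32_t i, *virt_addr = va;

        for (i = 0; i < size / 4; i++, virt_addr++)
                /* atomic fetch-add of 0: full RMW cycle, value unchanged */
                __atomic_fetch_add(virt_addr, 0, __ATOMIC_SEQ_CST);
}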
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h
index ea9734b74aca..d53004b855cc 100644
--- a/include/asm-x86/efi.h
+++ b/include/asm-x86/efi.h
@@ -20,7 +20,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
*/
#define efi_call_virt(f, args...) \
- ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args)
+ ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args)
#define efi_call_virt0(f) efi_call_virt(f)
#define efi_call_virt1(f, a1) efi_call_virt(f, a1)
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
index fb62f9941e38..8f232dc5b5fe 100644
--- a/include/asm-x86/elf.h
+++ b/include/asm-x86/elf.h
@@ -11,7 +11,7 @@
typedef unsigned long elf_greg_t;
-#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
+#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_i387_struct elf_fpregset_t;
@@ -82,8 +82,9 @@ extern unsigned int vdso_enabled;
#define elf_check_arch_ia32(x) \
(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
-#ifdef CONFIG_X86_32
#include <asm/processor.h>
+
+#ifdef CONFIG_X86_32
#include <asm/system.h> /* for savesegment */
#include <asm/desc.h>
@@ -99,10 +100,11 @@ extern unsigned int vdso_enabled;
We might as well make sure everything else is cleared too (except for %esp),
just to make things more deterministic.
*/
-#define ELF_PLAT_INIT(_r, load_addr) do { \
- _r->bx = 0; _r->cx = 0; _r->dx = 0; \
- _r->si = 0; _r->di = 0; _r->bp = 0; \
- _r->ax = 0; \
+#define ELF_PLAT_INIT(_r, load_addr) \
+ do { \
+ _r->bx = 0; _r->cx = 0; _r->dx = 0; \
+ _r->si = 0; _r->di = 0; _r->bp = 0; \
+ _r->ax = 0; \
} while (0)
/*
@@ -110,24 +112,25 @@ extern unsigned int vdso_enabled;
* now struct_user_regs, they are different)
*/
-#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
- pr_reg[0] = regs->bx; \
- pr_reg[1] = regs->cx; \
- pr_reg[2] = regs->dx; \
- pr_reg[3] = regs->si; \
- pr_reg[4] = regs->di; \
- pr_reg[5] = regs->bp; \
- pr_reg[6] = regs->ax; \
- pr_reg[7] = regs->ds & 0xffff; \
- pr_reg[8] = regs->es & 0xffff; \
- pr_reg[9] = regs->fs & 0xffff; \
- savesegment(gs, pr_reg[10]); \
- pr_reg[11] = regs->orig_ax; \
- pr_reg[12] = regs->ip; \
- pr_reg[13] = regs->cs & 0xffff; \
- pr_reg[14] = regs->flags; \
- pr_reg[15] = regs->sp; \
- pr_reg[16] = regs->ss & 0xffff; \
+#define ELF_CORE_COPY_REGS(pr_reg, regs) \
+do { \
+ pr_reg[0] = regs->bx; \
+ pr_reg[1] = regs->cx; \
+ pr_reg[2] = regs->dx; \
+ pr_reg[3] = regs->si; \
+ pr_reg[4] = regs->di; \
+ pr_reg[5] = regs->bp; \
+ pr_reg[6] = regs->ax; \
+ pr_reg[7] = regs->ds & 0xffff; \
+ pr_reg[8] = regs->es & 0xffff; \
+ pr_reg[9] = regs->fs & 0xffff; \
+ savesegment(gs, pr_reg[10]); \
+ pr_reg[11] = regs->orig_ax; \
+ pr_reg[12] = regs->ip; \
+ pr_reg[13] = regs->cs & 0xffff; \
+ pr_reg[14] = regs->flags; \
+ pr_reg[15] = regs->sp; \
+ pr_reg[16] = regs->ss & 0xffff; \
} while (0);
#define ELF_PLATFORM (utsname()->machine)
@@ -135,12 +138,10 @@ extern unsigned int vdso_enabled;
#else /* CONFIG_X86_32 */
-#include <asm/processor.h>
-
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
-#define elf_check_arch(x) \
+#define elf_check_arch(x) \
((x)->e_machine == EM_X86_64)
#define compat_elf_check_arch(x) elf_check_arch_ia32(x)
@@ -169,24 +170,30 @@ static inline void elf_common_init(struct thread_struct *t,
t->ds = t->es = ds;
}
-#define ELF_PLAT_INIT(_r, load_addr) do { \
- elf_common_init(&current->thread, _r, 0); \
- clear_thread_flag(TIF_IA32); \
+#define ELF_PLAT_INIT(_r, load_addr) \
+do { \
+ elf_common_init(&current->thread, _r, 0); \
+ clear_thread_flag(TIF_IA32); \
} while (0)
-#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \
+#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \
elf_common_init(&current->thread, regs, __USER_DS)
-#define compat_start_thread(regs, ip, sp) do { \
- start_ia32_thread(regs, ip, sp); \
- set_fs(USER_DS); \
- } while (0)
-#define COMPAT_SET_PERSONALITY(ex, ibcs2) do { \
- if (test_thread_flag(TIF_IA32)) \
- clear_thread_flag(TIF_ABI_PENDING); \
- else \
- set_thread_flag(TIF_ABI_PENDING); \
- current->personality |= force_personality32; \
- } while (0)
+
+#define compat_start_thread(regs, ip, sp) \
+do { \
+ start_ia32_thread(regs, ip, sp); \
+ set_fs(USER_DS); \
+} while (0)
+
+#define COMPAT_SET_PERSONALITY(ex, ibcs2) \
+do { \
+ if (test_thread_flag(TIF_IA32)) \
+ clear_thread_flag(TIF_ABI_PENDING); \
+ else \
+ set_thread_flag(TIF_ABI_PENDING); \
+ current->personality |= force_personality32; \
+} while (0)
+
#define COMPAT_ELF_PLATFORM ("i686")
/*
@@ -195,7 +202,8 @@ static inline void elf_common_init(struct thread_struct *t,
* getting dumped.
*/
-#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
+#define ELF_CORE_COPY_REGS(pr_reg, regs) \
+do { \
unsigned v; \
(pr_reg)[0] = (regs)->r15; \
(pr_reg)[1] = (regs)->r14; \
@@ -269,10 +277,12 @@ extern int force_personality32;
struct task_struct;
-#define ARCH_DLINFO_IA32(vdso_enabled) \
-do if (vdso_enabled) { \
+#define ARCH_DLINFO_IA32(vdso_enabled) \
+do { \
+ if (vdso_enabled) { \
NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
+ } \
} while (0)
#ifdef CONFIG_X86_32
@@ -290,9 +300,11 @@ do if (vdso_enabled) { \
/* 1GB for 64bit, 8MB for 32bit */
#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
-#define ARCH_DLINFO \
-do if (vdso_enabled) { \
- NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\
+#define ARCH_DLINFO \
+do { \
+ if (vdso_enabled) \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (unsigned long)current->mm->context.vdso); \
} while (0)
#define AT_SYSINFO 32
@@ -305,8 +317,8 @@ do if (vdso_enabled) { \
#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
-#define VDSO_ENTRY \
- ((unsigned long) VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
+#define VDSO_ENTRY \
+ ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
struct linux_binprm;
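Several elf.h macros above are re-flowed into the canonical do { ... } while (0) shape. A toy reminder of why multi-statement macros need that wrapper — INIT_REGS is illustrative only, not a kernel macro:

#include <stdio.h>

/* brace-block macro: the ';' at the call site ends the if early,
 * so a following else no longer parses */
#define INIT_REGS_BROKEN(r)     { (r)->ax = 0; (r)->bx = 0; }

/* do/while(0) macro: swallows exactly one ';', usable anywhere a
 * single statement is */
#define INIT_REGS(r)            do { (r)->ax = 0; (r)->bx = 0; } while (0)

struct regs { int ax, bx; };

int main(void)
{
        struct regs r = { 1, 2 };

        if (r.ax)
                INIT_REGS(&r);  /* with INIT_REGS_BROKEN this if/else... */
        else
                r.bx = -1;      /* ...would fail to compile */
        printf("%d %d\n", r.ax, r.bx);
        return 0;
}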
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h
index a7404d50686b..eb1665125c44 100644
--- a/include/asm-x86/fixmap_32.h
+++ b/include/asm-x86/fixmap_32.h
@@ -99,8 +99,7 @@ enum fixed_addresses {
*/
#define NR_FIX_BTMAPS 64
#define FIX_BTMAPS_NESTING 4
- FIX_BTMAP_END =
- __end_of_permanent_fixed_addresses + 512 -
+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 512 -
(__end_of_permanent_fixed_addresses & 511),
FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1,
FIX_WP_TEST,
@@ -110,20 +109,20 @@ enum fixed_addresses {
__end_of_fixed_addresses
};
-extern void __set_fixmap (enum fixed_addresses idx,
- unsigned long phys, pgprot_t flags);
+extern void __set_fixmap(enum fixed_addresses idx,
+ unsigned long phys, pgprot_t flags);
extern void reserve_top_address(unsigned long reserve);
-#define set_fixmap(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL)
+#define set_fixmap(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL)
/*
* Some hardware wants to get fixmapped without caching.
*/
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+#define set_fixmap_nocache(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-#define clear_fixmap(idx) \
- __set_fixmap(idx, 0, __pgprot(0))
+#define clear_fixmap(idx) \
+ __set_fixmap(idx, 0, __pgprot(0))
#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
@@ -156,7 +155,7 @@ static __always_inline unsigned long fix_to_virt(const unsigned int idx)
if (idx >= __end_of_fixed_addresses)
__this_fixmap_does_not_exist();
- return __fix_to_virt(idx);
+ return __fix_to_virt(idx);
}
static inline unsigned long virt_to_fix(const unsigned long vaddr)
diff --git a/include/asm-x86/fixmap_64.h b/include/asm-x86/fixmap_64.h
index 70ddb21e6458..f3d76858c0e6 100644
--- a/include/asm-x86/fixmap_64.h
+++ b/include/asm-x86/fixmap_64.h
@@ -34,32 +34,34 @@
enum fixed_addresses {
VSYSCALL_LAST_PAGE,
- VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
+ VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
+ + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
VSYSCALL_HPET,
FIX_DBGP_BASE,
FIX_EARLYCON_MEM_BASE,
FIX_HPET_BASE,
FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
FIX_IO_APIC_BASE_0,
- FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
+ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
FIX_EFI_IO_MAP_LAST_PAGE,
- FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE+MAX_EFI_IO_PAGES-1,
+ FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE
+ + MAX_EFI_IO_PAGES - 1,
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
FIX_OHCI1394_BASE,
#endif
__end_of_fixed_addresses
};
-extern void __set_fixmap (enum fixed_addresses idx,
- unsigned long phys, pgprot_t flags);
+extern void __set_fixmap(enum fixed_addresses idx,
+ unsigned long phys, pgprot_t flags);
-#define set_fixmap(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL)
+#define set_fixmap(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL)
/*
* Some hardware wants to get fixmapped without caching.
*/
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+#define set_fixmap_nocache(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
diff --git a/include/asm-x86/floppy.h b/include/asm-x86/floppy.h
index a48d7153c097..dbe82a5c5eac 100644
--- a/include/asm-x86/floppy.h
+++ b/include/asm-x86/floppy.h
@@ -20,20 +20,21 @@
* driver otherwise. It doesn't matter much for performance anyway, as most
* floppy accesses go through the track buffer.
*/
-#define _CROSS_64KB(a,s,vdma) \
-(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
+#define _CROSS_64KB(a, s, vdma) \
+ (!(vdma) && \
+ ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
-#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
+#define CROSS_64KB(a, s) _CROSS_64KB(a, s, use_virtual_dma & 1)
-#define SW fd_routine[use_virtual_dma&1]
+#define SW fd_routine[use_virtual_dma & 1]
#define CSW fd_routine[can_use_virtual_dma & 1]
#define fd_inb(port) inb_p(port)
-#define fd_outb(value,port) outb_p(value,port)
+#define fd_outb(value, port) outb_p(value, port)
-#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy")
+#define fd_request_dma() CSW._request_dma(FLOPPY_DMA, "floppy")
#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
@@ -52,64 +53,64 @@ static int doing_pdma;
static irqreturn_t floppy_hardint(int irq, void *dev_id)
{
- register unsigned char st;
+ unsigned char st;
#undef TRACE_FLPY_INT
#ifdef TRACE_FLPY_INT
- static int calls=0;
- static int bytes=0;
- static int dma_wait=0;
+ static int calls;
+ static int bytes;
+ static int dma_wait;
#endif
if (!doing_pdma)
return floppy_interrupt(irq, dev_id);
#ifdef TRACE_FLPY_INT
- if(!calls)
+ if (!calls)
bytes = virtual_dma_count;
#endif
{
- register int lcount;
- register char *lptr;
+ int lcount;
+ char *lptr;
st = 1;
- for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
- lcount; lcount--, lptr++) {
- st=inb(virtual_dma_port+4) & 0xa0 ;
- if(st != 0xa0)
+ for (lcount = virtual_dma_count, lptr = virtual_dma_addr;
+ lcount; lcount--, lptr++) {
+ st = inb(virtual_dma_port + 4) & 0xa0;
+ if (st != 0xa0)
break;
- if(virtual_dma_mode)
- outb_p(*lptr, virtual_dma_port+5);
+ if (virtual_dma_mode)
+ outb_p(*lptr, virtual_dma_port + 5);
else
- *lptr = inb_p(virtual_dma_port+5);
+ *lptr = inb_p(virtual_dma_port + 5);
}
virtual_dma_count = lcount;
virtual_dma_addr = lptr;
- st = inb(virtual_dma_port+4);
+ st = inb(virtual_dma_port + 4);
}
#ifdef TRACE_FLPY_INT
calls++;
#endif
- if(st == 0x20)
+ if (st == 0x20)
return IRQ_HANDLED;
- if(!(st & 0x20)) {
+ if (!(st & 0x20)) {
virtual_dma_residue += virtual_dma_count;
- virtual_dma_count=0;
+ virtual_dma_count = 0;
#ifdef TRACE_FLPY_INT
printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
virtual_dma_count, virtual_dma_residue, calls, bytes,
dma_wait);
calls = 0;
- dma_wait=0;
+ dma_wait = 0;
#endif
doing_pdma = 0;
floppy_interrupt(irq, dev_id);
return IRQ_HANDLED;
}
#ifdef TRACE_FLPY_INT
- if(!virtual_dma_count)
+ if (!virtual_dma_count)
dma_wait++;
#endif
return IRQ_HANDLED;
@@ -117,14 +118,14 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id)
static void fd_disable_dma(void)
{
- if(! (can_use_virtual_dma & 1))
+ if (!(can_use_virtual_dma & 1))
disable_dma(FLOPPY_DMA);
doing_pdma = 0;
virtual_dma_residue += virtual_dma_count;
- virtual_dma_count=0;
+ virtual_dma_count = 0;
}
-static int vdma_request_dma(unsigned int dmanr, const char * device_id)
+static int vdma_request_dma(unsigned int dmanr, const char *device_id)
{
return 0;
}
@@ -142,7 +143,7 @@ static int vdma_get_dma_residue(unsigned int dummy)
static int fd_request_irq(void)
{
- if(can_use_virtual_dma)
+ if (can_use_virtual_dma)
return request_irq(FLOPPY_IRQ, floppy_hardint,
IRQF_DISABLED, "floppy", NULL);
else
@@ -152,13 +153,13 @@ static int fd_request_irq(void)
static unsigned long dma_mem_alloc(unsigned long size)
{
- return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY,get_order(size));
+ return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY, get_order(size));
}
static unsigned long vdma_mem_alloc(unsigned long size)
{
- return (unsigned long) vmalloc(size);
+ return (unsigned long)vmalloc(size);
}
@@ -166,7 +167,7 @@ static unsigned long vdma_mem_alloc(unsigned long size)
static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
{
- if((unsigned long) addr >= (unsigned long) high_memory)
+ if ((unsigned long)addr >= (unsigned long)high_memory)
vfree((void *)addr);
else
free_pages(addr, get_order(size));
@@ -176,10 +177,10 @@ static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
static void _fd_chose_dma_mode(char *addr, unsigned long size)
{
- if(can_use_virtual_dma == 2) {
- if((unsigned long) addr >= (unsigned long) high_memory ||
- isa_virt_to_bus(addr) >= 0x1000000 ||
- _CROSS_64KB(addr, size, 0))
+ if (can_use_virtual_dma == 2) {
+ if ((unsigned long)addr >= (unsigned long)high_memory ||
+ isa_virt_to_bus(addr) >= 0x1000000 ||
+ _CROSS_64KB(addr, size, 0))
use_virtual_dma = 1;
else
use_virtual_dma = 0;
@@ -195,7 +196,7 @@ static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
{
doing_pdma = 1;
virtual_dma_port = io;
- virtual_dma_mode = (mode == DMA_MODE_WRITE);
+ virtual_dma_mode = (mode == DMA_MODE_WRITE);
virtual_dma_addr = addr;
virtual_dma_count = size;
virtual_dma_residue = 0;
@@ -213,18 +214,18 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
/* actual, physical DMA */
doing_pdma = 0;
clear_dma_ff(FLOPPY_DMA);
- set_dma_mode(FLOPPY_DMA,mode);
- set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr));
- set_dma_count(FLOPPY_DMA,size);
+ set_dma_mode(FLOPPY_DMA, mode);
+ set_dma_addr(FLOPPY_DMA, isa_virt_to_bus(addr));
+ set_dma_count(FLOPPY_DMA, size);
enable_dma(FLOPPY_DMA);
return 0;
}
static struct fd_routine_l {
- int (*_request_dma)(unsigned int dmanr, const char * device_id);
+ int (*_request_dma)(unsigned int dmanr, const char *device_id);
void (*_free_dma)(unsigned int dmanr);
int (*_get_dma_residue)(unsigned int dummy);
- unsigned long (*_dma_mem_alloc) (unsigned long size);
+ unsigned long (*_dma_mem_alloc)(unsigned long size);
int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
} fd_routine[] = {
{
@@ -252,7 +253,8 @@ static int FDC2 = -1;
* is needed to prevent corrupted CMOS RAM in case "insmod floppy"
* coincides with another rtc CMOS user. Paul G.
*/
-#define FLOPPY0_TYPE ({ \
+#define FLOPPY0_TYPE \
+({ \
unsigned long flags; \
unsigned char val; \
spin_lock_irqsave(&rtc_lock, flags); \
@@ -261,7 +263,8 @@ static int FDC2 = -1;
val; \
})
-#define FLOPPY1_TYPE ({ \
+#define FLOPPY1_TYPE \
+({ \
unsigned long flags; \
unsigned char val; \
spin_lock_irqsave(&rtc_lock, flags); \
diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h
index c9952ea9f698..ac0fbf24d722 100644
--- a/include/asm-x86/futex.h
+++ b/include/asm-x86/futex.h
@@ -12,35 +12,32 @@
#include <asm/uaccess.h>
#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
- __asm__ __volatile( \
-"1: " insn "\n" \
-"2: .section .fixup,\"ax\"\n \
-3: mov %3, %1\n \
- jmp 2b\n \
- .previous\n" \
- _ASM_EXTABLE(1b,3b) \
- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
- : "i" (-EFAULT), "0" (oparg), "1" (0))
+ asm volatile("1:\t" insn "\n" \
+ "2:\t.section .fixup,\"ax\"\n" \
+ "3:\tmov\t%3, %1\n" \
+ "\tjmp\t2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
+ : "i" (-EFAULT), "0" (oparg), "1" (0))
#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
- __asm__ __volatile( \
-"1: movl %2, %0\n \
- movl %0, %3\n" \
- insn "\n" \
-"2: lock; cmpxchgl %3, %2\n \
- jnz 1b\n \
-3: .section .fixup,\"ax\"\n \
-4: mov %5, %1\n \
- jmp 3b\n \
- .previous\n" \
- _ASM_EXTABLE(1b,4b) \
- _ASM_EXTABLE(2b,4b) \
- : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \
- "=&r" (tem) \
- : "r" (oparg), "i" (-EFAULT), "1" (0))
-
-static inline int
-futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+ asm volatile("1:\tmovl %2, %0\n" \
+ "\tmovl\t%0, %3\n" \
+ "\t" insn "\n" \
+ "2:\tlock; cmpxchgl %3, %2\n" \
+ "\tjnz\t1b\n" \
+ "3:\t.section .fixup,\"ax\"\n" \
+ "4:\tmov\t%5, %1\n" \
+ "\tjmp\t3b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 4b) \
+ _ASM_EXTABLE(2b, 4b) \
+ : "=&a" (oldval), "=&r" (ret), \
+ "+m" (*uaddr), "=&r" (tem) \
+ : "r" (oparg), "i" (-EFAULT), "1" (0))
+
+static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -87,20 +84,33 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
if (!ret) {
switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
+ case FUTEX_OP_CMP_EQ:
+ ret = (oldval == cmparg);
+ break;
+ case FUTEX_OP_CMP_NE:
+ ret = (oldval != cmparg);
+ break;
+ case FUTEX_OP_CMP_LT:
+ ret = (oldval < cmparg);
+ break;
+ case FUTEX_OP_CMP_GE:
+ ret = (oldval >= cmparg);
+ break;
+ case FUTEX_OP_CMP_LE:
+ ret = (oldval <= cmparg);
+ break;
+ case FUTEX_OP_CMP_GT:
+ ret = (oldval > cmparg);
+ break;
+ default:
+ ret = -ENOSYS;
}
}
return ret;
}
-static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
+ int newval)
{
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
@@ -112,16 +122,15 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
- __asm__ __volatile__(
- "1: lock; cmpxchgl %3, %1 \n"
- "2: .section .fixup, \"ax\" \n"
- "3: mov %2, %0 \n"
- " jmp 2b \n"
- " .previous \n"
- _ASM_EXTABLE(1b,3b)
- : "=a" (oldval), "+m" (*uaddr)
- : "i" (-EFAULT), "r" (newval), "0" (oldval)
- : "memory"
+ asm volatile("1:\tlock; cmpxchgl %3, %1\n"
+ "2:\t.section .fixup, \"ax\"\n"
+ "3:\tmov %2, %0\n"
+ "\tjmp 2b\n"
+ "\t.previous\n"
+ _ASM_EXTABLE(1b, 3b)
+ : "=a" (oldval), "+m" (*uaddr)
+ : "i" (-EFAULT), "r" (newval), "0" (oldval)
+ : "memory"
);
return oldval;
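For reference, the op/cmp fields unpacked at the top of futex_atomic_op_inuser() and the comparison switch reformatted above can be exercised in plain user space; the FUTEX_OP_CMP_* values below mirror linux/futex.h:

#include <stdio.h>

enum {
        FUTEX_OP_CMP_EQ, FUTEX_OP_CMP_NE, FUTEX_OP_CMP_LT,
        FUTEX_OP_CMP_GE, FUTEX_OP_CMP_LE, FUTEX_OP_CMP_GT,
};

static int futex_cmp(int cmp, int oldval, int cmparg)
{
        switch (cmp) {
        case FUTEX_OP_CMP_EQ: return oldval == cmparg;
        case FUTEX_OP_CMP_NE: return oldval != cmparg;
        case FUTEX_OP_CMP_LT: return oldval < cmparg;
        case FUTEX_OP_CMP_GE: return oldval >= cmparg;
        case FUTEX_OP_CMP_LE: return oldval <= cmparg;
        case FUTEX_OP_CMP_GT: return oldval > cmparg;
        default: return -1;     /* -ENOSYS in the kernel */
        }
}

int main(void)
{
        int encoded_op = FUTEX_OP_CMP_GT << 24; /* op in 31:28, cmp in 27:24 */
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;

        printf("op=%d cmp=%d result=%d\n", op, cmp, futex_cmp(cmp, 2, 1));
        return 0;
}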
diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h
index 33e3ffe1766c..f1b96932746b 100644
--- a/include/asm-x86/genapic_32.h
+++ b/include/asm-x86/genapic_32.h
@@ -14,23 +14,22 @@
* Copyright 2003 Andi Kleen, SuSE Labs.
*/
-struct mpc_config_translation;
struct mpc_config_bus;
struct mp_config_table;
struct mpc_config_processor;
-struct genapic {
- char *name;
- int (*probe)(void);
+struct genapic {
+ char *name;
+ int (*probe)(void);
int (*apic_id_registered)(void);
cpumask_t (*target_cpus)(void);
int int_delivery_mode;
- int int_dest_mode;
+ int int_dest_mode;
int ESR_DISABLE;
int apic_destination_logical;
unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
- unsigned long (*check_apicid_present)(int apicid);
+ unsigned long (*check_apicid_present)(int apicid);
int no_balance_irq;
int no_ioapic_check;
void (*init_apic_ldr)(void);
@@ -38,28 +37,21 @@ struct genapic {
void (*setup_apic_routing)(void);
int (*multi_timer_check)(int apic, int irq);
- int (*apicid_to_node)(int logical_apicid);
+ int (*apicid_to_node)(int logical_apicid);
int (*cpu_to_logical_apicid)(int cpu);
int (*cpu_present_to_apicid)(int mps_cpu);
physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
- int (*mpc_apic_id)(struct mpc_config_processor *m,
- struct mpc_config_translation *t);
- void (*setup_portio_remap)(void);
+ void (*setup_portio_remap)(void);
int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
void (*enable_apic_mode)(void);
u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);
/* mpparse */
- void (*mpc_oem_bus_info)(struct mpc_config_bus *, char *,
- struct mpc_config_translation *);
- void (*mpc_oem_pci_bus)(struct mpc_config_bus *,
- struct mpc_config_translation *);
-
/* When one of the next two hooks returns 1 the genapic
- is switched to this. Essentially they are additional probe
+ is switched to this. Essentially they are additional probe
functions. */
- int (*mps_oem_check)(struct mp_config_table *mpc, char *oem,
- char *productid);
+ int (*mps_oem_check)(struct mp_config_table *mpc, char *oem,
+ char *productid);
int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
unsigned (*get_apic_id)(unsigned long x);
@@ -72,7 +64,7 @@ struct genapic {
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
#endif
-};
+};
#define APICFUNC(x) .x = x,
@@ -85,43 +77,46 @@ struct genapic {
#define IPIFUNC(x)
#endif
-#define APIC_INIT(aname, aprobe) { \
- .name = aname, \
- .probe = aprobe, \
- .int_delivery_mode = INT_DELIVERY_MODE, \
- .int_dest_mode = INT_DEST_MODE, \
- .no_balance_irq = NO_BALANCE_IRQ, \
- .ESR_DISABLE = esr_disable, \
- .apic_destination_logical = APIC_DEST_LOGICAL, \
- APICFUNC(apic_id_registered) \
- APICFUNC(target_cpus) \
- APICFUNC(check_apicid_used) \
- APICFUNC(check_apicid_present) \
- APICFUNC(init_apic_ldr) \
- APICFUNC(ioapic_phys_id_map) \
- APICFUNC(setup_apic_routing) \
- APICFUNC(multi_timer_check) \
- APICFUNC(apicid_to_node) \
- APICFUNC(cpu_to_logical_apicid) \
- APICFUNC(cpu_present_to_apicid) \
- APICFUNC(apicid_to_cpu_present) \
- APICFUNC(mpc_apic_id) \
- APICFUNC(setup_portio_remap) \
- APICFUNC(check_phys_apicid_present) \
- APICFUNC(mpc_oem_bus_info) \
- APICFUNC(mpc_oem_pci_bus) \
- APICFUNC(mps_oem_check) \
- APICFUNC(get_apic_id) \
- .apic_id_mask = APIC_ID_MASK, \
- APICFUNC(cpu_mask_to_apicid) \
- APICFUNC(acpi_madt_oem_check) \
- IPIFUNC(send_IPI_mask) \
- IPIFUNC(send_IPI_allbutself) \
- IPIFUNC(send_IPI_all) \
- APICFUNC(enable_apic_mode) \
- APICFUNC(phys_pkg_id) \
- }
+#define APIC_INIT(aname, aprobe) \
+{ \
+ .name = aname, \
+ .probe = aprobe, \
+ .int_delivery_mode = INT_DELIVERY_MODE, \
+ .int_dest_mode = INT_DEST_MODE, \
+ .no_balance_irq = NO_BALANCE_IRQ, \
+ .ESR_DISABLE = esr_disable, \
+ .apic_destination_logical = APIC_DEST_LOGICAL, \
+ APICFUNC(apic_id_registered) \
+ APICFUNC(target_cpus) \
+ APICFUNC(check_apicid_used) \
+ APICFUNC(check_apicid_present) \
+ APICFUNC(init_apic_ldr) \
+ APICFUNC(ioapic_phys_id_map) \
+ APICFUNC(setup_apic_routing) \
+ APICFUNC(multi_timer_check) \
+ APICFUNC(apicid_to_node) \
+ APICFUNC(cpu_to_logical_apicid) \
+ APICFUNC(cpu_present_to_apicid) \
+ APICFUNC(apicid_to_cpu_present) \
+ APICFUNC(setup_portio_remap) \
+ APICFUNC(check_phys_apicid_present) \
+ APICFUNC(mps_oem_check) \
+ APICFUNC(get_apic_id) \
+ .apic_id_mask = APIC_ID_MASK, \
+ APICFUNC(cpu_mask_to_apicid) \
+ APICFUNC(acpi_madt_oem_check) \
+ IPIFUNC(send_IPI_mask) \
+ IPIFUNC(send_IPI_allbutself) \
+ IPIFUNC(send_IPI_all) \
+ APICFUNC(enable_apic_mode) \
+ APICFUNC(phys_pkg_id) \
+}
extern struct genapic *genapic;
+enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
+#define get_uv_system_type() UV_NONE
+#define is_uv_system() 0
+
+
#endif
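The APICFUNC(x) helper used by APIC_INIT expands to the designated initializer ".x = x,", so each genapic names its hooks after the functions that implement them. A toy ops table showing the pattern — struct ops here is illustrative, not the genapic itself:

#include <stdio.h>

struct ops {
        const char *name;
        int (*probe)(void);
};

static int probe(void) { return 1; }

#define OPSFUNC(x)      .x = x,

static struct ops flat_ops = {
        .name = "flat",
        OPSFUNC(probe)
};

int main(void)
{
        printf("%s: %d\n", flat_ops.name, flat_ops.probe());
        return 0;
}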
diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h
index d7e516ccbaa4..1de931b263ce 100644
--- a/include/asm-x86/genapic_64.h
+++ b/include/asm-x86/genapic_64.h
@@ -33,5 +33,15 @@ extern struct genapic *genapic;
extern struct genapic apic_flat;
extern struct genapic apic_physflat;
+extern int acpi_madt_oem_check(char *, char *);
+
+enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
+extern enum uv_system_type get_uv_system_type(void);
+extern int is_uv_system(void);
+
+extern struct genapic apic_x2apic_uv_x;
+DECLARE_PER_CPU(int, x2apic_extra_bits);
+extern void uv_cpu_init(void);
+extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
#endif
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h
index 9e7280092a48..9870cc1f2f8f 100644
--- a/include/asm-x86/geode.h
+++ b/include/asm-x86/geode.h
@@ -167,7 +167,7 @@ static inline int is_geode(void)
/* MFGPTs */
#define MFGPT_MAX_TIMERS 8
-#define MFGPT_TIMER_ANY -1
+#define MFGPT_TIMER_ANY (-1)
#define MFGPT_DOMAIN_WORKING 1
#define MFGPT_DOMAIN_STANDBY 2
diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h
index 479767c9195f..e153f3b44774 100644
--- a/include/asm-x86/highmem.h
+++ b/include/asm-x86/highmem.h
@@ -8,7 +8,7 @@
* Gerhard.Wichert@pdb.siemens.de
*
*
- * Redesigned the x86 32-bit VM architecture to deal with
+ * Redesigned the x86 32-bit VM architecture to deal with
* up to 16 Terabyte physical memory. With current x86 CPUs
* we now support up to 64 Gigabytes physical RAM.
*
diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h
index 312a58d6dac6..0062ef390f67 100644
--- a/include/asm-x86/hw_irq_64.h
+++ b/include/asm-x86/hw_irq_64.h
@@ -36,7 +36,7 @@
* cleanup after irq migration.
*/
#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
-
+
/*
* Vectors 0x30-0x3f are used for ISA interrupts.
*/
@@ -159,13 +159,12 @@ extern atomic_t irq_mis_count;
* SMP has a few special interrupts for IPI messages
*/
-#define BUILD_IRQ(nr) \
-asmlinkage void IRQ_NAME(nr); \
-__asm__( \
-"\n.p2align\n" \
-"IRQ" #nr "_interrupt:\n\t" \
- "push $~(" #nr ") ; " \
- "jmp common_interrupt");
+#define BUILD_IRQ(nr) \
+ asmlinkage void IRQ_NAME(nr); \
+ asm("\n.p2align\n" \
+ "IRQ" #nr "_interrupt:\n\t" \
+ "push $~(" #nr ") ; " \
+ "jmp common_interrupt");
#define platform_legacy_irq(irq) ((irq) < 16)
diff --git a/include/asm-x86/hypertransport.h b/include/asm-x86/hypertransport.h
index c16c6ff4bdd7..d2bbd238b3e1 100644
--- a/include/asm-x86/hypertransport.h
+++ b/include/asm-x86/hypertransport.h
@@ -8,12 +8,14 @@
#define HT_IRQ_LOW_BASE 0xf8000000
#define HT_IRQ_LOW_VECTOR_SHIFT 16
-#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000
-#define HT_IRQ_LOW_VECTOR(v) (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
+#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000
+#define HT_IRQ_LOW_VECTOR(v) \
+ (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
#define HT_IRQ_LOW_DEST_ID_SHIFT 8
-#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00
-#define HT_IRQ_LOW_DEST_ID(v) (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
+#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00
+#define HT_IRQ_LOW_DEST_ID(v) \
+ (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
#define HT_IRQ_LOW_DM_PHYSICAL 0x0000000
#define HT_IRQ_LOW_DM_LOGICAL 0x0000040
@@ -36,7 +38,8 @@
#define HT_IRQ_HIGH_DEST_ID_SHIFT 0
-#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff
-#define HT_IRQ_HIGH_DEST_ID(v) ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
+#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff
+#define HT_IRQ_HIGH_DEST_ID(v) \
+ ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
#endif /* ASM_HYPERTRANSPORT_H */
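The reflowed helpers pack an interrupt's vector and destination into fixed bit ranges of the low configuration word; exercising them in user space shows where each field lands:

#include <stdio.h>

#define HT_IRQ_LOW_VECTOR_SHIFT         16
#define HT_IRQ_LOW_VECTOR_MASK          0x00ff0000
#define HT_IRQ_LOW_VECTOR(v) \
        (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)

#define HT_IRQ_LOW_DEST_ID_SHIFT        8
#define HT_IRQ_LOW_DEST_ID_MASK         0x0000ff00
#define HT_IRQ_LOW_DEST_ID(v) \
        (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)

int main(void)
{
        unsigned low = 0xf8000000 | HT_IRQ_LOW_VECTOR(0x31)
                                  | HT_IRQ_LOW_DEST_ID(3);

        printf("low word = %#010x\n", low);     /* 0xf8310300 */
        return 0;
}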
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index f377b76b2f34..54522b814f1c 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -41,7 +41,7 @@ static inline void tolerant_fwait(void)
{
asm volatile("1: fwait\n"
"2:\n"
- _ASM_EXTABLE(1b,2b));
+ _ASM_EXTABLE(1b, 2b));
}
static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
@@ -54,7 +54,7 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
"3: movl $-1,%[err]\n"
" jmp 2b\n"
".previous\n"
- _ASM_EXTABLE(1b,3b)
+ _ASM_EXTABLE(1b, 3b)
: [err] "=r" (err)
#if 0 /* See comment in __save_init_fpu() below. */
: [fx] "r" (fx), "m" (*fx), "0" (0));
@@ -76,11 +76,11 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
{
if (unlikely(fx->swd & X87_FSW_ES))
- asm volatile("fnclex");
+ asm volatile("fnclex");
alternative_input(ASM_NOP8 ASM_NOP2,
- " emms\n" /* clear stack tags */
- " fildl %%gs:0", /* load to clear state */
- X86_FEATURE_FXSAVE_LEAK);
+ " emms\n" /* clear stack tags */
+ " fildl %%gs:0", /* load to clear state */
+ X86_FEATURE_FXSAVE_LEAK);
}
static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
@@ -93,14 +93,15 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
"3: movl $-1,%[err]\n"
" jmp 2b\n"
".previous\n"
- _ASM_EXTABLE(1b,3b)
+ _ASM_EXTABLE(1b, 3b)
: [err] "=r" (err), "=m" (*fx)
#if 0 /* See comment in __fxsave_clear() below. */
: [fx] "r" (fx), "0" (0));
#else
: [fx] "cdaSDb" (fx), "0" (0));
#endif
- if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct)))
+ if (unlikely(err) &&
+ __clear_user(fx, sizeof(struct i387_fxsave_struct)))
err = -EFAULT;
/* No need to clear here because the caller clears USED_MATH */
return err;
@@ -156,8 +157,10 @@ static inline int save_i387(struct _fpstate __user *buf)
return 0;
clear_used_math(); /* trigger finit */
if (task_thread_info(tsk)->status & TS_USEDFPU) {
- err = save_i387_checking((struct i387_fxsave_struct __user *)buf);
- if (err) return err;
+ err = save_i387_checking((struct i387_fxsave_struct __user *)
+ buf);
+ if (err)
+ return err;
task_thread_info(tsk)->status &= ~TS_USEDFPU;
stts();
} else {
diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h
index 67c319e0efc7..45d4df3e51e6 100644
--- a/include/asm-x86/i8259.h
+++ b/include/asm-x86/i8259.h
@@ -1,9 +1,11 @@
#ifndef __ASM_I8259_H__
#define __ASM_I8259_H__
+#include <linux/delay.h>
+
extern unsigned int cached_irq_mask;
-#define __byte(x,y) (((unsigned char *) &(y))[x])
+#define __byte(x, y) (((unsigned char *)&(y))[x])
#define cached_master_mask (__byte(0, cached_irq_mask))
#define cached_slave_mask (__byte(1, cached_irq_mask))
@@ -29,7 +31,28 @@ extern void enable_8259A_irq(unsigned int irq);
extern void disable_8259A_irq(unsigned int irq);
extern unsigned int startup_8259A_irq(unsigned int irq);
-#define inb_pic inb_p
-#define outb_pic outb_p
+/* the PIC may need a careful delay on some platforms, hence specific calls */
+static inline unsigned char inb_pic(unsigned int port)
+{
+ unsigned char value = inb(port);
+
+ /*
+ * delay for some accesses to PIC on motherboard or in chipset
+ * must be at least one microsecond, so be safe here:
+ */
+ udelay(2);
+
+ return value;
+}
+
+static inline void outb_pic(unsigned char value, unsigned int port)
+{
+ outb(value, port);
+ /*
+ * delay for some accesses to PIC on motherboard or in chipset
+ * must be at least one microsecond, so be safe here:
+ */
+ udelay(2);
+}
#endif /* __ASM_I8259_H__ */
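With inb_pic()/outb_pic() now real functions that udelay(2) after each access, a typical masked-line update would look like the kernel-side sketch below; mask_irq_on_master() is hypothetical, and the master data port 0x21 is an assumption here, not taken from this header:

static void mask_irq_on_master(unsigned int irq)
{
        cached_master_mask |= 1 << irq;         /* remember the new mask */
        outb_pic(cached_master_mask, 0x21);     /* write it, then udelay(2) */
}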
diff --git a/include/asm-x86/ia32.h b/include/asm-x86/ia32.h
index aa9733206e29..55d3abe5276f 100644
--- a/include/asm-x86/ia32.h
+++ b/include/asm-x86/ia32.h
@@ -14,19 +14,19 @@
/* signal.h */
struct sigaction32 {
- unsigned int sa_handler; /* Really a pointer, but need to deal
- with 32 bits */
- unsigned int sa_flags;
- unsigned int sa_restorer; /* Another 32 bit pointer */
- compat_sigset_t sa_mask; /* A 32 bit mask */
+ unsigned int sa_handler; /* Really a pointer, but need to deal
+ with 32 bits */
+ unsigned int sa_flags;
+ unsigned int sa_restorer; /* Another 32 bit pointer */
+ compat_sigset_t sa_mask; /* A 32 bit mask */
};
struct old_sigaction32 {
- unsigned int sa_handler; /* Really a pointer, but need to deal
- with 32 bits */
- compat_old_sigset_t sa_mask; /* A 32 bit mask */
- unsigned int sa_flags;
- unsigned int sa_restorer; /* Another 32 bit pointer */
+ unsigned int sa_handler; /* Really a pointer, but need to deal
+ with 32 bits */
+ compat_old_sigset_t sa_mask; /* A 32 bit mask */
+ unsigned int sa_flags;
+ unsigned int sa_restorer; /* Another 32 bit pointer */
};
typedef struct sigaltstack_ia32 {
@@ -65,7 +65,7 @@ struct stat64 {
long long st_size;
unsigned int st_blksize;
- long long st_blocks;/* Number 512-byte blocks allocated. */
+ long long st_blocks;/* Number 512-byte blocks allocated */
unsigned st_atime;
unsigned st_atime_nsec;
@@ -77,13 +77,13 @@ struct stat64 {
unsigned long long st_ino;
} __attribute__((packed));
-typedef struct compat_siginfo{
+typedef struct compat_siginfo {
int si_signo;
int si_errno;
int si_code;
union {
- int _pad[((128/sizeof(int)) - 3)];
+ int _pad[((128 / sizeof(int)) - 3)];
/* kill() */
struct {
@@ -129,28 +129,26 @@ typedef struct compat_siginfo{
} _sifields;
} compat_siginfo_t;
-struct sigframe32
-{
- u32 pretcode;
- int sig;
- struct sigcontext_ia32 sc;
- struct _fpstate_ia32 fpstate;
- unsigned int extramask[_COMPAT_NSIG_WORDS-1];
+struct sigframe32 {
+ u32 pretcode;
+ int sig;
+ struct sigcontext_ia32 sc;
+ struct _fpstate_ia32 fpstate;
+ unsigned int extramask[_COMPAT_NSIG_WORDS-1];
};
-struct rt_sigframe32
-{
- u32 pretcode;
- int sig;
- u32 pinfo;
- u32 puc;
- compat_siginfo_t info;
- struct ucontext_ia32 uc;
- struct _fpstate_ia32 fpstate;
+struct rt_sigframe32 {
+ u32 pretcode;
+ int sig;
+ u32 pinfo;
+ u32 puc;
+ compat_siginfo_t info;
+ struct ucontext_ia32 uc;
+ struct _fpstate_ia32 fpstate;
};
struct ustat32 {
- __u32 f_tfree;
+ __u32 f_tfree;
compat_ino_t f_tinode;
char f_fname[6];
char f_fpack[6];
@@ -168,5 +166,5 @@ extern void ia32_pick_mmap_layout(struct mm_struct *mm);
#endif
#endif /* !CONFIG_IA32_SUPPORT */
-
-#endif
+
+#endif
diff --git a/include/asm-x86/ide.h b/include/asm-x86/ide.h
index c2552d8bebf7..cf9c98e5bdb5 100644
--- a/include/asm-x86/ide.h
+++ b/include/asm-x86/ide.h
@@ -20,8 +20,6 @@
# endif
#endif
-#define IDE_ARCH_OBSOLETE_DEFAULTS
-
static __inline__ int ide_default_irq(unsigned long base)
{
switch (base) {
@@ -60,14 +58,6 @@ static __inline__ unsigned long ide_default_io_base(int index)
}
}
-#define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */
-
-#ifdef CONFIG_BLK_DEV_IDEPCI
-#define ide_init_default_irq(base) (0)
-#else
-#define ide_init_default_irq(base) ide_default_irq(base)
-#endif
-
#include <asm-generic/ide_iops.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h
index 5a58b176dd61..7b292d386713 100644
--- a/include/asm-x86/io.h
+++ b/include/asm-x86/io.h
@@ -1,5 +1,11 @@
+#define ARCH_HAS_IOREMAP_WC
+
#ifdef CONFIG_X86_32
# include "io_32.h"
#else
# include "io_64.h"
#endif
+extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
+ unsigned long prot_val);
+extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
+
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h
index d4d8fbd9378c..509045f5fda2 100644
--- a/include/asm-x86/io_32.h
+++ b/include/asm-x86/io_32.h
@@ -65,14 +65,14 @@
*
* The returned physical address is the physical (CPU) mapping for
* the memory address given. It is only valid to use this function on
- * addresses directly mapped or allocated via kmalloc.
+ * addresses directly mapped or allocated via kmalloc.
*
* This function does not give bus mappings for DMA transfers. In
* almost all conceivable cases a device driver should not be using
* this function
*/
-
-static inline unsigned long virt_to_phys(volatile void * address)
+
+static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa(address);
}
@@ -90,7 +90,7 @@ static inline unsigned long virt_to_phys(volatile void * address)
* this function
*/
-static inline void * phys_to_virt(unsigned long address)
+static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
@@ -169,16 +169,19 @@ extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
static inline unsigned char readb(const volatile void __iomem *addr)
{
- return *(volatile unsigned char __force *) addr;
+ return *(volatile unsigned char __force *)addr;
}
+
static inline unsigned short readw(const volatile void __iomem *addr)
{
- return *(volatile unsigned short __force *) addr;
+ return *(volatile unsigned short __force *)addr;
}
+
static inline unsigned int readl(const volatile void __iomem *addr)
{
return *(volatile unsigned int __force *) addr;
}
+
#define readb_relaxed(addr) readb(addr)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)
@@ -188,15 +191,17 @@ static inline unsigned int readl(const volatile void __iomem *addr)
static inline void writeb(unsigned char b, volatile void __iomem *addr)
{
- *(volatile unsigned char __force *) addr = b;
+ *(volatile unsigned char __force *)addr = b;
}
+
static inline void writew(unsigned short b, volatile void __iomem *addr)
{
- *(volatile unsigned short __force *) addr = b;
+ *(volatile unsigned short __force *)addr = b;
}
+
static inline void writel(unsigned int b, volatile void __iomem *addr)
{
- *(volatile unsigned int __force *) addr = b;
+ *(volatile unsigned int __force *)addr = b;
}
#define __raw_writeb writeb
#define __raw_writew writew
@@ -239,12 +244,12 @@ memcpy_toio(volatile void __iomem *dst, const void *src, int count)
* 1. Out of order aware processors
* 2. Accidentally out of order processors (PPro errata #51)
*/
-
+
#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
static inline void flush_write_buffers(void)
{
- __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
+ asm volatile("lock; addl $0,0(%%esp)": : :"memory");
}
#else
@@ -264,7 +269,8 @@ extern void io_delay_init(void);
#include <asm/paravirt.h>
#else
-static inline void slow_down_io(void) {
+static inline void slow_down_io(void)
+{
native_io_delay();
#ifdef REALLY_SLOW_IO
native_io_delay();
@@ -275,51 +281,74 @@ static inline void slow_down_io(void) {
#endif
-#define __BUILDIO(bwl,bw,type) \
-static inline void out##bwl(unsigned type value, int port) { \
- out##bwl##_local(value, port); \
-} \
-static inline unsigned type in##bwl(int port) { \
- return in##bwl##_local(port); \
+#define __BUILDIO(bwl, bw, type) \
+static inline void out##bwl(unsigned type value, int port) \
+{ \
+ out##bwl##_local(value, port); \
+} \
+ \
+static inline unsigned type in##bwl(int port) \
+{ \
+ return in##bwl##_local(port); \
}
-#define BUILDIO(bwl,bw,type) \
-static inline void out##bwl##_local(unsigned type value, int port) { \
- __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \
-} \
-static inline unsigned type in##bwl##_local(int port) { \
- unsigned type value; \
- __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \
- return value; \
-} \
-static inline void out##bwl##_local_p(unsigned type value, int port) { \
- out##bwl##_local(value, port); \
- slow_down_io(); \
-} \
-static inline unsigned type in##bwl##_local_p(int port) { \
- unsigned type value = in##bwl##_local(port); \
- slow_down_io(); \
- return value; \
-} \
-__BUILDIO(bwl,bw,type) \
-static inline void out##bwl##_p(unsigned type value, int port) { \
- out##bwl(value, port); \
- slow_down_io(); \
-} \
-static inline unsigned type in##bwl##_p(int port) { \
- unsigned type value = in##bwl(port); \
- slow_down_io(); \
- return value; \
-} \
-static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
- __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
-} \
-static inline void ins##bwl(int port, void *addr, unsigned long count) { \
- __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \
+#define BUILDIO(bwl, bw, type) \
+static inline void out##bwl##_local(unsigned type value, int port) \
+{ \
+ asm volatile("out" #bwl " %" #bw "0, %w1" \
+ : : "a"(value), "Nd"(port)); \
+} \
+ \
+static inline unsigned type in##bwl##_local(int port) \
+{ \
+ unsigned type value; \
+ asm volatile("in" #bwl " %w1, %" #bw "0" \
+ : "=a"(value) : "Nd"(port)); \
+ return value; \
+} \
+ \
+static inline void out##bwl##_local_p(unsigned type value, int port) \
+{ \
+ out##bwl##_local(value, port); \
+ slow_down_io(); \
+} \
+ \
+static inline unsigned type in##bwl##_local_p(int port) \
+{ \
+ unsigned type value = in##bwl##_local(port); \
+ slow_down_io(); \
+ return value; \
+} \
+ \
+__BUILDIO(bwl, bw, type) \
+ \
+static inline void out##bwl##_p(unsigned type value, int port) \
+{ \
+ out##bwl(value, port); \
+ slow_down_io(); \
+} \
+ \
+static inline unsigned type in##bwl##_p(int port) \
+{ \
+ unsigned type value = in##bwl(port); \
+ slow_down_io(); \
+ return value; \
+} \
+ \
+static inline void outs##bwl(int port, const void *addr, unsigned long count) \
+{ \
+ asm volatile("rep; outs" #bwl \
+ : "+S"(addr), "+c"(count) : "d"(port)); \
+} \
+ \
+static inline void ins##bwl(int port, void *addr, unsigned long count) \
+{ \
+ asm volatile("rep; ins" #bwl \
+ : "+D"(addr), "+c"(count) : "d"(port)); \
}
-BUILDIO(b,b,char)
-BUILDIO(w,w,short)
-BUILDIO(l,,int)
+BUILDIO(b, b, char)
+BUILDIO(w, w, short)
+BUILDIO(l, , int)
#endif
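BUILDIO() and __BUILDIO() stamp out the whole inb/outb family. Expanded by hand for one instance, the plain output path of BUILDIO(b, b, char) comes to roughly the following (the _p and _local_p slow-down variants omitted); this is a sketch of the expansion, not additional kernel code:

static inline void outb_local(unsigned char value, int port)
{
        asm volatile("outb %b0, %w1" : : "a"(value), "Nd"(port));
}

static inline void outb(unsigned char value, int port)
{
        outb_local(value, port);
}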
diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h
index db0be2011a3c..c2f5eef47b88 100644
--- a/include/asm-x86/io_64.h
+++ b/include/asm-x86/io_64.h
@@ -58,60 +58,75 @@ static inline void slow_down_io(void)
/*
* Talk about misusing macros..
*/
-#define __OUT1(s,x) \
+#define __OUT1(s, x) \
static inline void out##s(unsigned x value, unsigned short port) {
-#define __OUT2(s,s1,s2) \
-__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
+#define __OUT2(s, s1, s2) \
+asm volatile ("out" #s " %" s1 "0,%" s2 "1"
#ifndef REALLY_SLOW_IO
#define REALLY_SLOW_IO
#define UNSET_REALLY_SLOW_IO
#endif
-#define __OUT(s,s1,x) \
-__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-__OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \
- slow_down_io(); }
+#define __OUT(s, s1, x) \
+ __OUT1(s, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \
+ } \
+ __OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \
+ slow_down_io(); \
+}
-#define __IN1(s) \
-static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
+#define __IN1(s) \
+static inline RETURN_TYPE in##s(unsigned short port) \
+{ \
+ RETURN_TYPE _v;
-#define __IN2(s,s1,s2) \
-__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
+#define __IN2(s, s1, s2) \
+ asm volatile ("in" #s " %" s2 "1,%" s1 "0"
-#define __IN(s,s1,i...) \
-__IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); return _v; } \
-__IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \
- slow_down_io(); return _v; }
+#define __IN(s, s1, i...) \
+ __IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \
+ return _v; \
+ } \
+ __IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \
+	slow_down_io();						\
+	return _v;						\
+}
#ifdef UNSET_REALLY_SLOW_IO
#undef REALLY_SLOW_IO
#endif
-#define __INS(s) \
-static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; ins" #s \
-: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+#define __INS(s) \
+static inline void ins##s(unsigned short port, void *addr, \
+ unsigned long count) \
+{ \
+ asm volatile ("rep ; ins" #s \
+ : "=D" (addr), "=c" (count) \
+ : "d" (port), "0" (addr), "1" (count)); \
+}
-#define __OUTS(s) \
-static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; outs" #s \
-: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+#define __OUTS(s) \
+static inline void outs##s(unsigned short port, const void *addr, \
+ unsigned long count) \
+{ \
+ asm volatile ("rep ; outs" #s \
+ : "=S" (addr), "=c" (count) \
+ : "d" (port), "0" (addr), "1" (count)); \
+}
#define RETURN_TYPE unsigned char
-__IN(b,"")
+__IN(b, "")
#undef RETURN_TYPE
#define RETURN_TYPE unsigned short
-__IN(w,"")
+__IN(w, "")
#undef RETURN_TYPE
#define RETURN_TYPE unsigned int
-__IN(l,"")
+__IN(l, "")
#undef RETURN_TYPE
-__OUT(b,"b",char)
-__OUT(w,"w",short)
-__OUT(l,,int)
+__OUT(b, "b", char)
+__OUT(w, "w", short)
+__OUT(l, , int)
__INS(b)
__INS(w)
@@ -132,12 +147,12 @@ __OUTS(l)
* Change virtual addresses to physical addresses and vv.
* These are pretty trivial
*/
-static inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa(address);
}
-static inline void * phys_to_virt(unsigned long address)
+static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
@@ -200,18 +215,22 @@ static inline __u8 __readb(const volatile void __iomem *addr)
{
return *(__force volatile __u8 *)addr;
}
+
static inline __u16 __readw(const volatile void __iomem *addr)
{
return *(__force volatile __u16 *)addr;
}
+
static __always_inline __u32 __readl(const volatile void __iomem *addr)
{
return *(__force volatile __u32 *)addr;
}
+
static inline __u64 __readq(const volatile void __iomem *addr)
{
return *(__force volatile __u64 *)addr;
}
+
#define readb(x) __readb(x)
#define readw(x) __readw(x)
#define readl(x) __readl(x)
@@ -231,37 +250,44 @@ static inline void __writel(__u32 b, volatile void __iomem *addr)
{
*(__force volatile __u32 *)addr = b;
}
+
static inline void __writeq(__u64 b, volatile void __iomem *addr)
{
*(__force volatile __u64 *)addr = b;
}
+
static inline void __writeb(__u8 b, volatile void __iomem *addr)
{
*(__force volatile __u8 *)addr = b;
}
+
static inline void __writew(__u16 b, volatile void __iomem *addr)
{
*(__force volatile __u16 *)addr = b;
}
-#define writeq(val,addr) __writeq((val),(addr))
-#define writel(val,addr) __writel((val),(addr))
-#define writew(val,addr) __writew((val),(addr))
-#define writeb(val,addr) __writeb((val),(addr))
+
+#define writeq(val, addr) __writeq((val), (addr))
+#define writel(val, addr) __writel((val), (addr))
+#define writew(val, addr) __writew((val), (addr))
+#define writeb(val, addr) __writeb((val), (addr))
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
#define __raw_writeq writeq
-void __memcpy_fromio(void*,unsigned long,unsigned);
-void __memcpy_toio(unsigned long,const void*,unsigned);
+void __memcpy_fromio(void *, unsigned long, unsigned);
+void __memcpy_toio(unsigned long, const void *, unsigned);
-static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
+static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
+ unsigned len)
{
- __memcpy_fromio(to,(unsigned long)from,len);
+ __memcpy_fromio(to, (unsigned long)from, len);
}
-static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
+
+static inline void memcpy_toio(volatile void __iomem *to, const void *from,
+ unsigned len)
{
- __memcpy_toio((unsigned long)to,from,len);
+ __memcpy_toio((unsigned long)to, from, len);
}
void memset_io(volatile void __iomem *a, int b, size_t c);
@@ -276,7 +302,7 @@ void memset_io(volatile void __iomem *a, int b, size_t c);
*/
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
-#define flush_write_buffers()
+#define flush_write_buffers()
extern int iommu_bio_merge;
#define BIO_VMERGE_BOUNDARY iommu_bio_merge
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h
index 0f5b3fef0b08..0c9e17c73e05 100644
--- a/include/asm-x86/io_apic.h
+++ b/include/asm-x86/io_apic.h
@@ -110,6 +110,13 @@ extern int nr_ioapic_registers[MAX_IO_APICS];
* MP-BIOS irq configuration table structures:
*/
+struct mp_ioapic_routing {
+ int apic_id;
+ int gsi_base;
+ int gsi_end;
+ u32 pin_programmed[4];
+};
+
/* I/O APIC entries */
extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
@@ -146,7 +153,6 @@ extern int io_apic_get_version(int ioapic);
extern int io_apic_get_redir_entries(int ioapic);
extern int io_apic_set_pci_routing(int ioapic, int pin, int irq,
int edge_level, int active_high_low);
-extern int timer_uses_ioapic_pin_0;
#endif /* CONFIG_ACPI */
extern int (*ioapic_renumber_irq)(int ioapic, int irq);
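The new mp_ioapic_routing struct carries a u32 pin_programmed[4], i.e. a 128-bit bitmap with one bit per I/O APIC pin. A sketch of the usual test/set arithmetic over such a bitmap (helper names are illustrative, not the kernel's own API):

#include <stdint.h>
#include <stdio.h>

/* pin_programmed[4] gives 4 * 32 = 128 bits, one per I/O APIC pin. */
static void mark_pin_programmed(uint32_t bitmap[4], int pin)
{
        bitmap[pin / 32] |= 1u << (pin % 32);
}

static int pin_is_programmed(const uint32_t bitmap[4], int pin)
{
        return (bitmap[pin / 32] >> (pin % 32)) & 1u;
}

int main(void)
{
        uint32_t pins[4] = { 0 };

        mark_pin_programmed(pins, 23);          /* e.g. an ACPI SCI pin */
        printf("pin 23: %d, pin 0: %d\n",
               pin_is_programmed(pins, 23), pin_is_programmed(pins, 0));
        return 0;
}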
diff --git a/include/asm-x86/ioctls.h b/include/asm-x86/ioctls.h
index 93c894dc5154..c0c338bd4068 100644
--- a/include/asm-x86/ioctls.h
+++ b/include/asm-x86/ioctls.h
@@ -47,12 +47,13 @@
#define TIOCSBRK 0x5427 /* BSD compatibility */
#define TIOCCBRK 0x5428 /* BSD compatibility */
#define TIOCGSID 0x5429 /* Return the session ID of FD */
-#define TCGETS2 _IOR('T',0x2A, struct termios2)
-#define TCSETS2 _IOW('T',0x2B, struct termios2)
-#define TCSETSW2 _IOW('T',0x2C, struct termios2)
-#define TCSETSF2 _IOW('T',0x2D, struct termios2)
-#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
-#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
+#define TCGETS2 _IOR('T', 0x2A, struct termios2)
+#define TCSETS2 _IOW('T', 0x2B, struct termios2)
+#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
+#define TCSETSF2 _IOW('T', 0x2D, struct termios2)
+#define TIOCGPTN _IOR('T', 0x30, unsigned int)
+ /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
#define FIONCLEX 0x5450
#define FIOCLEX 0x5451
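The _IOR()/_IOW() macros being reformatted here pack direction, argument size, type character, and command number into one 32-bit ioctl code. A small re-derivation of that packing under the asm-generic/ioctl.h layout (bits 0-7 number, 8-15 type, 16-29 size, 30-31 direction); MY_IOC and its constants are stand-ins, not the kernel macros:

#include <stdio.h>

/* Illustrative restatement of the asm-generic _IOC() bit layout. */
#define MY_IOC(dir, type, nr, size) \
        (((dir) << 30) | ((unsigned)(size) << 16) | ((type) << 8) | (nr))
#define MY_IOC_READ  2u
#define MY_IOC_WRITE 1u

int main(void)
{
        /* TIOCGPTN is _IOR('T', 0x30, unsigned int): read, 4 bytes */
        unsigned int code = MY_IOC(MY_IOC_READ, 'T', 0x30,
                                   sizeof(unsigned int));
        printf("%#x\n", code);   /* 0x80045430, matching TIOCGPTN */
        return 0;
}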
diff --git a/include/asm-x86/ipcbuf.h b/include/asm-x86/ipcbuf.h
index 2adf8b39a40b..ee678fd51594 100644
--- a/include/asm-x86/ipcbuf.h
+++ b/include/asm-x86/ipcbuf.h
@@ -11,8 +11,7 @@
* - 2 miscellaneous 32-bit values
*/
-struct ipc64_perm
-{
+struct ipc64_perm {
__kernel_key_t key;
__kernel_uid32_t uid;
__kernel_gid32_t gid;
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h
index 6d011bd6067d..ecc80f341f37 100644
--- a/include/asm-x86/ipi.h
+++ b/include/asm-x86/ipi.h
@@ -27,7 +27,8 @@
* We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
*/
-static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, unsigned int dest)
+static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector,
+ unsigned int dest)
{
unsigned int icr = shortcut | dest;
@@ -42,12 +43,13 @@ static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, uns
return icr;
}
-static inline int __prepare_ICR2 (unsigned int mask)
+static inline int __prepare_ICR2(unsigned int mask)
{
return SET_APIC_DEST_FIELD(mask);
}
-static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
+static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
+ unsigned int dest)
{
/*
* Subtle. In the case of the 'never do double writes' workaround
@@ -78,7 +80,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsign
* This is used to send an IPI with no shorthand notation (the destination is
* specified in bits 56 to 63 of the ICR).
*/
-static inline void __send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
+static inline void __send_IPI_dest_field(unsigned int mask, int vector,
+ unsigned int dest)
{
unsigned long cfg;
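__prepare_ICR() simply ORs the shorthand, destination mode, delivery mode, and vector fields into one ICR word. A user-space sketch of that assembly; the mask values mirror <asm/apicdef.h> but are restated here as assumptions, and NMI_VECTOR is hard-coded for illustration:

#include <stdio.h>

#define APIC_DM_FIXED     0x00000u
#define APIC_DM_NMI       0x00400u
#define APIC_DEST_LOGICAL 0x00800u
#define APIC_DEST_ALLBUT  0xC0000u

static unsigned int prepare_icr(unsigned int shortcut, int vector,
                                unsigned int dest)
{
        /* Same shape as __prepare_ICR: OR the fields together,
         * treating NMI specially since it carries no vector. */
        unsigned int icr = shortcut | dest;

        if (vector == 2 /* NMI_VECTOR, illustrative value */)
                icr |= APIC_DM_NMI;
        else
                icr |= APIC_DM_FIXED | vector;
        return icr;
}

int main(void)
{
        printf("%#x\n", prepare_icr(APIC_DEST_ALLBUT, 0xfd,
                                    APIC_DEST_LOGICAL));
        return 0;
}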
diff --git a/include/asm-x86/irq_32.h b/include/asm-x86/irq_32.h
index aca9c96e8e6b..0b79f3185243 100644
--- a/include/asm-x86/irq_32.h
+++ b/include/asm-x86/irq_32.h
@@ -15,7 +15,7 @@
#include "irq_vectors.h"
#include <asm/thread_info.h>
-static __inline__ int irq_canonicalize(int irq)
+static inline int irq_canonicalize(int irq)
{
return ((irq == 2) ? 9 : irq);
}
diff --git a/include/asm-x86/irq_64.h b/include/asm-x86/irq_64.h
index 5006c6e75656..083d35a62c94 100644
--- a/include/asm-x86/irq_64.h
+++ b/include/asm-x86/irq_64.h
@@ -31,10 +31,10 @@
#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */
-#define NR_IRQS (NR_VECTORS + (32 *NR_CPUS))
+#define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
#define NR_IRQ_VECTORS NR_IRQS
-static __inline__ int irq_canonicalize(int irq)
+static inline int irq_canonicalize(int irq)
{
return ((irq == 2) ? 9 : irq);
}
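Both the 32- and 64-bit variants of irq_canonicalize() encode the same legacy quirk: ISA IRQ 2 is consumed by the 8259 master-to-slave cascade, so a device nominally on IRQ 2 actually raises IRQ 9. A trivial standalone restatement:

#include <stdio.h>

/* IRQ 2 is the 8259 cascade line; devices configured for it
 * are redirected to IRQ 9, which is all this remapping does. */
static int irq_canonicalize(int irq)
{
        return (irq == 2) ? 9 : irq;
}

int main(void)
{
        for (int irq = 0; irq < 4; irq++)
                printf("%d -> %d\n", irq, irq_canonicalize(irq));
        return 0;
}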
diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h
index 0e2292483b35..c242527f970e 100644
--- a/include/asm-x86/irqflags.h
+++ b/include/asm-x86/irqflags.h
@@ -12,25 +12,21 @@ static inline unsigned long native_save_fl(void)
{
unsigned long flags;
- __asm__ __volatile__(
- "# __raw_save_flags\n\t"
- "pushf ; pop %0"
- : "=g" (flags)
- : /* no input */
- : "memory"
- );
+ asm volatile("# __raw_save_flags\n\t"
+ "pushf ; pop %0"
+ : "=g" (flags)
+ : /* no input */
+ : "memory");
return flags;
}
static inline void native_restore_fl(unsigned long flags)
{
- __asm__ __volatile__(
- "push %0 ; popf"
- : /* no output */
- :"g" (flags)
- :"memory", "cc"
- );
+ asm volatile("push %0 ; popf"
+ : /* no output */
+ :"g" (flags)
+ :"memory", "cc");
}
static inline void native_irq_disable(void)
@@ -70,26 +66,6 @@ static inline void raw_local_irq_restore(unsigned long flags)
native_restore_fl(flags);
}
-#ifdef CONFIG_X86_VSMP
-
-/*
- * Interrupt control for the VSMP architecture:
- */
-
-static inline void raw_local_irq_disable(void)
-{
- unsigned long flags = __raw_local_save_flags();
- raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
-}
-
-static inline void raw_local_irq_enable(void)
-{
- unsigned long flags = __raw_local_save_flags();
- raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
-}
-
-#else
-
static inline void raw_local_irq_disable(void)
{
native_irq_disable();
@@ -100,8 +76,6 @@ static inline void raw_local_irq_enable(void)
native_irq_enable();
}
-#endif
-
/*
* Used in the idle loop; sti takes one instruction cycle
* to complete:
@@ -153,23 +127,16 @@ static inline unsigned long __raw_local_irq_save(void)
#endif /* CONFIG_PARAVIRT */
#ifndef __ASSEMBLY__
-#define raw_local_save_flags(flags) \
- do { (flags) = __raw_local_save_flags(); } while (0)
+#define raw_local_save_flags(flags) \
+ do { (flags) = __raw_local_save_flags(); } while (0)
-#define raw_local_irq_save(flags) \
- do { (flags) = __raw_local_irq_save(); } while (0)
+#define raw_local_irq_save(flags) \
+ do { (flags) = __raw_local_irq_save(); } while (0)
-#ifdef CONFIG_X86_VSMP
-static inline int raw_irqs_disabled_flags(unsigned long flags)
-{
- return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
-}
-#else
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
return !(flags & X86_EFLAGS_IF);
}
-#endif
static inline int raw_irqs_disabled(void)
{
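native_save_fl() reads EFLAGS with the pushf/pop idiom because no direct "mov from EFLAGS" form exists. A user-space sketch of the same idiom for x86-64 GCC/Clang only; note the kernel builds with -mno-red-zone, so the transient push below the stack pointer is safe there, while in ordinary user code this is merely the conventional demonstration:

#include <stdio.h>

static inline unsigned long save_flags(void)
{
        unsigned long flags;

        /* Push EFLAGS to the stack, then pop it into a register. */
        asm volatile("pushfq ; popq %0" : "=r"(flags) : : "memory");
        return flags;
}

int main(void)
{
        unsigned long flags = save_flags();

        /* Bit 9 is IF; in user space interrupts are always enabled. */
        printf("flags=%#lx IF=%lu\n", flags, (flags >> 9) & 1);
        return 0;
}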
diff --git a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h
index 99dcbafa1511..96651bb59ba1 100644
--- a/include/asm-x86/kdebug.h
+++ b/include/asm-x86/kdebug.h
@@ -20,15 +20,16 @@ enum die_val {
DIE_CALL,
DIE_NMI_IPI,
DIE_PAGE_FAULT,
+ DIE_NMIUNKNOWN,
};
extern void printk_address(unsigned long address, int reliable);
-extern void die(const char *,struct pt_regs *,long);
+extern void die(const char *, struct pt_regs *, long);
extern int __must_check __die(const char *, struct pt_regs *, long);
extern void show_registers(struct pt_regs *regs);
extern void __show_registers(struct pt_regs *, int all);
extern void show_trace(struct task_struct *t, struct pt_regs *regs,
- unsigned long *sp, unsigned long bp);
+ unsigned long *sp, unsigned long bp);
extern void __show_regs(struct pt_regs *regs);
extern void show_regs(struct pt_regs *regs);
extern unsigned long oops_begin(void);
diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h
index c90d3c77afc2..8f855a15f64d 100644
--- a/include/asm-x86/kexec.h
+++ b/include/asm-x86/kexec.h
@@ -94,10 +94,9 @@ static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
{
#ifdef CONFIG_X86_32
newregs->sp = (unsigned long)&(oldregs->sp);
- __asm__ __volatile__(
- "xorl %%eax, %%eax\n\t"
- "movw %%ss, %%ax\n\t"
- :"=a"(newregs->ss));
+ asm volatile("xorl %%eax, %%eax\n\t"
+ "movw %%ss, %%ax\n\t"
+ :"=a"(newregs->ss));
#endif
}
@@ -114,39 +113,39 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
crash_fixup_ss_esp(newregs, oldregs);
} else {
#ifdef CONFIG_X86_32
- __asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->bx));
- __asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->cx));
- __asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->dx));
- __asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->si));
- __asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->di));
- __asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->bp));
- __asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->ax));
- __asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->sp));
- __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss));
- __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs));
- __asm__ __volatile__("movl %%ds, %%eax;" :"=a"(newregs->ds));
- __asm__ __volatile__("movl %%es, %%eax;" :"=a"(newregs->es));
- __asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->flags));
+ asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
+ asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
+ asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
+ asm volatile("movl %%esi,%0" : "=m"(newregs->si));
+ asm volatile("movl %%edi,%0" : "=m"(newregs->di));
+ asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
+ asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
+ asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
+ asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
+ asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
+ asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
+ asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
+ asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
#else
- __asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->bx));
- __asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->cx));
- __asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->dx));
- __asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->si));
- __asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->di));
- __asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->bp));
- __asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->ax));
- __asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->sp));
- __asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8));
- __asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9));
- __asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10));
- __asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11));
- __asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12));
- __asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13));
- __asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14));
- __asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15));
- __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss));
- __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs));
- __asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->flags));
+ asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
+ asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
+ asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
+ asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
+ asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
+ asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
+ asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
+ asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
+ asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
+ asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
+ asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
+ asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
+ asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
+ asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
+ asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
+ asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
+ asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
+ asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
+ asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
#endif
newregs->ip = (unsigned long)current_text_addr();
}
diff --git a/include/asm-x86/kgdb.h b/include/asm-x86/kgdb.h
new file mode 100644
index 000000000000..484c47554f3b
--- /dev/null
+++ b/include/asm-x86/kgdb.h
@@ -0,0 +1,81 @@
+#ifndef _ASM_KGDB_H_
+#define _ASM_KGDB_H_
+
+/*
+ * Copyright (C) 2001-2004 Amit S. Kale
+ * Copyright (C) 2008 Wind River Systems, Inc.
+ */
+
+/*
+ * BUFMAX defines the maximum number of characters in inbound/outbound
+ * buffers at least NUMREGBYTES*2 are needed for register packets
+ * Longer buffer is needed to list all threads
+ */
+#define BUFMAX 1024
+
+/*
+ * Note that this register image is in a different order than
+ * the register image that Linux produces at interrupt time.
+ *
+ * Linux's register image is defined by struct pt_regs in ptrace.h.
+ * Just why GDB uses a different order is a historical mystery.
+ */
+#ifdef CONFIG_X86_32
+enum regnames {
+ GDB_AX, /* 0 */
+ GDB_CX, /* 1 */
+ GDB_DX, /* 2 */
+ GDB_BX, /* 3 */
+ GDB_SP, /* 4 */
+ GDB_BP, /* 5 */
+ GDB_SI, /* 6 */
+ GDB_DI, /* 7 */
+ GDB_PC, /* 8 also known as eip */
+ GDB_PS, /* 9 also known as eflags */
+ GDB_CS, /* 10 */
+ GDB_SS, /* 11 */
+ GDB_DS, /* 12 */
+ GDB_ES, /* 13 */
+ GDB_FS, /* 14 */
+ GDB_GS, /* 15 */
+};
+#else /* ! CONFIG_X86_32 */
+enum regnames {
+ GDB_AX, /* 0 */
+ GDB_DX, /* 1 */
+ GDB_CX, /* 2 */
+ GDB_BX, /* 3 */
+ GDB_SI, /* 4 */
+ GDB_DI, /* 5 */
+ GDB_BP, /* 6 */
+ GDB_SP, /* 7 */
+ GDB_R8, /* 8 */
+ GDB_R9, /* 9 */
+ GDB_R10, /* 10 */
+ GDB_R11, /* 11 */
+ GDB_R12, /* 12 */
+ GDB_R13, /* 13 */
+ GDB_R14, /* 14 */
+ GDB_R15, /* 15 */
+ GDB_PC, /* 16 */
+ GDB_PS, /* 17 */
+};
+#endif /* CONFIG_X86_32 */
+
+/*
+ * Number of bytes of registers:
+ */
+#ifdef CONFIG_X86_32
+# define NUMREGBYTES 64
+#else
+# define NUMREGBYTES ((GDB_PS+1)*8)
+#endif
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ asm(" int $3");
+}
+#define BREAK_INSTR_SIZE 1
+#define CACHE_FLUSH_IS_SAFE 1
+
+#endif /* _ASM_KGDB_H_ */
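The sizing arithmetic behind BUFMAX and NUMREGBYTES is worth spelling out: on 64-bit, (GDB_PS+1)*8 counts 18 registers of 8 bytes each, and a GDB 'g' packet hex-encodes them at two characters per byte, so the buffer needs at least NUMREGBYTES*2 characters. A sketch reproducing that check:

#include <stdio.h>

enum { GDB_NUMREGS_64 = 18, BUFMAX = 1024 };  /* GDB_AX..GDB_PS */

int main(void)
{
        int numregbytes = GDB_NUMREGS_64 * 8;   /* 144 bytes */
        int packet_chars = numregbytes * 2;     /* 288 hex chars */

        printf("NUMREGBYTES=%d, packet=%d chars, BUFMAX=%d (%s)\n",
               numregbytes, packet_chars, BUFMAX,
               packet_chars <= BUFMAX ? "fits" : "overflow");
        return 0;
}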
diff --git a/include/asm-x86/kprobes.h b/include/asm-x86/kprobes.h
index 61ad7b5d142e..54980b0b3892 100644
--- a/include/asm-x86/kprobes.h
+++ b/include/asm-x86/kprobes.h
@@ -35,12 +35,12 @@ typedef u8 kprobe_opcode_t;
#define RELATIVEJUMP_INSTRUCTION 0xe9
#define MAX_INSN_SIZE 16
#define MAX_STACK_SIZE 64
-#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
- (((unsigned long)current_thread_info()) + THREAD_SIZE \
- - (unsigned long)(ADDR))) \
- ? (MAX_STACK_SIZE) \
- : (((unsigned long)current_thread_info()) + THREAD_SIZE \
- - (unsigned long)(ADDR)))
+#define MIN_STACK_SIZE(ADDR) \
+ (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
+ THREAD_SIZE - (unsigned long)(ADDR))) \
+ ? (MAX_STACK_SIZE) \
+ : (((unsigned long)current_thread_info()) + \
+ THREAD_SIZE - (unsigned long)(ADDR)))
#define flush_insn_slot(p) do { } while (0)
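Reformatted or not, MIN_STACK_SIZE(ADDR) is a min(): it returns the smaller of MAX_STACK_SIZE and the bytes remaining between ADDR and the top of the current thread stack, so kprobes never copies past the stack's end. A sketch with the kernel's current_thread_info()/THREAD_SIZE replaced by explicit parameters and made-up addresses:

#include <stdio.h>

#define MAX_STACK_SIZE 64

static unsigned long min_stack_size(unsigned long addr,
                                    unsigned long stack_base,
                                    unsigned long thread_size)
{
        /* Bytes left between addr and the top of the stack area. */
        unsigned long remaining = stack_base + thread_size - addr;

        return remaining < MAX_STACK_SIZE ? remaining : MAX_STACK_SIZE;
}

int main(void)
{
        /* 8 KiB thread stack starting at 0x10000 (made-up numbers);
         * only 16 bytes remain above addr, so 16 wins over 64. */
        printf("%lu\n",
               min_stack_size(0x10000 + 0x1FF0, 0x10000, 0x2000));
        return 0;
}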
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 4702b04b979a..68ee390b2844 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -22,15 +22,16 @@
#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
-#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)
+#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
+ 0xFFFFFF0000000000ULL)
-#define KVM_GUEST_CR0_MASK \
+#define KVM_GUEST_CR0_MASK \
(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
| X86_CR0_NW | X86_CR0_CD)
-#define KVM_VM_CR0_ALWAYS_ON \
+#define KVM_VM_CR0_ALWAYS_ON \
(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
| X86_CR0_MP)
-#define KVM_GUEST_CR4_MASK \
+#define KVM_GUEST_CR4_MASK \
(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
@@ -133,12 +134,12 @@ struct kvm_pte_chain {
union kvm_mmu_page_role {
unsigned word;
struct {
- unsigned glevels : 4;
- unsigned level : 4;
- unsigned quadrant : 2;
- unsigned pad_for_nice_hex_output : 6;
- unsigned metaphysical : 1;
- unsigned access : 3;
+ unsigned glevels:4;
+ unsigned level:4;
+ unsigned quadrant:2;
+ unsigned pad_for_nice_hex_output:6;
+ unsigned metaphysical:1;
+ unsigned access:3;
};
};
@@ -606,6 +607,7 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
-#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
+#define RMODE_TSS_SIZE \
+ (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
#endif
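The union kvm_mmu_page_role bitfields being reindented above rely on a common trick: overlaying the named fields with a single unsigned word, so roles can be compared or hashed in one integer operation while the fields stay readable. A sketch of the pattern (C11 anonymous struct; field names abbreviated for illustration):

#include <stdio.h>

union page_role {
        unsigned word;          /* whole role as one integer */
        struct {
                unsigned glevels:4;
                unsigned level:4;
                unsigned quadrant:2;
                unsigned pad:6;
                unsigned metaphysical:1;
                unsigned access:3;
        };
};

int main(void)
{
        union page_role a = { .word = 0 }, b = { .word = 0 };

        a.glevels = 4;
        a.level = 2;
        b.glevels = 4;
        b.level = 2;
        /* One integer compare covers every field at once. */
        printf("word=%#x equal=%d\n", a.word, a.word == b.word);
        return 0;
}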
diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h
index 7db91b9bdcd4..d6337f941c98 100644
--- a/include/asm-x86/kvm_x86_emulate.h
+++ b/include/asm-x86/kvm_x86_emulate.h
@@ -68,10 +68,10 @@ struct x86_emulate_ops {
* @val: [OUT] Value read from memory, zero-extended to 'u_long'.
* @bytes: [IN ] Number of bytes to read from memory.
*/
- int (*read_emulated) (unsigned long addr,
- void *val,
- unsigned int bytes,
- struct kvm_vcpu *vcpu);
+ int (*read_emulated)(unsigned long addr,
+ void *val,
+ unsigned int bytes,
+ struct kvm_vcpu *vcpu);
/*
 * write_emulated: Write bytes to emulated/special memory area.
@@ -80,10 +80,10 @@ struct x86_emulate_ops {
* required).
* @bytes: [IN ] Number of bytes to write to memory.
*/
- int (*write_emulated) (unsigned long addr,
- const void *val,
- unsigned int bytes,
- struct kvm_vcpu *vcpu);
+ int (*write_emulated)(unsigned long addr,
+ const void *val,
+ unsigned int bytes,
+ struct kvm_vcpu *vcpu);
/*
* cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
@@ -93,11 +93,11 @@ struct x86_emulate_ops {
* @new: [IN ] Value to write to @addr.
* @bytes: [IN ] Number of bytes to access using CMPXCHG.
*/
- int (*cmpxchg_emulated) (unsigned long addr,
- const void *old,
- const void *new,
- unsigned int bytes,
- struct kvm_vcpu *vcpu);
+ int (*cmpxchg_emulated)(unsigned long addr,
+ const void *old,
+ const void *new,
+ unsigned int bytes,
+ struct kvm_vcpu *vcpu);
};
@@ -143,7 +143,7 @@ struct x86_emulate_ctxt {
/* Register state before/after emulation. */
struct kvm_vcpu *vcpu;
- /* Linear faulting address (if emulating a page-faulting instruction). */
+ /* Linear faulting address (if emulating a page-faulting instruction) */
unsigned long eflags;
/* Emulated execution mode, represented by an X86EMUL_MODE value. */
diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h
index 9b17571e9bc3..be4a7247fa2b 100644
--- a/include/asm-x86/lguest.h
+++ b/include/asm-x86/lguest.h
@@ -34,8 +34,7 @@ extern const char lgstart_iret[], lgend_iret[];
extern void lguest_iret(void);
extern void lguest_init(void);
-struct lguest_regs
-{
+struct lguest_regs {
/* Manually saved part. */
unsigned long eax, ebx, ecx, edx;
unsigned long esi, edi, ebp;
@@ -51,8 +50,7 @@ struct lguest_regs
};
/* This is a guest-specific page (mapped ro) into the guest. */
-struct lguest_ro_state
-{
+struct lguest_ro_state {
/* Host information we need to restore when we switch back. */
u32 host_cr3;
struct desc_ptr host_idt_desc;
@@ -67,8 +65,7 @@ struct lguest_ro_state
struct desc_struct guest_gdt[GDT_ENTRIES];
};
-struct lg_cpu_arch
-{
+struct lg_cpu_arch {
/* The GDT entries copied into lguest_ro_state when running. */
struct desc_struct gdt[GDT_ENTRIES];
@@ -85,7 +82,7 @@ static inline void lguest_set_ts(void)
cr0 = read_cr0();
if (!(cr0 & 8))
- write_cr0(cr0|8);
+ write_cr0(cr0 | 8);
}
/* Full 4G segment descriptors, suitable for CS and DS. */
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h
index f239e7069cab..a3241f28e34a 100644
--- a/include/asm-x86/lguest_hcall.h
+++ b/include/asm-x86/lguest_hcall.h
@@ -46,7 +46,7 @@ hcall(unsigned long call,
{
/* "int" is the Intel instruction to trigger a trap. */
asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
- /* The call in %eax (aka "a") might be overwritten */
+ /* The call in %eax (aka "a") might be overwritten */
: "=a"(call)
/* The arguments are in %eax, %edx, %ebx & %ecx */
: "a"(call), "d"(arg1), "b"(arg2), "c"(arg3)
@@ -62,8 +62,7 @@ hcall(unsigned long call,
#define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
#define LHCALL_RING_SIZE 64
-struct hcall_args
-{
+struct hcall_args {
/* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */
unsigned long arg0, arg2, arg3, arg1;
};
diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h
index c048353f4b85..64e444f8e85b 100644
--- a/include/asm-x86/linkage.h
+++ b/include/asm-x86/linkage.h
@@ -1,6 +1,9 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
+#undef notrace
+#define notrace __attribute__((no_instrument_function))
+
#ifdef CONFIG_X86_64
#define __ALIGN .p2align 4,,15
#define __ALIGN_STR ".p2align 4,,15"
diff --git a/include/asm-x86/local.h b/include/asm-x86/local.h
index f852c62b3319..330a72496abd 100644
--- a/include/asm-x86/local.h
+++ b/include/asm-x86/local.h
@@ -18,32 +18,28 @@ typedef struct {
static inline void local_inc(local_t *l)
{
- __asm__ __volatile__(
- _ASM_INC "%0"
- :"+m" (l->a.counter));
+ asm volatile(_ASM_INC "%0"
+ : "+m" (l->a.counter));
}
static inline void local_dec(local_t *l)
{
- __asm__ __volatile__(
- _ASM_DEC "%0"
- :"+m" (l->a.counter));
+ asm volatile(_ASM_DEC "%0"
+ : "+m" (l->a.counter));
}
static inline void local_add(long i, local_t *l)
{
- __asm__ __volatile__(
- _ASM_ADD "%1,%0"
- :"+m" (l->a.counter)
- :"ir" (i));
+ asm volatile(_ASM_ADD "%1,%0"
+ : "+m" (l->a.counter)
+ : "ir" (i));
}
static inline void local_sub(long i, local_t *l)
{
- __asm__ __volatile__(
- _ASM_SUB "%1,%0"
- :"+m" (l->a.counter)
- :"ir" (i));
+ asm volatile(_ASM_SUB "%1,%0"
+ : "+m" (l->a.counter)
+ : "ir" (i));
}
/**
@@ -59,10 +55,9 @@ static inline int local_sub_and_test(long i, local_t *l)
{
unsigned char c;
- __asm__ __volatile__(
- _ASM_SUB "%2,%0; sete %1"
- :"+m" (l->a.counter), "=qm" (c)
- :"ir" (i) : "memory");
+ asm volatile(_ASM_SUB "%2,%0; sete %1"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
return c;
}
@@ -78,10 +73,9 @@ static inline int local_dec_and_test(local_t *l)
{
unsigned char c;
- __asm__ __volatile__(
- _ASM_DEC "%0; sete %1"
- :"+m" (l->a.counter), "=qm" (c)
- : : "memory");
+ asm volatile(_ASM_DEC "%0; sete %1"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
return c != 0;
}
@@ -97,10 +91,9 @@ static inline int local_inc_and_test(local_t *l)
{
unsigned char c;
- __asm__ __volatile__(
- _ASM_INC "%0; sete %1"
- :"+m" (l->a.counter), "=qm" (c)
- : : "memory");
+ asm volatile(_ASM_INC "%0; sete %1"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
return c != 0;
}
@@ -117,10 +110,9 @@ static inline int local_add_negative(long i, local_t *l)
{
unsigned char c;
- __asm__ __volatile__(
- _ASM_ADD "%2,%0; sets %1"
- :"+m" (l->a.counter), "=qm" (c)
- :"ir" (i) : "memory");
+ asm volatile(_ASM_ADD "%2,%0; sets %1"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
return c;
}
@@ -141,10 +133,9 @@ static inline long local_add_return(long i, local_t *l)
#endif
/* Modern 486+ processor */
__i = i;
- __asm__ __volatile__(
- _ASM_XADD "%0, %1;"
- :"+r" (i), "+m" (l->a.counter)
- : : "memory");
+ asm volatile(_ASM_XADD "%0, %1;"
+ : "+r" (i), "+m" (l->a.counter)
+ : : "memory");
return i + __i;
#ifdef CONFIG_M386
@@ -182,11 +173,11 @@ static inline long local_sub_return(long i, local_t *l)
#define local_add_unless(l, a, u) \
({ \
long c, old; \
- c = local_read(l); \
+ c = local_read((l)); \
for (;;) { \
if (unlikely(c == (u))) \
break; \
- old = local_cmpxchg((l), c, c + (a)); \
+ old = local_cmpxchg((l), c, c + (a)); \
if (likely(old == c)) \
break; \
c = old; \
@@ -214,26 +205,30 @@ static inline long local_sub_return(long i, local_t *l)
/* Need to disable preemption for the cpu local counters otherwise we could
still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
- ({ local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; })
+#define cpu_local_wrap_v(l) \
+({ \
+ local_t res__; \
+ preempt_disable(); \
+ res__ = (l); \
+ preempt_enable(); \
+ res__; \
+})
#define cpu_local_wrap(l) \
- ({ preempt_disable(); \
- l; \
- preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
+({ \
+ preempt_disable(); \
+ (l); \
+ preempt_enable(); \
+}) \
+
+#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
+#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
+#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l))))
+#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l))))
+#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
+#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
+
+#define __cpu_local_inc(l) cpu_local_inc((l))
+#define __cpu_local_dec(l) cpu_local_dec((l))
#define __cpu_local_add(i, l) cpu_local_add((i), (l))
#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
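cpu_local_wrap_v() brackets the per-CPU access in preempt_disable()/preempt_enable() so the task cannot migrate between picking its CPU and touching that CPU's counter, and it uses a GCC/Clang statement expression ({ ... }) to yield the access's value. A user-space sketch of the same shape, with the preemption calls stubbed out as counters:

#include <stdio.h>

static int preempt_count;
static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }

/* Statement expression: guards the access, yields its result. */
#define cpu_local_wrap_v(expr)          \
({                                      \
        long res__;                     \
        preempt_disable();              \
        res__ = (expr);                 \
        preempt_enable();               \
        res__;                          \
})

int main(void)
{
        long counter = 41;
        long v = cpu_local_wrap_v(counter + 1);

        printf("v=%ld preempt_count=%d\n", v, preempt_count);
        return 0;
}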
diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/mach-bigsmp/mach_apic.h
index 6df235e8ea91..8327907c79bf 100644
--- a/include/asm-x86/mach-bigsmp/mach_apic.h
+++ b/include/asm-x86/mach-bigsmp/mach_apic.h
@@ -1,10 +1,7 @@
#ifndef __ASM_MACH_APIC_H
#define __ASM_MACH_APIC_H
-
-extern u8 bios_cpu_apicid[];
-
-#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu])
+#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
#define esr_disable (1)
static inline int apic_id_registered(void)
@@ -90,7 +87,7 @@ static inline int apicid_to_node(int logical_apicid)
static inline int cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < NR_CPUS)
- return (int) bios_cpu_apicid[mps_cpu];
+ return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
return BAD_APICID;
}
@@ -109,17 +106,6 @@ static inline int cpu_to_logical_apicid(int cpu)
return cpu_physical_id(cpu);
}
-static inline int mpc_apic_id(struct mpc_config_processor *m,
- struct mpc_config_translation *translation_record)
-{
- printk("Processor #%d %u:%u APIC version %d\n",
- m->mpc_apicid,
- (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
- (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
- m->mpc_apicver);
- return m->mpc_apicid;
-}
-
static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
/* For clustered we don't have a good way to do this yet - hack */
diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h
index e3c2c1012c1c..0a6634f62abe 100644
--- a/include/asm-x86/mach-default/mach_apic.h
+++ b/include/asm-x86/mach-default/mach_apic.h
@@ -1,6 +1,8 @@
#ifndef __ASM_MACH_APIC_H
#define __ASM_MACH_APIC_H
+#ifdef CONFIG_X86_LOCAL_APIC
+
#include <mach_apicdef.h>
#include <asm/smp.h>
@@ -14,24 +16,25 @@ static inline cpumask_t target_cpus(void)
return cpumask_of_cpu(0);
#endif
}
-#define TARGET_CPUS (target_cpus())
#define NO_BALANCE_IRQ (0)
#define esr_disable (0)
+#ifdef CONFIG_X86_64
+#include <asm/genapic.h>
+#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
+#define INT_DEST_MODE (genapic->int_dest_mode)
+#define TARGET_CPUS (genapic->target_cpus())
+#define apic_id_registered (genapic->apic_id_registered)
+#define init_apic_ldr (genapic->init_apic_ldr)
+#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define phys_pkg_id (genapic->phys_pkg_id)
+#define vector_allocation_domain (genapic->vector_allocation_domain)
+extern void setup_apic_routing(void);
+#else
#define INT_DELIVERY_MODE dest_LowestPrio
#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
- return physid_isset(apicid, bitmap);
-}
-
-static inline unsigned long check_apicid_present(int bit)
-{
- return physid_isset(bit, phys_cpu_present_map);
-}
-
+#define TARGET_CPUS (target_cpus())
/*
* Set up the logical destination ID.
*
@@ -49,23 +52,51 @@ static inline void init_apic_ldr(void)
apic_write_around(APIC_LDR, val);
}
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
+static inline int apic_id_registered(void)
{
- return phys_map;
+ return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map);
+}
+
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+{
+ return cpus_addr(cpumask)[0];
+}
+
+static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
+{
+ return cpuid_apic >> index_msb;
}
static inline void setup_apic_routing(void)
{
+#ifdef CONFIG_X86_IO_APIC
printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
"Flat", nr_ioapics);
+#endif
}
-static inline int multi_timer_check(int apic, int irq)
+static inline int apicid_to_node(int logical_apicid)
{
return 0;
}
+#endif
-static inline int apicid_to_node(int logical_apicid)
+static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+ return physid_isset(apicid, bitmap);
+}
+
+static inline unsigned long check_apicid_present(int bit)
+{
+ return physid_isset(bit, phys_cpu_present_map);
+}
+
+static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
+{
+ return phys_map;
+}
+
+static inline int multi_timer_check(int apic, int irq)
{
return 0;
}
@@ -78,8 +109,13 @@ static inline int cpu_to_logical_apicid(int cpu)
static inline int cpu_present_to_apicid(int mps_cpu)
{
+#ifdef CONFIG_X86_64
+ if (cpu_present(mps_cpu))
+ return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
+#else
if (mps_cpu < get_physical_broadcast())
return mps_cpu;
+#endif
else
return BAD_APICID;
}
@@ -89,17 +125,6 @@ static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
return physid_mask_of_physid(phys_apicid);
}
-static inline int mpc_apic_id(struct mpc_config_processor *m,
- struct mpc_config_translation *translation_record)
-{
- printk("Processor #%d %u:%u APIC version %d\n",
- m->mpc_apicid,
- (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
- (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
- m->mpc_apicver);
- return m->mpc_apicid;
-}
-
static inline void setup_portio_remap(void)
{
}
@@ -109,23 +134,9 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
}
-static inline int apic_id_registered(void)
-{
- return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
-}
-
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
-{
- return cpus_addr(cpumask)[0];
-}
-
static inline void enable_apic_mode(void)
{
}
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
- return cpuid_apic >> index_msb;
-}
-
+#endif /* CONFIG_X86_LOCAL_APIC */
#endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-x86/mach-default/mach_apicdef.h b/include/asm-x86/mach-default/mach_apicdef.h
index ae9841319094..e4b29ba37de6 100644
--- a/include/asm-x86/mach-default/mach_apicdef.h
+++ b/include/asm-x86/mach-default/mach_apicdef.h
@@ -3,10 +3,14 @@
#include <asm/apic.h>
+#ifdef CONFIG_X86_64
+#define APIC_ID_MASK (0xFFu<<24)
+#define GET_APIC_ID(x) (((x)>>24)&0xFFu)
+#define SET_APIC_ID(x) (((x)<<24))
+#else
#define APIC_ID_MASK (0xF<<24)
-
static inline unsigned get_apic_id(unsigned long x)
-{
+{
unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
if (APIC_XAPIC(ver))
return (((x)>>24)&0xFF);
@@ -15,5 +19,6 @@ static inline unsigned get_apic_id(unsigned long x)
}
#define GET_APIC_ID(x) get_apic_id(x)
+#endif
#endif
diff --git a/include/asm-x86/mach-default/mach_ipi.h b/include/asm-x86/mach-default/mach_ipi.h
index 0dba244c86db..be323364e68f 100644
--- a/include/asm-x86/mach-default/mach_ipi.h
+++ b/include/asm-x86/mach-default/mach_ipi.h
@@ -9,10 +9,15 @@ void __send_IPI_shortcut(unsigned int shortcut, int vector);
extern int no_broadcast;
+#ifdef CONFIG_X86_64
+#include <asm/genapic.h>
+#define send_IPI_mask (genapic->send_IPI_mask)
+#else
static inline void send_IPI_mask(cpumask_t mask, int vector)
{
send_IPI_mask_bitmask(mask, vector);
}
+#endif
static inline void __local_send_IPI_allbutself(int vector)
{
@@ -33,6 +38,10 @@ static inline void __local_send_IPI_all(int vector)
__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}
+#ifdef CONFIG_X86_64
+#define send_IPI_allbutself (genapic->send_IPI_allbutself)
+#define send_IPI_all (genapic->send_IPI_all)
+#else
static inline void send_IPI_allbutself(int vector)
{
/*
@@ -50,5 +59,6 @@ static inline void send_IPI_all(int vector)
{
__local_send_IPI_all(vector);
}
+#endif
#endif /* __ASM_MACH_IPI_H */
diff --git a/include/asm-x86/mach-default/mach_mpparse.h b/include/asm-x86/mach-default/mach_mpparse.h
index 1d3832482580..d14108505bb8 100644
--- a/include/asm-x86/mach-default/mach_mpparse.h
+++ b/include/asm-x86/mach-default/mach_mpparse.h
@@ -1,17 +1,6 @@
#ifndef __ASM_MACH_MPPARSE_H
#define __ASM_MACH_MPPARSE_H
-static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
- struct mpc_config_translation *translation)
-{
-// Dprintk("Bus #%d is %s\n", m->mpc_busid, name);
-}
-
-static inline void mpc_oem_pci_bus(struct mpc_config_bus *m,
- struct mpc_config_translation *translation)
-{
-}
-
static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
char *productid)
{
diff --git a/include/asm-x86/mach-default/mach_reboot.h b/include/asm-x86/mach-default/mach_reboot.h
deleted file mode 100644
index 6adee6a97dec..000000000000
--- a/include/asm-x86/mach-default/mach_reboot.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * arch/i386/mach-generic/mach_reboot.h
- *
- * Machine specific reboot functions for generic.
- * Split out from reboot.c by Osamu Tomita <tomita@cinet.co.jp>
- */
-#ifndef _MACH_REBOOT_H
-#define _MACH_REBOOT_H
-
-static inline void kb_wait(void)
-{
- int i;
-
- for (i = 0; i < 0x10000; i++)
- if ((inb_p(0x64) & 0x02) == 0)
- break;
-}
-
-static inline void mach_reboot(void)
-{
- int i;
-
- /* old method, works on most machines */
- for (i = 0; i < 10; i++) {
- kb_wait();
- udelay(50);
- outb(0xfe, 0x64); /* pulse reset low */
- udelay(50);
- }
-
- /* New method: sets the "System flag" which, when set, indicates
- * successful completion of the keyboard controller self-test (Basic
- * Assurance Test, BAT). This is needed for some machines with no
- * keyboard plugged in. This read-modify-write sequence sets only the
- * system flag
- */
- for (i = 0; i < 10; i++) {
- int cmd;
-
- outb(0x20, 0x64); /* read Controller Command Byte */
- udelay(50);
- kb_wait();
- udelay(50);
- cmd = inb(0x60);
- udelay(50);
- kb_wait();
- udelay(50);
- outb(0x60, 0x64); /* write Controller Command Byte */
- udelay(50);
- kb_wait();
- udelay(50);
- outb(cmd | 0x14, 0x60); /* set "System flag" and "Keyboard Disabled" */
- udelay(50);
- kb_wait();
- udelay(50);
- outb(0xfe, 0x64); /* pulse reset low */
- udelay(50);
- }
-}
-
-#endif /* !_MACH_REBOOT_H */
diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h
index 7f45f6311059..3ff2c5bff93a 100644
--- a/include/asm-x86/mach-default/smpboot_hooks.h
+++ b/include/asm-x86/mach-default/smpboot_hooks.h
@@ -41,4 +41,11 @@ static inline void smpboot_setup_io_apic(void)
*/
if (!skip_ioapic_setup && nr_ioapics)
setup_IO_APIC();
+ else
+ nr_ioapics = 0;
+}
+
+static inline void smpboot_clear_io_apic(void)
+{
+ nr_ioapics = 0;
}
diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/mach-es7000/mach_apic.h
index d23011fdf454..fbc8ad256f5a 100644
--- a/include/asm-x86/mach-es7000/mach_apic.h
+++ b/include/asm-x86/mach-es7000/mach_apic.h
@@ -1,9 +1,7 @@
#ifndef __ASM_MACH_APIC_H
#define __ASM_MACH_APIC_H
-extern u8 bios_cpu_apicid[];
-
-#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu])
+#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
#define esr_disable (1)
static inline int apic_id_registered(void)
@@ -80,7 +78,7 @@ extern void enable_apic_mode(void);
extern int apic_version [MAX_APICS];
static inline void setup_apic_routing(void)
{
- int apic = bios_cpu_apicid[smp_processor_id()];
+ int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
(apic_version[apic] == 0x14) ?
"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]);
@@ -102,7 +100,7 @@ static inline int cpu_present_to_apicid(int mps_cpu)
if (!mps_cpu)
return boot_cpu_physical_apicid;
else if (mps_cpu < NR_CPUS)
- return (int) bios_cpu_apicid[mps_cpu];
+ return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
else
return BAD_APICID;
}
@@ -129,16 +127,6 @@ static inline int cpu_to_logical_apicid(int cpu)
#endif
}
-static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused)
-{
- printk("Processor #%d %u:%u APIC version %d\n",
- m->mpc_apicid,
- (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
- (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
- m->mpc_apicver);
- return (m->mpc_apicid);
-}
-
static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
/* For clustered we don't have a good way to do this yet - hack */
@@ -153,7 +141,7 @@ static inline void setup_portio_remap(void)
extern unsigned int boot_cpu_physical_apicid;
static inline int check_phys_apicid_present(int cpu_physical_apicid)
{
- boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+ boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
return (1);
}
diff --git a/include/asm-x86/mach-es7000/mach_mpparse.h b/include/asm-x86/mach-es7000/mach_mpparse.h
index 52ee75cd0fe1..ef26d3523625 100644
--- a/include/asm-x86/mach-es7000/mach_mpparse.h
+++ b/include/asm-x86/mach-es7000/mach_mpparse.h
@@ -3,17 +3,6 @@
#include <linux/acpi.h>
-static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
- struct mpc_config_translation *translation)
-{
- Dprintk("Bus #%d is %s\n", m->mpc_busid, name);
-}
-
-static inline void mpc_oem_pci_bus(struct mpc_config_bus *m,
- struct mpc_config_translation *translation)
-{
-}
-
extern int parse_unisys_oem (char *oemptr);
extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
extern void setup_unisys(void);
diff --git a/include/asm-x86/mach-generic/mach_apic.h b/include/asm-x86/mach-generic/mach_apic.h
index a236e7021528..6eff343e1233 100644
--- a/include/asm-x86/mach-generic/mach_apic.h
+++ b/include/asm-x86/mach-generic/mach_apic.h
@@ -19,7 +19,6 @@
#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid)
#define cpu_present_to_apicid (genapic->cpu_present_to_apicid)
#define apicid_to_cpu_present (genapic->apicid_to_cpu_present)
-#define mpc_apic_id (genapic->mpc_apic_id)
#define setup_portio_remap (genapic->setup_portio_remap)
#define check_apicid_present (genapic->check_apicid_present)
#define check_phys_apicid_present (genapic->check_phys_apicid_present)
diff --git a/include/asm-x86/mach-generic/mach_mpparse.h b/include/asm-x86/mach-generic/mach_mpparse.h
index dbd9fce54f4d..0d0b5ba2e9d1 100644
--- a/include/asm-x86/mach-generic/mach_mpparse.h
+++ b/include/asm-x86/mach-generic/mach_mpparse.h
@@ -1,11 +1,6 @@
#ifndef _MACH_MPPARSE_H
#define _MACH_MPPARSE_H 1
-#include <asm/genapic.h>
-
-#define mpc_oem_bus_info (genapic->mpc_oem_bus_info)
-#define mpc_oem_pci_bus (genapic->mpc_oem_pci_bus)
-
int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid);
int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
diff --git a/include/asm-x86/mach-numaq/mach_apic.h b/include/asm-x86/mach-numaq/mach_apic.h
index 3b637fac890b..75a56e5afbe7 100644
--- a/include/asm-x86/mach-numaq/mach_apic.h
+++ b/include/asm-x86/mach-numaq/mach_apic.h
@@ -95,6 +95,16 @@ static inline physid_mask_t apicid_to_cpu_present(int logical_apicid)
return physid_mask_of_physid(cpu + 4*node);
}
+struct mpc_config_translation {
+ unsigned char mpc_type;
+ unsigned char trans_len;
+ unsigned char trans_type;
+ unsigned char trans_quad;
+ unsigned char trans_global;
+ unsigned char trans_local;
+ unsigned short trans_reserved;
+};
+
static inline int mpc_apic_id(struct mpc_config_processor *m,
struct mpc_config_translation *translation_record)
{
diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h
index 51bbac8fc0c2..459b12401187 100644
--- a/include/asm-x86/mach-numaq/mach_mpparse.h
+++ b/include/asm-x86/mach-numaq/mach_mpparse.h
@@ -1,25 +1,10 @@
#ifndef __ASM_MACH_MPPARSE_H
#define __ASM_MACH_MPPARSE_H
-static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
- struct mpc_config_translation *translation)
-{
- int quad = translation->trans_quad;
- int local = translation->trans_local;
-
- mp_bus_id_to_node[m->mpc_busid] = quad;
- mp_bus_id_to_local[m->mpc_busid] = local;
- printk("Bus #%d is %s (node %d)\n", m->mpc_busid, name, quad);
-}
-
-static inline void mpc_oem_pci_bus(struct mpc_config_bus *m,
- struct mpc_config_translation *translation)
-{
- int quad = translation->trans_quad;
- int local = translation->trans_local;
-
- quad_local_to_mp_bus_id[quad][local] = m->mpc_busid;
-}
+extern void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
+ struct mpc_config_translation *translation);
+extern void mpc_oem_pci_bus(struct mpc_config_bus *m,
+ struct mpc_config_translation *translation);
/* Hook from generic ACPI tables.c */
static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id)
diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h
index 062c97f6100b..1f76c2e70232 100644
--- a/include/asm-x86/mach-summit/mach_apic.h
+++ b/include/asm-x86/mach-summit/mach_apic.h
@@ -40,7 +40,6 @@ static inline unsigned long check_apicid_present(int bit)
#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
-extern u8 bios_cpu_apicid[];
extern u8 cpu_2_logical_apicid[];
static inline void init_apic_ldr(void)
@@ -110,7 +109,7 @@ static inline int cpu_to_logical_apicid(int cpu)
static inline int cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < NR_CPUS)
- return (int)bios_cpu_apicid[mps_cpu];
+ return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
else
return BAD_APICID;
}
@@ -126,17 +125,6 @@ static inline physid_mask_t apicid_to_cpu_present(int apicid)
return physid_mask_of_physid(0);
}
-static inline int mpc_apic_id(struct mpc_config_processor *m,
- struct mpc_config_translation *translation_record)
-{
- printk("Processor #%d %u:%u APIC version %d\n",
- m->mpc_apicid,
- (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
- (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
- m->mpc_apicver);
- return m->mpc_apicid;
-}
-
static inline void setup_portio_remap(void)
{
}
diff --git a/include/asm-x86/mach-summit/mach_mpparse.h b/include/asm-x86/mach-summit/mach_mpparse.h
index c2520539d934..fdf591701339 100644
--- a/include/asm-x86/mach-summit/mach_mpparse.h
+++ b/include/asm-x86/mach-summit/mach_mpparse.h
@@ -12,17 +12,6 @@ extern void setup_summit(void);
#define setup_summit() {}
#endif
-static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
- struct mpc_config_translation *translation)
-{
- Dprintk("Bus #%d is %s\n", m->mpc_busid, name);
-}
-
-static inline void mpc_oem_pci_bus(struct mpc_config_bus *m,
- struct mpc_config_translation *translation)
-{
-}
-
static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
char *productid)
{
diff --git a/include/asm-x86/mach-visws/mach_apic.h b/include/asm-x86/mach-visws/mach_apic.h
index efac6f0d139f..a9ef33a8a995 100644
--- a/include/asm-x86/mach-visws/mach_apic.h
+++ b/include/asm-x86/mach-visws/mach_apic.h
@@ -23,7 +23,7 @@
static inline int apic_id_registered(void)
{
- return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
+ return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map);
}
/*
diff --git a/include/asm-x86/mach-visws/smpboot_hooks.h b/include/asm-x86/mach-visws/smpboot_hooks.h
index d926471fa359..c9b83e395a2e 100644
--- a/include/asm-x86/mach-visws/smpboot_hooks.h
+++ b/include/asm-x86/mach-visws/smpboot_hooks.h
@@ -22,3 +22,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
static inline void smpboot_setup_io_apic(void)
{
}
+
+static inline void smpboot_clear_io_apic(void)
+{
+}
diff --git a/include/asm-x86/mach_apic.h b/include/asm-x86/mach_apic.h
deleted file mode 100644
index 7b7115a0c1c9..000000000000
--- a/include/asm-x86/mach_apic.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_MACH_APIC_H
-#define __ASM_MACH_APIC_H
-
-/*
- * Copyright 2004 James Cleverdon, IBM.
- * Subject to the GNU Public License, v.2
- *
- * Generic APIC sub-arch defines.
- *
- * Hacked for x86-64 by James Cleverdon from i386 architecture code by
- * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
- * James Cleverdon.
- */
-
-#include <asm/genapic.h>
-
-#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
-#define INT_DEST_MODE (genapic->int_dest_mode)
-#define TARGET_CPUS (genapic->target_cpus())
-#define vector_allocation_domain (genapic->vector_allocation_domain)
-#define apic_id_registered (genapic->apic_id_registered)
-#define init_apic_ldr (genapic->init_apic_ldr)
-#define send_IPI_mask (genapic->send_IPI_mask)
-#define send_IPI_allbutself (genapic->send_IPI_allbutself)
-#define send_IPI_all (genapic->send_IPI_all)
-#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
-#define phys_pkg_id (genapic->phys_pkg_id)
-
-#endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-x86/mc146818rtc.h b/include/asm-x86/mc146818rtc.h
index cdd9f965835a..daf1ccde77af 100644
--- a/include/asm-x86/mc146818rtc.h
+++ b/include/asm-x86/mc146818rtc.h
@@ -42,7 +42,7 @@ extern volatile unsigned long cmos_lock;
static inline void lock_cmos(unsigned char reg)
{
unsigned long new;
- new = ((smp_processor_id()+1) << 8) | reg;
+ new = ((smp_processor_id() + 1) << 8) | reg;
for (;;) {
if (cmos_lock) {
cpu_relax();
@@ -57,22 +57,26 @@ static inline void unlock_cmos(void)
{
cmos_lock = 0;
}
+
static inline int do_i_have_lock_cmos(void)
{
- return (cmos_lock >> 8) == (smp_processor_id()+1);
+ return (cmos_lock >> 8) == (smp_processor_id() + 1);
}
+
static inline unsigned char current_lock_cmos_reg(void)
{
return cmos_lock & 0xff;
}
-#define lock_cmos_prefix(reg) \
+
+#define lock_cmos_prefix(reg) \
do { \
unsigned long cmos_flags; \
local_irq_save(cmos_flags); \
lock_cmos(reg)
-#define lock_cmos_suffix(reg) \
- unlock_cmos(); \
- local_irq_restore(cmos_flags); \
+
+#define lock_cmos_suffix(reg) \
+ unlock_cmos(); \
+ local_irq_restore(cmos_flags); \
} while (0)
#else
#define lock_cmos_prefix(reg) do {} while (0)
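The cmos_lock word that lock_cmos() builds packs the holder into one value: bits 8 and up hold smp_processor_id()+1 (so zero can mean "unlocked") and the low byte records the CMOS register in use; do_i_have_lock_cmos() and current_lock_cmos_reg() just unpack it. A sketch of the encoding with a hypothetical pack helper:

#include <stdio.h>

static unsigned long pack_cmos_lock(int cpu, unsigned char reg)
{
        /* (cpu + 1) << 8 keeps 0 free as the "unlocked" value. */
        return ((unsigned long)(cpu + 1) << 8) | reg;
}

int main(void)
{
        unsigned long lock = pack_cmos_lock(3, 0x0a); /* CPU 3, reg 0x0A */

        printf("holder cpu=%lu reg=%#lx\n",
               (lock >> 8) - 1, lock & 0xff);
        return 0;
}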
diff --git a/include/asm-x86/mca_dma.h b/include/asm-x86/mca_dma.h
index fbb1f3b71279..c3dca6edc6b1 100644
--- a/include/asm-x86/mca_dma.h
+++ b/include/asm-x86/mca_dma.h
@@ -12,18 +12,18 @@
* count by 2 when using 16-bit dma; that is not handled by these functions.
*
* Ramen Noodles are yummy.
- *
- * 1998 Tymm Twillman <tymm@computer.org>
+ *
+ * 1998 Tymm Twillman <tymm@computer.org>
*/
/*
- * Registers that are used by the DMA controller; FN is the function register
+ * Registers that are used by the DMA controller; FN is the function register
* (tell the controller what to do) and EXE is the execution register (how
* to do it)
*/
#define MCA_DMA_REG_FN 0x18
-#define MCA_DMA_REG_EXE 0x1A
+#define MCA_DMA_REG_EXE 0x1A
/*
* Functions that the DMA controller can do
@@ -43,9 +43,9 @@
/*
* Modes (used by setting MCA_DMA_FN_MODE in the function register)
- *
+ *
* Note that the MODE_READ is read from memory (write to device), and
- * MODE_WRITE is vice-versa.
+ * MODE_WRITE is vice-versa.
*/
#define MCA_DMA_MODE_XFER 0x04 /* read by default */
@@ -63,7 +63,7 @@
* IRQ context.
*/
-static __inline__ void mca_enable_dma(unsigned int dmanr)
+static inline void mca_enable_dma(unsigned int dmanr)
{
outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN);
}
@@ -76,7 +76,7 @@ static __inline__ void mca_enable_dma(unsigned int dmanr)
* IRQ context.
*/
-static __inline__ void mca_disable_dma(unsigned int dmanr)
+static inline void mca_disable_dma(unsigned int dmanr)
{
outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN);
}
@@ -87,10 +87,10 @@ static __inline__ void mca_disable_dma(unsigned int dmanr)
* @a: 24bit bus address
*
* Load the address register in the DMA controller. This has a 24bit
- * limitation (16Mb).
+ * limitation (16Mb).
*/
-static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a)
+static inline void mca_set_dma_addr(unsigned int dmanr, unsigned int a)
{
outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN);
outb(a & 0xff, MCA_DMA_REG_EXE);
@@ -106,14 +106,14 @@ static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a)
* limitation (16Mb). The return is a bus address.
*/
-static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr)
+static inline unsigned int mca_get_dma_addr(unsigned int dmanr)
{
unsigned int addr;
outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN);
addr = inb(MCA_DMA_REG_EXE);
addr |= inb(MCA_DMA_REG_EXE) << 8;
- addr |= inb(MCA_DMA_REG_EXE) << 16;
+ addr |= inb(MCA_DMA_REG_EXE) << 16;
return addr;
}
@@ -127,7 +127,7 @@ static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr)
* Setting a count of zero will not do what you expect.
*/
-static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count)
+static inline void mca_set_dma_count(unsigned int dmanr, unsigned int count)
{
count--; /* transfers one more than count -- correct for this */
@@ -144,7 +144,7 @@ static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count)
* on this DMA channel.
*/
-static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr)
+static inline unsigned int mca_get_dma_residue(unsigned int dmanr)
{
unsigned short count;
@@ -164,12 +164,12 @@ static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr)
* with an I/O port target.
*/
-static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
+static inline void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
{
/*
* DMA from a port address -- set the io address
*/
-
+
outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN);
outb(io_addr & 0xff, MCA_DMA_REG_EXE);
outb((io_addr >> 8) & 0xff, MCA_DMA_REG_EXE);
@@ -192,7 +192,7 @@ static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
* %MCA_DMA_MODE_16 to do 16bit transfers.
*/
-static __inline__ void mca_set_dma_mode(unsigned int dmanr, unsigned int mode)
+static inline void mca_set_dma_mode(unsigned int dmanr, unsigned int mode)
{
outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN);
outb(mode, MCA_DMA_REG_EXE);
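mca_get_dma_addr() above assembles its 24-bit bus address from three successive byte reads of the execution register, little-endian, which is why the controller tops out at 16 MB. The same reassembly with the three inb(MCA_DMA_REG_EXE) reads replaced by an array, as a standalone sketch:

#include <stdio.h>

static unsigned int assemble24(const unsigned char bytes[3])
{
        unsigned int addr;

        addr  = bytes[0];           /* low byte first */
        addr |= bytes[1] << 8;
        addr |= bytes[2] << 16;
        return addr;                /* 24-bit value, max 16 MB - 1 */
}

int main(void)
{
        unsigned char raw[3] = { 0x34, 0x12, 0xab };

        printf("%#x\n", assemble24(raw));   /* prints 0xab1234 */
        return 0;
}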
diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h
index efa962c38897..00e88679e11f 100644
--- a/include/asm-x86/mmu.h
+++ b/include/asm-x86/mmu.h
@@ -10,10 +10,10 @@
*
* cpu_vm_mask is used to optimize ldt flushing.
*/
-typedef struct {
+typedef struct {
void *ldt;
#ifdef CONFIG_X86_64
- rwlock_t ldtlock;
+ rwlock_t ldtlock;
#endif
int size;
struct mutex lock;
diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h
index 8198d1cca1f3..9756ae0f1dd3 100644
--- a/include/asm-x86/mmu_context_32.h
+++ b/include/asm-x86/mmu_context_32.h
@@ -62,7 +62,7 @@ static inline void switch_mm(struct mm_struct *prev,
BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
- /* We were in lazy tlb mode and leave_mm disabled
+ /* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload %cr3.
*/
load_cr3(next->pgd);
@@ -75,10 +75,10 @@ static inline void switch_mm(struct mm_struct *prev,
#define deactivate_mm(tsk, mm) \
asm("movl %0,%%gs": :"r" (0));
-#define activate_mm(prev, next) \
- do { \
- paravirt_activate_mm(prev, next); \
- switch_mm((prev),(next),NULL); \
- } while(0);
+#define activate_mm(prev, next) \
+do { \
+ paravirt_activate_mm((prev), (next)); \
+ switch_mm((prev), (next), NULL); \
+} while (0);
#endif
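
The activate_mm() rewrite above adopts the do { ... } while (0) wrapper, although the 32-bit version still ends in "while (0);" -- that trailing semicolon reintroduces the if/else breakage the wrapper exists to prevent (compare the 64-bit deactivate_mm() below, which drops it). A self-contained sketch of why the idiom matters; step1()/step2() are hypothetical stand-ins for paravirt_activate_mm()/switch_mm():

    #include <stdio.h>

    static void step1(int v) { printf("step1 %d\n", v); }
    static void step2(int v) { printf("step2 %d\n", v); }

    /* Unwrapped, the second statement escapes the if-body: */
    #define BAD_PAIR(v)     step1(v); step2(v)
    /* Wrapped, the macro is exactly one statement and takes a ';': */
    #define GOOD_PAIR(v)    do { step1(v); step2(v); } while (0)

    int main(void)
    {
            if (1)
                    GOOD_PAIR(42);  /* BAD_PAIR(42) here would not compile
                                     * once the else follows */
            else
                    step2(0);
            return 0;
    }
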
diff --git a/include/asm-x86/mmu_context_64.h b/include/asm-x86/mmu_context_64.h
index ad6dc821ef9e..ca44c71e7fb3 100644
--- a/include/asm-x86/mmu_context_64.h
+++ b/include/asm-x86/mmu_context_64.h
@@ -20,12 +20,12 @@ void destroy_context(struct mm_struct *mm);
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
- if (read_pda(mmu_state) == TLBSTATE_OK)
+ if (read_pda(mmu_state) == TLBSTATE_OK)
write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();
@@ -39,7 +39,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
cpu_set(cpu, next->cpu_vm_mask);
load_cr3(next->pgd);
- if (unlikely(next->context.ldt != prev->context.ldt))
+ if (unlikely(next->context.ldt != prev->context.ldt))
load_LDT_nolock(&next->context);
}
#ifdef CONFIG_SMP
@@ -48,7 +48,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (read_pda(active_mm) != next)
BUG();
if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
- /* We were in lazy tlb mode and leave_mm disabled
+ /* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*/
@@ -59,13 +59,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
#endif
}
-#define deactivate_mm(tsk,mm) do { \
- load_gs_index(0); \
- asm volatile("movl %0,%%fs"::"r"(0)); \
-} while(0)
+#define deactivate_mm(tsk, mm) \
+do { \
+ load_gs_index(0); \
+ asm volatile("movl %0,%%fs"::"r"(0)); \
+} while (0)
-#define activate_mm(prev, next) \
- switch_mm((prev),(next),NULL)
+#define activate_mm(prev, next) \
+ switch_mm((prev), (next), NULL)
#endif
diff --git a/include/asm-x86/mmx.h b/include/asm-x86/mmx.h
index 46b71da99869..940881218ff8 100644
--- a/include/asm-x86/mmx.h
+++ b/include/asm-x86/mmx.h
@@ -6,7 +6,7 @@
*/
#include <linux/types.h>
-
+
extern void *_mmx_memcpy(void *to, const void *from, size_t size);
extern void mmx_clear_page(void *page);
extern void mmx_copy_page(void *to, void *from);
diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h
index 274a59566c45..cb2cad0b65a7 100644
--- a/include/asm-x86/mmzone_32.h
+++ b/include/asm-x86/mmzone_32.h
@@ -18,7 +18,7 @@ extern struct pglist_data *node_data[];
#include <asm/srat.h>
#endif
-extern int get_memcfg_numa_flat(void );
+extern int get_memcfg_numa_flat(void);
/*
* This allows any one NUMA architecture to be compiled
* for, and still fall back to the flat function if it
@@ -129,7 +129,7 @@ static inline int pfn_valid(int pfn)
struct pglist_data __maybe_unused \
*__alloc_bootmem_node__pgdat = (pgdat); \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \
- __pa(MAX_DMA_ADDRESS)) \
+ __pa(MAX_DMA_ADDRESS)); \
})
#define alloc_bootmem_low_pages_node(pgdat, x) \
({ \
diff --git a/include/asm-x86/mmzone_64.h b/include/asm-x86/mmzone_64.h
index ebaf9663aa8a..594bd0dc1d08 100644
--- a/include/asm-x86/mmzone_64.h
+++ b/include/asm-x86/mmzone_64.h
@@ -7,7 +7,7 @@
#ifdef CONFIG_NUMA
-#define VIRTUAL_BUG_ON(x)
+#define VIRTUAL_BUG_ON(x)
#include <asm/smp.h>
@@ -16,7 +16,7 @@ struct memnode {
int shift;
unsigned int mapsize;
s16 *map;
- s16 embedded_map[64-8];
+ s16 embedded_map[64 - 8];
} ____cacheline_aligned; /* total size = 128 bytes */
extern struct memnode memnode;
#define memnode_shift memnode.shift
@@ -25,27 +25,27 @@ extern struct memnode memnode;
extern struct pglist_data *node_data[];
-static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
-{
- unsigned nid;
+static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
+{
+ unsigned nid;
VIRTUAL_BUG_ON(!memnodemap);
VIRTUAL_BUG_ON((addr >> memnode_shift) >= memnodemapsize);
- nid = memnodemap[addr >> memnode_shift];
- VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
- return nid;
-}
+ nid = memnodemap[addr >> memnode_shift];
+ VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
+ return nid;
+}
#define NODE_DATA(nid) (node_data[nid])
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
+#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
NODE_DATA(nid)->node_spanned_pages)
extern int early_pfn_to_nid(unsigned long pfn);
#ifdef CONFIG_NUMA_EMU
-#define FAKE_NODE_MIN_SIZE (64*1024*1024)
-#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1uL))
+#define FAKE_NODE_MIN_SIZE (64 * 1024 * 1024)
+#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL))
#endif
#endif
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h
index 781ad74ab9e9..57a991b9c053 100644
--- a/include/asm-x86/mpspec.h
+++ b/include/asm-x86/mpspec.h
@@ -1,16 +1,13 @@
#ifndef _AM_X86_MPSPEC_H
#define _AM_X86_MPSPEC_H
+#include <linux/init.h>
+
#include <asm/mpspec_def.h>
#ifdef CONFIG_X86_32
#include <mach_mpspec.h>
-extern int mp_bus_id_to_type[MAX_MP_BUSSES];
-extern int mp_bus_id_to_node[MAX_MP_BUSSES];
-extern int mp_bus_id_to_local[MAX_MP_BUSSES];
-extern int quad_local_to_mp_bus_id[NR_CPUS/4][4];
-
extern unsigned int def_to_bigsmp;
extern int apic_version[MAX_APICS];
extern u8 apicid_2_node[];
@@ -24,27 +21,30 @@ extern int pic_mode;
/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */
#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
-extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
+extern void early_find_smp_config(void);
+extern void early_get_smp_config(void);
#endif
+#if defined(CONFIG_MCA) || defined(CONFIG_EISA)
+extern int mp_bus_id_to_type[MAX_MP_BUSSES];
+#endif
+
+extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
+
extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES];
extern unsigned int boot_cpu_physical_apicid;
extern int smp_found_config;
-extern int nr_ioapics;
-extern int mp_irq_entries;
-extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
extern int mpc_default_type;
extern unsigned long mp_lapic_addr;
extern void find_smp_config(void);
extern void get_smp_config(void);
+void __cpuinit generic_processor_info(int apicid, int version);
#ifdef CONFIG_ACPI
-extern void mp_register_lapic(u8 id, u8 enabled);
-extern void mp_register_lapic_address(u64 address);
-extern void mp_register_ioapic(u8 id, u32 address, u32 gsi_base);
+extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
u32 gsi);
extern void mp_config_acpi_legacy_irqs(void);
@@ -53,8 +53,7 @@ extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low);
#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)
-struct physid_mask
-{
+struct physid_mask {
unsigned long mask[PHYSID_ARRAY_SIZE];
};
@@ -63,34 +62,34 @@ typedef struct physid_mask physid_mask_t;
#define physid_set(physid, map) set_bit(physid, (map).mask)
#define physid_clear(physid, map) clear_bit(physid, (map).mask)
#define physid_isset(physid, map) test_bit(physid, (map).mask)
-#define physid_test_and_set(physid, map) \
+#define physid_test_and_set(physid, map) \
test_and_set_bit(physid, (map).mask)
-#define physids_and(dst, src1, src2) \
+#define physids_and(dst, src1, src2) \
bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
-#define physids_or(dst, src1, src2) \
+#define physids_or(dst, src1, src2) \
bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
-#define physids_clear(map) \
+#define physids_clear(map) \
bitmap_zero((map).mask, MAX_APICS)
-#define physids_complement(dst, src) \
+#define physids_complement(dst, src) \
bitmap_complement((dst).mask, (src).mask, MAX_APICS)
-#define physids_empty(map) \
+#define physids_empty(map) \
bitmap_empty((map).mask, MAX_APICS)
-#define physids_equal(map1, map2) \
+#define physids_equal(map1, map2) \
bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
-#define physids_weight(map) \
+#define physids_weight(map) \
bitmap_weight((map).mask, MAX_APICS)
-#define physids_shift_right(d, s, n) \
+#define physids_shift_right(d, s, n) \
bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
-#define physids_shift_left(d, s, n) \
+#define physids_shift_left(d, s, n) \
bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
#define physids_coerce(map) ((map).mask[0])
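
The physid_* wrappers above are thin layers over the generic bitmap helpers, sized to MAX_APICS bits. A usage sketch; the PHYSID_MASK_NONE all-zero initializer is assumed from elsewhere in this header:

    /* Hypothetical: track which APIC ids have been registered. */
    static int example_claim_apicid(int apicid)
    {
            static physid_mask_t claimed = PHYSID_MASK_NONE;

            /* nonzero return => this id was already claimed */
            return physid_test_and_set(apicid, claimed);
    }
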
diff --git a/include/asm-x86/mpspec_def.h b/include/asm-x86/mpspec_def.h
index 3504617fe648..dc6ef85e3624 100644
--- a/include/asm-x86/mpspec_def.h
+++ b/include/asm-x86/mpspec_def.h
@@ -11,7 +11,7 @@
* information is.
*/
-#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_')
+#define SMP_MAGIC_IDENT (('_'<<24) | ('P'<<16) | ('M'<<8) | '_')
#ifdef CONFIG_X86_32
# define MAX_MPC_ENTRY 1024
@@ -23,8 +23,7 @@
# define MAX_APICS 255
#endif
-struct intel_mp_floating
-{
+struct intel_mp_floating {
char mpf_signature[4]; /* "_MP_" */
unsigned int mpf_physptr; /* Configuration table address */
unsigned char mpf_length; /* Our length (paragraphs) */
@@ -39,14 +38,13 @@ struct intel_mp_floating
#define MPC_SIGNATURE "PCMP"
-struct mp_config_table
-{
+struct mp_config_table {
char mpc_signature[4];
unsigned short mpc_length; /* Size of table */
- char mpc_spec; /* 0x01 */
- char mpc_checksum;
- char mpc_oem[8];
- char mpc_productid[12];
+ char mpc_spec; /* 0x01 */
+ char mpc_checksum;
+ char mpc_oem[8];
+ char mpc_productid[12];
unsigned int mpc_oemptr; /* 0 if not present */
unsigned short mpc_oemsize; /* 0 if not present */
unsigned short mpc_oemcount;
@@ -71,8 +69,7 @@ struct mp_config_table
#define CPU_MODEL_MASK 0x00F0
#define CPU_FAMILY_MASK 0x0F00
-struct mpc_config_processor
-{
+struct mpc_config_processor {
unsigned char mpc_type;
unsigned char mpc_apicid; /* Local APIC number */
unsigned char mpc_apicver; /* Its versions */
@@ -82,8 +79,7 @@ struct mpc_config_processor
unsigned int mpc_reserved[2];
};
-struct mpc_config_bus
-{
+struct mpc_config_bus {
unsigned char mpc_type;
unsigned char mpc_busid;
unsigned char mpc_bustype[6];
@@ -111,8 +107,7 @@ struct mpc_config_bus
#define MPC_APIC_USABLE 0x01
-struct mpc_config_ioapic
-{
+struct mpc_config_ioapic {
unsigned char mpc_type;
unsigned char mpc_apicid;
unsigned char mpc_apicver;
@@ -120,8 +115,7 @@ struct mpc_config_ioapic
unsigned int mpc_apicaddr;
};
-struct mpc_config_intsrc
-{
+struct mpc_config_intsrc {
unsigned char mpc_type;
unsigned char mpc_irqtype;
unsigned short mpc_irqflag;
@@ -144,8 +138,7 @@ enum mp_irq_source_types {
#define MP_APIC_ALL 0xFF
-struct mpc_config_lintsrc
-{
+struct mpc_config_lintsrc {
unsigned char mpc_type;
unsigned char mpc_irqtype;
unsigned short mpc_irqflag;
@@ -157,8 +150,7 @@ struct mpc_config_lintsrc
#define MPC_OEM_SIGNATURE "_OEM"
-struct mp_config_oemtable
-{
+struct mp_config_oemtable {
char oem_signature[4];
unsigned short oem_length; /* Size of table */
char oem_rev; /* 0x01 */
@@ -166,17 +158,6 @@ struct mp_config_oemtable
char mpc_oem[8];
};
-struct mpc_config_translation
-{
- unsigned char mpc_type;
- unsigned char trans_len;
- unsigned char trans_type;
- unsigned char trans_quad;
- unsigned char trans_global;
- unsigned char trans_local;
- unsigned short trans_reserved;
-};
-
/*
* Default configurations
*
@@ -196,4 +177,3 @@ enum mp_bustype {
MP_BUS_MCA,
};
#endif
-
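
For reference, the SMP_MAGIC_IDENT expression reflowed above packs the ASCII signature "_MP_" into a 32-bit value least-significant byte first, so on little-endian x86 the in-memory bytes match the mpf_signature field it is compared against. A standalone sketch:

    #include <stdio.h>
    #include <string.h>

    #define SMP_MAGIC_IDENT (('_'<<24) | ('P'<<16) | ('M'<<8) | '_')

    int main(void)
    {
            unsigned int magic = SMP_MAGIC_IDENT;
            char sig[5];

            memcpy(sig, &magic, 4);         /* little-endian: "_MP_" */
            sig[4] = '\0';
            printf("0x%08x = \"%s\"\n", magic, sig);
            return 0;
    }
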
diff --git a/include/asm-x86/msidef.h b/include/asm-x86/msidef.h
index 5b8acddb70fb..296f29ce426d 100644
--- a/include/asm-x86/msidef.h
+++ b/include/asm-x86/msidef.h
@@ -11,7 +11,8 @@
#define MSI_DATA_VECTOR_SHIFT 0
#define MSI_DATA_VECTOR_MASK 0x000000ff
-#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK)
+#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & \
+ MSI_DATA_VECTOR_MASK)
#define MSI_DATA_DELIVERY_MODE_SHIFT 8
#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
@@ -37,11 +38,14 @@
#define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT)
#define MSI_ADDR_REDIRECTION_SHIFT 3
-#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) /* dedicated cpu */
-#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) /* lowest priority */
+#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
+ /* dedicated cpu */
+#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
+ /* lowest priority */
#define MSI_ADDR_DEST_ID_SHIFT 12
#define MSI_ADDR_DEST_ID_MASK 0x00ffff0
-#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & MSI_ADDR_DEST_ID_MASK)
+#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
+ MSI_ADDR_DEST_ID_MASK)
#endif /* ASM_MSIDEF_H */
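
The macros reflowed above build the MSI data and address words field by field. A hedged sketch of how they compose; physical destination mode is an assumption here, and MSI_ADDR_DEST_MODE_PHYSICAL is the counterpart of the _LOGICAL flag visible in the hunk:

    #include <asm/msidef.h>

    /* Hypothetical helper -- not part of the patch. */
    static inline void compose_msi(unsigned int vec, unsigned int dest,
                                   unsigned int *data, unsigned int *addr_lo)
    {
            *data    = MSI_DATA_VECTOR(vec) | MSI_DATA_DELIVERY_FIXED;
            *addr_lo = MSI_ADDR_DEST_MODE_PHYSICAL |    /* assumed flag */
                       MSI_ADDR_REDIRECTION_CPU |
                       MSI_ADDR_DEST_ID(dest);
    }
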
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h
index fae118a25278..09413ad39d3c 100644
--- a/include/asm-x86/msr-index.h
+++ b/include/asm-x86/msr-index.h
@@ -57,6 +57,8 @@
#define MSR_MTRRfix4K_F8000 0x0000026f
#define MSR_MTRRdefType 0x000002ff
+#define MSR_IA32_CR_PAT 0x00000277
+
#define MSR_IA32_DEBUGCTLMSR 0x000001d9
#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
@@ -83,6 +85,7 @@
/* AMD64 MSRs. Not complete. See the architecture manual for a more
complete list. */
+#define MSR_AMD64_NB_CFG 0xc001001f
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
@@ -109,6 +112,7 @@
#define MSR_K8_SYSCFG 0xc0010010
#define MSR_K8_HWCR 0xc0010015
#define MSR_K8_ENABLE_C1E 0xc0010055
+#define MSR_K8_TSEG_ADDR 0xc0010112
#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index 3ca29ebebbb1..3707650a169b 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -16,8 +16,8 @@
static inline unsigned long long native_read_tscp(unsigned int *aux)
{
unsigned long low, high;
- asm volatile (".byte 0x0f,0x01,0xf9"
- : "=a" (low), "=d" (high), "=c" (*aux));
+ asm volatile(".byte 0x0f,0x01,0xf9"
+ : "=a" (low), "=d" (high), "=c" (*aux));
return low | ((u64)high << 32);
}
@@ -29,7 +29,7 @@ static inline unsigned long long native_read_tscp(unsigned int *aux)
*/
#ifdef CONFIG_X86_64
#define DECLARE_ARGS(val, low, high) unsigned low, high
-#define EAX_EDX_VAL(val, low, high) (low | ((u64)(high) << 32))
+#define EAX_EDX_VAL(val, low, high) ((low) | ((u64)(high) << 32))
#define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high)
#define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high)
#else
@@ -57,7 +57,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
".section .fixup,\"ax\"\n\t"
"3: mov %3,%0 ; jmp 1b\n\t"
".previous\n\t"
- _ASM_EXTABLE(2b,3b)
+ _ASM_EXTABLE(2b, 3b)
: "=r" (*err), EAX_EDX_RET(val, low, high)
: "c" (msr), "i" (-EFAULT));
return EAX_EDX_VAL(val, low, high);
@@ -78,10 +78,10 @@ static inline int native_write_msr_safe(unsigned int msr,
".section .fixup,\"ax\"\n\t"
"3: mov %4,%0 ; jmp 1b\n\t"
".previous\n\t"
- _ASM_EXTABLE(2b,3b)
+ _ASM_EXTABLE(2b, 3b)
: "=a" (err)
: "c" (msr), "0" (low), "d" (high),
- "i" (-EFAULT));
+ "i" (-EFAULT));
return err;
}
@@ -116,23 +116,23 @@ static inline unsigned long long native_read_pmc(int counter)
* pointer indirection), this allows gcc to optimize better
*/
-#define rdmsr(msr,val1,val2) \
- do { \
- u64 __val = native_read_msr(msr); \
- (val1) = (u32)__val; \
- (val2) = (u32)(__val >> 32); \
- } while(0)
+#define rdmsr(msr, val1, val2) \
+do { \
+ u64 __val = native_read_msr((msr)); \
+ (val1) = (u32)__val; \
+ (val2) = (u32)(__val >> 32); \
+} while (0)
static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
native_write_msr(msr, low, high);
}
-#define rdmsrl(msr,val) \
- ((val) = native_read_msr(msr))
+#define rdmsrl(msr, val) \
+ ((val) = native_read_msr((msr)))
#define wrmsrl(msr, val) \
- native_write_msr(msr, (u32)((u64)(val)), (u32)((u64)(val) >> 32))
+ native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32))
/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
@@ -141,14 +141,22 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
}
/* rdmsr with exception handling */
-#define rdmsr_safe(msr,p1,p2) \
- ({ \
- int __err; \
- u64 __val = native_read_msr_safe(msr, &__err); \
- (*p1) = (u32)__val; \
- (*p2) = (u32)(__val >> 32); \
- __err; \
- })
+#define rdmsr_safe(msr, p1, p2) \
+({ \
+ int __err; \
+ u64 __val = native_read_msr_safe((msr), &__err); \
+ (*p1) = (u32)__val; \
+ (*p2) = (u32)(__val >> 32); \
+ __err; \
+})
+
+static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+{
+ int err;
+
+ *p = native_read_msr_safe(msr, &err);
+ return err;
+}
#define rdtscl(low) \
((low) = (u32)native_read_tsc())
@@ -156,35 +164,37 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
#define rdtscll(val) \
((val) = native_read_tsc())
-#define rdpmc(counter,low,high) \
- do { \
- u64 _l = native_read_pmc(counter); \
- (low) = (u32)_l; \
- (high) = (u32)(_l >> 32); \
- } while(0)
+#define rdpmc(counter, low, high) \
+do { \
+ u64 _l = native_read_pmc((counter)); \
+ (low) = (u32)_l; \
+ (high) = (u32)(_l >> 32); \
+} while (0)
-#define rdtscp(low, high, aux) \
- do { \
- unsigned long long _val = native_read_tscp(&(aux)); \
- (low) = (u32)_val; \
- (high) = (u32)(_val >> 32); \
- } while (0)
+#define rdtscp(low, high, aux) \
+do { \
+ unsigned long long _val = native_read_tscp(&(aux)); \
+ (low) = (u32)_val; \
+ (high) = (u32)(_val >> 32); \
+} while (0)
#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))
#endif /* !CONFIG_PARAVIRT */
-#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
+#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \
+ (u32)((val) >> 32))
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+#define write_tsc(val1, val2) wrmsr(0x10, (val1), (val2))
-#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
+#define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
#ifdef CONFIG_SMP
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
#else /* CONFIG_SMP */
static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
@@ -195,7 +205,8 @@ static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
wrmsr(msr_no, l, h);
}
-static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
+ u32 *l, u32 *h)
{
return rdmsr_safe(msr_no, l, h);
}
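
The rdmsrl_safe() helper added above is the 64-bit sibling of rdmsr_safe(): it returns nonzero when the RDMSR faults instead of letting the kernel oops. A usage sketch reading the PAT MSR introduced in the msr-index.h hunk earlier:

    /* Sketch: probe an MSR that may not exist on this CPU. */
    static u64 read_pat_or_default(void)
    {
            unsigned long long pat;

            if (rdmsrl_safe(MSR_IA32_CR_PAT, &pat))
                    return 0;       /* RDMSR faulted: no PAT */
            return pat;
    }
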
diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h
index 319d065800be..a69a01a51729 100644
--- a/include/asm-x86/mtrr.h
+++ b/include/asm-x86/mtrr.h
@@ -28,8 +28,7 @@
#define MTRR_IOCTL_BASE 'M'
-struct mtrr_sentry
-{
+struct mtrr_sentry {
unsigned long base; /* Base address */
unsigned int size; /* Size of region */
unsigned int type; /* Type of region */
@@ -41,8 +40,7 @@ struct mtrr_sentry
will break. */
#ifdef __i386__
-struct mtrr_gentry
-{
+struct mtrr_gentry {
unsigned int regnum; /* Register number */
unsigned long base; /* Base address */
unsigned int size; /* Size of region */
@@ -51,8 +49,7 @@ struct mtrr_gentry
#else /* __i386__ */
-struct mtrr_gentry
-{
+struct mtrr_gentry {
unsigned long base; /* Base address */
unsigned int size; /* Size of region */
unsigned int regnum; /* Register number */
@@ -86,38 +83,45 @@ struct mtrr_gentry
/* The following functions are for use by other drivers */
# ifdef CONFIG_MTRR
+extern u8 mtrr_type_lookup(u64 addr, u64 end);
extern void mtrr_save_fixed_ranges(void *);
extern void mtrr_save_state(void);
-extern int mtrr_add (unsigned long base, unsigned long size,
- unsigned int type, bool increment);
-extern int mtrr_add_page (unsigned long base, unsigned long size,
- unsigned int type, bool increment);
-extern int mtrr_del (int reg, unsigned long base, unsigned long size);
-extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
+extern int mtrr_add(unsigned long base, unsigned long size,
+ unsigned int type, bool increment);
+extern int mtrr_add_page(unsigned long base, unsigned long size,
+ unsigned int type, bool increment);
+extern int mtrr_del(int reg, unsigned long base, unsigned long size);
+extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
extern void mtrr_ap_init(void);
extern void mtrr_bp_init(void);
extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
+extern int amd_special_default_mtrr(void);
# else
+static inline u8 mtrr_type_lookup(u64 addr, u64 end)
+{
+ /*
+ * Return no-MTRRs:
+ */
+ return 0xff;
+}
#define mtrr_save_fixed_ranges(arg) do {} while (0)
#define mtrr_save_state() do {} while (0)
-static __inline__ int mtrr_add (unsigned long base, unsigned long size,
- unsigned int type, bool increment)
+static inline int mtrr_add(unsigned long base, unsigned long size,
+ unsigned int type, bool increment)
{
return -ENODEV;
}
-static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
+static inline int mtrr_add_page(unsigned long base, unsigned long size,
unsigned int type, bool increment)
{
return -ENODEV;
}
-static __inline__ int mtrr_del (int reg, unsigned long base,
- unsigned long size)
+static inline int mtrr_del(int reg, unsigned long base, unsigned long size)
{
return -ENODEV;
}
-static __inline__ int mtrr_del_page (int reg, unsigned long base,
- unsigned long size)
+static inline int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
return -ENODEV;
}
@@ -125,7 +129,9 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
{
return 0;
}
-static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;}
+static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
+{
+}
#define mtrr_ap_init() do {} while (0)
#define mtrr_bp_init() do {} while (0)
@@ -134,15 +140,13 @@ static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;}
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
-struct mtrr_sentry32
-{
+struct mtrr_sentry32 {
compat_ulong_t base; /* Base address */
compat_uint_t size; /* Size of region */
compat_uint_t type; /* Type of region */
};
-struct mtrr_gentry32
-{
+struct mtrr_gentry32 {
compat_ulong_t regnum; /* Register number */
compat_uint_t base; /* Base address */
compat_uint_t size; /* Size of region */
@@ -151,16 +155,17 @@ struct mtrr_gentry32
#define MTRR_IOCTL_BASE 'M'
-#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32)
-#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32)
-#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32)
-#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
-#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32)
-#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32)
-#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32)
-#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32)
-#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
-#define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
+#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32)
+#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32)
+#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32)
+#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
+#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32)
+#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32)
+#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32)
+#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32)
+#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
+#define MTRRIOC32_KILL_PAGE_ENTRY \
+ _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
#endif /* CONFIG_COMPAT */
#endif /* __KERNEL__ */
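
The mtrr_add()/mtrr_del() prototypes cleaned up above form the driver-facing API: mtrr_add() returns a register number (or a negative errno) that is later handed back to mtrr_del(). A sketch with a hypothetical frame-buffer range; MTRR_TYPE_WRCOMB is defined earlier in this header:

    static int example_map_framebuffer(void)
    {
            int reg = mtrr_add(0xd0000000, 0x1000000,
                               MTRR_TYPE_WRCOMB, true);

            if (reg < 0)
                    return reg;     /* e.g. -ENODEV or no free register */
            /* ... use the write-combined range ... */
            return mtrr_del(reg, 0xd0000000, 0x1000000);
    }
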
diff --git a/include/asm-x86/mutex_32.h b/include/asm-x86/mutex_32.h
index bbeefb96ddfd..73e928ef5f03 100644
--- a/include/asm-x86/mutex_32.h
+++ b/include/asm-x86/mutex_32.h
@@ -9,7 +9,7 @@
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H
-#include "asm/alternative.h"
+#include <asm/alternative.h>
/**
* __mutex_fastpath_lock - try to take the lock by moving the count
@@ -21,22 +21,20 @@
* wasn't 1 originally. This function MUST leave the value lower than 1
* even when the "1" assertion wasn't true.
*/
-#define __mutex_fastpath_lock(count, fail_fn) \
-do { \
- unsigned int dummy; \
- \
- typecheck(atomic_t *, count); \
+#define __mutex_fastpath_lock(count, fail_fn) \
+do { \
+ unsigned int dummy; \
+ \
+ typecheck(atomic_t *, count); \
typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- __asm__ __volatile__( \
- LOCK_PREFIX " decl (%%eax) \n" \
- " jns 1f \n" \
- " call "#fail_fn" \n" \
- "1: \n" \
- \
- :"=a" (dummy) \
- : "a" (count) \
- : "memory", "ecx", "edx"); \
+ \
+ asm volatile(LOCK_PREFIX " decl (%%eax)\n" \
+ " jns 1f \n" \
+ " call " #fail_fn "\n" \
+ "1:\n" \
+ : "=a" (dummy) \
+ : "a" (count) \
+ : "memory", "ecx", "edx"); \
} while (0)
@@ -50,8 +48,8 @@ do { \
* wasn't 1 originally. This function returns 0 if the fastpath succeeds,
* or anything the slow path function returns
*/
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count,
+ int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
return fail_fn(count);
@@ -72,22 +70,20 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
* __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
* to return 0 otherwise.
*/
-#define __mutex_fastpath_unlock(count, fail_fn) \
-do { \
- unsigned int dummy; \
- \
- typecheck(atomic_t *, count); \
+#define __mutex_fastpath_unlock(count, fail_fn) \
+do { \
+ unsigned int dummy; \
+ \
+ typecheck(atomic_t *, count); \
typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- __asm__ __volatile__( \
- LOCK_PREFIX " incl (%%eax) \n" \
- " jg 1f \n" \
- " call "#fail_fn" \n" \
- "1: \n" \
- \
- :"=a" (dummy) \
- : "a" (count) \
- : "memory", "ecx", "edx"); \
+ \
+ asm volatile(LOCK_PREFIX " incl (%%eax)\n" \
+ " jg 1f\n" \
+ " call " #fail_fn "\n" \
+ "1:\n" \
+ : "=a" (dummy) \
+ : "a" (count) \
+ : "memory", "ecx", "edx"); \
} while (0)
#define __mutex_slowpath_needs_to_unlock() 1
@@ -104,8 +100,8 @@ do { \
* Additionally, if the value was < 0 originally, this function must not leave
* it to 0 on failure.
*/
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_trylock(atomic_t *count,
+ int (*fail_fn)(atomic_t *))
{
/*
* We have two variants here. The cmpxchg based one is the best one
diff --git a/include/asm-x86/mutex_64.h b/include/asm-x86/mutex_64.h
index 6c2949a3c677..f3fae9becb38 100644
--- a/include/asm-x86/mutex_64.h
+++ b/include/asm-x86/mutex_64.h
@@ -16,23 +16,21 @@
*
* Atomically decrements @v and calls <fail_fn> if the result is negative.
*/
-#define __mutex_fastpath_lock(v, fail_fn) \
-do { \
- unsigned long dummy; \
- \
- typecheck(atomic_t *, v); \
- typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- __asm__ __volatile__( \
- LOCK_PREFIX " decl (%%rdi) \n" \
- " jns 1f \n" \
- " call "#fail_fn" \n" \
- "1:" \
- \
- :"=D" (dummy) \
- : "D" (v) \
- : "rax", "rsi", "rdx", "rcx", \
- "r8", "r9", "r10", "r11", "memory"); \
+#define __mutex_fastpath_lock(v, fail_fn) \
+do { \
+ unsigned long dummy; \
+ \
+ typecheck(atomic_t *, v); \
+ typecheck_fn(void (*)(atomic_t *), fail_fn); \
+ \
+ asm volatile(LOCK_PREFIX " decl (%%rdi)\n" \
+ " jns 1f \n" \
+ " call " #fail_fn "\n" \
+ "1:" \
+ : "=D" (dummy) \
+ : "D" (v) \
+ : "rax", "rsi", "rdx", "rcx", \
+ "r8", "r9", "r10", "r11", "memory"); \
} while (0)
/**
@@ -45,9 +43,8 @@ do { \
* it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
* or anything the slow path function returns
*/
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count,
- int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count,
+ int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
return fail_fn(count);
@@ -62,23 +59,21 @@ __mutex_fastpath_lock_retval(atomic_t *count,
*
* Atomically increments @v and calls <fail_fn> if the result is nonpositive.
*/
-#define __mutex_fastpath_unlock(v, fail_fn) \
-do { \
- unsigned long dummy; \
- \
- typecheck(atomic_t *, v); \
- typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- __asm__ __volatile__( \
- LOCK_PREFIX " incl (%%rdi) \n" \
- " jg 1f \n" \
- " call "#fail_fn" \n" \
- "1: " \
- \
- :"=D" (dummy) \
- : "D" (v) \
- : "rax", "rsi", "rdx", "rcx", \
- "r8", "r9", "r10", "r11", "memory"); \
+#define __mutex_fastpath_unlock(v, fail_fn) \
+do { \
+ unsigned long dummy; \
+ \
+ typecheck(atomic_t *, v); \
+ typecheck_fn(void (*)(atomic_t *), fail_fn); \
+ \
+ asm volatile(LOCK_PREFIX " incl (%%rdi)\n" \
+ " jg 1f\n" \
+ " call " #fail_fn "\n" \
+ "1:" \
+ : "=D" (dummy) \
+ : "D" (v) \
+ : "rax", "rsi", "rdx", "rcx", \
+ "r8", "r9", "r10", "r11", "memory"); \
} while (0)
#define __mutex_slowpath_needs_to_unlock() 1
@@ -93,8 +88,8 @@ do { \
* if it wasn't 1 originally. [the fallback function is never used on
* x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
*/
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_trylock(atomic_t *count,
+ int (*fail_fn)(atomic_t *))
{
if (likely(atomic_cmpxchg(count, 1, 0) == 1))
return 1;
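
Both mutex fastpath headers encode the same count protocol: 1 is unlocked, 0 is locked with no waiters, negative means possible waiters. That is why the lock fastpath calls fail_fn when the decrement goes negative (jns falls through otherwise) and the unlock fastpath when the increment stays non-positive (jg). A portable sketch of the protocol using GCC atomics rather than the LOCK-prefixed asm:

    static int state = 1;           /* hypothetical mutex: 1 = unlocked */

    static void lock_sketch(void)
    {
            if (__sync_sub_and_fetch(&state, 1) < 0) {
                    /* contended: slowpath queues and sleeps (elided) */
            }
    }

    static void unlock_sketch(void)
    {
            if (__sync_add_and_fetch(&state, 1) <= 0) {
                    /* waiters possible: slowpath wakes one (elided) */
            }
    }
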
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h
index 53ccac14cead..1e363021e72f 100644
--- a/include/asm-x86/nmi.h
+++ b/include/asm-x86/nmi.h
@@ -1,5 +1,93 @@
-#ifdef CONFIG_X86_32
-# include "nmi_32.h"
+#ifndef _ASM_X86_NMI_H_
+#define _ASM_X86_NMI_H_
+
+#include <linux/pm.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+#ifdef ARCH_HAS_NMI_WATCHDOG
+
+/**
+ * do_nmi_callback
+ *
+ * Check to see if a callback exists and execute it. Return 1
+ * if the handler exists and was handled successfully.
+ */
+int do_nmi_callback(struct pt_regs *regs, int cpu);
+
+#ifdef CONFIG_PM
+
+/** Replace the PM callback routine for NMI. */
+struct pm_dev *set_nmi_pm_callback(pm_callback callback);
+
+/** Unset the PM callback routine back to the default. */
+void unset_nmi_pm_callback(struct pm_dev *dev);
+
#else
-# include "nmi_64.h"
+
+static inline struct pm_dev *set_nmi_pm_callback(pm_callback callback)
+{
+ return 0;
+}
+
+static inline void unset_nmi_pm_callback(struct pm_dev *dev)
+{
+}
+
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_X86_64
+extern void default_do_nmi(struct pt_regs *);
+extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
+extern void nmi_watchdog_default(void);
+#else
+#define nmi_watchdog_default() do {} while (0)
+#endif
+
+extern int check_nmi_watchdog(void);
+extern int nmi_watchdog_enabled;
+extern int unknown_nmi_panic;
+extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
+extern int avail_to_resrv_perfctr_nmi(unsigned int);
+extern int reserve_perfctr_nmi(unsigned int);
+extern void release_perfctr_nmi(unsigned int);
+extern int reserve_evntsel_nmi(unsigned int);
+extern void release_evntsel_nmi(unsigned int);
+
+extern void setup_apic_nmi_watchdog(void *);
+extern void stop_apic_nmi_watchdog(void *);
+extern void disable_timer_nmi_watchdog(void);
+extern void enable_timer_nmi_watchdog(void);
+extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason);
+
+extern atomic_t nmi_active;
+extern unsigned int nmi_watchdog;
+#define NMI_DISABLED -1
+#define NMI_NONE 0
+#define NMI_IO_APIC 1
+#define NMI_LOCAL_APIC 2
+#define NMI_INVALID 3
+#define NMI_DEFAULT NMI_DISABLED
+
+struct ctl_table;
+struct file;
+extern int proc_nmi_enabled(struct ctl_table *, int, struct file *,
+ void __user *, size_t *, loff_t *);
+extern int unknown_nmi_panic;
+
+void __trigger_all_cpu_backtrace(void);
+#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+
+#endif
+
+void lapic_watchdog_stop(void);
+int lapic_watchdog_init(unsigned nmi_hz);
+int lapic_wd_event(unsigned nmi_hz);
+unsigned lapic_adjust_nmi_hz(unsigned hz);
+int lapic_watchdog_ok(void);
+void disable_lapic_nmi_watchdog(void);
+void enable_lapic_nmi_watchdog(void);
+void stop_nmi(void);
+void restart_nmi(void);
+
#endif
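
The merged header keeps the nmi_watchdog mode constants, which select between the local-APIC-driven and IO-APIC/timer-driven watchdogs. A hypothetical sketch of the dispatch they imply, using only symbols declared above:

    static void example_start_watchdog(void)
    {
            if (nmi_watchdog == NMI_LOCAL_APIC)
                    setup_apic_nmi_watchdog(NULL);
            else if (nmi_watchdog == NMI_IO_APIC)
                    enable_timer_nmi_watchdog();
    }
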
diff --git a/include/asm-x86/nmi_32.h b/include/asm-x86/nmi_32.h
deleted file mode 100644
index 7206c7e8a388..000000000000
--- a/include/asm-x86/nmi_32.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef ASM_NMI_H
-#define ASM_NMI_H
-
-#include <linux/pm.h>
-#include <asm/irq.h>
-
-#ifdef ARCH_HAS_NMI_WATCHDOG
-
-/**
- * do_nmi_callback
- *
- * Check to see if a callback exists and execute it. Return 1
- * if the handler exists and was handled successfully.
- */
-int do_nmi_callback(struct pt_regs *regs, int cpu);
-
-extern int nmi_watchdog_enabled;
-extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
-extern int avail_to_resrv_perfctr_nmi(unsigned int);
-extern int reserve_perfctr_nmi(unsigned int);
-extern void release_perfctr_nmi(unsigned int);
-extern int reserve_evntsel_nmi(unsigned int);
-extern void release_evntsel_nmi(unsigned int);
-
-extern void setup_apic_nmi_watchdog (void *);
-extern void stop_apic_nmi_watchdog (void *);
-extern void disable_timer_nmi_watchdog(void);
-extern void enable_timer_nmi_watchdog(void);
-extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
-
-extern atomic_t nmi_active;
-extern unsigned int nmi_watchdog;
-#define NMI_DISABLED -1
-#define NMI_NONE 0
-#define NMI_IO_APIC 1
-#define NMI_LOCAL_APIC 2
-#define NMI_INVALID 3
-#define NMI_DEFAULT NMI_DISABLED
-
-struct ctl_table;
-struct file;
-extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
- void __user *, size_t *, loff_t *);
-extern int unknown_nmi_panic;
-
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
-
-#endif
-
-void lapic_watchdog_stop(void);
-int lapic_watchdog_init(unsigned nmi_hz);
-int lapic_wd_event(unsigned nmi_hz);
-unsigned lapic_adjust_nmi_hz(unsigned hz);
-int lapic_watchdog_ok(void);
-void disable_lapic_nmi_watchdog(void);
-void enable_lapic_nmi_watchdog(void);
-void stop_nmi(void);
-void restart_nmi(void);
-
-#endif /* ASM_NMI_H */
diff --git a/include/asm-x86/nmi_64.h b/include/asm-x86/nmi_64.h
deleted file mode 100644
index 2eeb74e5f3ff..000000000000
--- a/include/asm-x86/nmi_64.h
+++ /dev/null
@@ -1,90 +0,0 @@
-#ifndef ASM_NMI_H
-#define ASM_NMI_H
-
-#include <linux/pm.h>
-#include <asm/io.h>
-
-/**
- * do_nmi_callback
- *
- * Check to see if a callback exists and execute it. Return 1
- * if the handler exists and was handled successfully.
- */
-int do_nmi_callback(struct pt_regs *regs, int cpu);
-
-#ifdef CONFIG_PM
-
-/** Replace the PM callback routine for NMI. */
-struct pm_dev * set_nmi_pm_callback(pm_callback callback);
-
-/** Unset the PM callback routine back to the default. */
-void unset_nmi_pm_callback(struct pm_dev * dev);
-
-#else
-
-static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback)
-{
- return 0;
-}
-
-static inline void unset_nmi_pm_callback(struct pm_dev * dev)
-{
-}
-
-#endif /* CONFIG_PM */
-
-extern void default_do_nmi(struct pt_regs *);
-extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
-
-#define get_nmi_reason() inb(0x61)
-
-extern int unknown_nmi_panic;
-extern int nmi_watchdog_enabled;
-
-extern int check_nmi_watchdog(void);
-extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
-extern int avail_to_resrv_perfctr_nmi(unsigned int);
-extern int reserve_perfctr_nmi(unsigned int);
-extern void release_perfctr_nmi(unsigned int);
-extern int reserve_evntsel_nmi(unsigned int);
-extern void release_evntsel_nmi(unsigned int);
-
-extern void setup_apic_nmi_watchdog (void *);
-extern void stop_apic_nmi_watchdog (void *);
-extern void disable_timer_nmi_watchdog(void);
-extern void enable_timer_nmi_watchdog(void);
-extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
-
-extern void nmi_watchdog_default(void);
-
-extern atomic_t nmi_active;
-extern unsigned int nmi_watchdog;
-#define NMI_DISABLED -1
-#define NMI_NONE 0
-#define NMI_IO_APIC 1
-#define NMI_LOCAL_APIC 2
-#define NMI_INVALID 3
-#define NMI_DEFAULT NMI_DISABLED
-
-struct ctl_table;
-struct file;
-extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
- void __user *, size_t *, loff_t *);
-
-extern int unknown_nmi_panic;
-
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
-
-
-void lapic_watchdog_stop(void);
-int lapic_watchdog_init(unsigned nmi_hz);
-int lapic_wd_event(unsigned nmi_hz);
-unsigned lapic_adjust_nmi_hz(unsigned hz);
-int lapic_watchdog_ok(void);
-void disable_lapic_nmi_watchdog(void);
-void enable_lapic_nmi_watchdog(void);
-void stop_nmi(void);
-void restart_nmi(void);
-
-#endif /* ASM_NMI_H */
diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h
index b3930ae539b3..ad0bedd10b89 100644
--- a/include/asm-x86/nops.h
+++ b/include/asm-x86/nops.h
@@ -5,6 +5,8 @@
/* generic versions from gas
1: nop
+ the following instructions are NOT nops in 64-bit mode,
+ for 64-bit mode use K8 or P6 nops instead
2: movl %esi,%esi
3: leal 0x00(%esi),%esi
4: leal 0x00(,%esi,1),%esi
diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h
index 15fe07cde586..32c22ae0709f 100644
--- a/include/asm-x86/numa_64.h
+++ b/include/asm-x86/numa_64.h
@@ -1,11 +1,12 @@
-#ifndef _ASM_X8664_NUMA_H
+#ifndef _ASM_X8664_NUMA_H
#define _ASM_X8664_NUMA_H 1
#include <linux/nodemask.h>
#include <asm/apicdef.h>
struct bootnode {
- u64 start,end;
+ u64 start;
+ u64 end;
};
extern int compute_hash_shift(struct bootnode *nodes, int numnodes);
diff --git a/include/asm-x86/numaq.h b/include/asm-x86/numaq.h
index 38f710dc37f2..94b86c31239a 100644
--- a/include/asm-x86/numaq.h
+++ b/include/asm-x86/numaq.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2002, IBM Corp.
*
- * All rights reserved.
+ * All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -33,7 +33,8 @@ extern int get_memcfg_numaq(void);
/*
* SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the
*/
-#define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private quad space */
+#define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private
+ quad space */
/*
* Communication area for each processor on lynxer-processor tests.
@@ -139,7 +140,7 @@ struct sys_cfg_data {
unsigned int low_shrd_mem_base; /* 0 or 512MB or 1GB */
unsigned int low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */
/* may not be totally populated */
- unsigned int split_mem_enbl; /* 0 for no low shared memory */
+ unsigned int split_mem_enbl; /* 0 for no low shared memory */
unsigned int mmio_sz; /* Size of total system memory mapped I/O */
/* (in MB). */
unsigned int quad_spin_lock; /* Spare location used for quad */
@@ -152,7 +153,7 @@ struct sys_cfg_data {
/*
* memory configuration area for each quad
*/
- struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */
+ struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */
};
static inline unsigned long *get_zholes_size(int nid)
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index a05b2896492f..6724a4bc6b7a 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -33,10 +33,8 @@
#ifdef CONFIG_X86_64
#include <asm/page_64.h>
-#define max_pfn_mapped end_pfn_map
#else
#include <asm/page_32.h>
-#define max_pfn_mapped max_low_pfn
#endif /* CONFIG_X86_64 */
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
@@ -50,6 +48,8 @@
extern int page_is_ram(unsigned long pagenr);
+extern unsigned long max_pfn_mapped;
+
struct page;
static inline void clear_user_page(void *page, unsigned long vaddr,
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
index 5f7257fd589b..424e82f8ae27 100644
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -47,7 +47,10 @@ typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
typedef unsigned long phys_addr_t;
-typedef union { pteval_t pte, pte_low; } pte_t;
+typedef union {
+ pteval_t pte;
+ pteval_t pte_low;
+} pte_t;
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_X86_PAE */
@@ -61,7 +64,7 @@ typedef struct page *pgtable_t;
#endif
#ifndef __ASSEMBLY__
-#define __phys_addr(x) ((x)-PAGE_OFFSET)
+#define __phys_addr(x) ((x) - PAGE_OFFSET)
#define __phys_reloc_hide(x) RELOC_HIDE((x), 0)
#ifdef CONFIG_FLATMEM
@@ -78,7 +81,7 @@ extern unsigned int __VMALLOC_RESERVE;
extern int sysctl_legacy_va_layout;
#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
-#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
+#define MAXMEM (-__PAGE_OFFSET - __VMALLOC_RESERVE)
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>
diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h
index 143546073b95..6ea72859c491 100644
--- a/include/asm-x86/page_64.h
+++ b/include/asm-x86/page_64.h
@@ -5,7 +5,7 @@
#define THREAD_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
-#define CURRENT_MASK (~(THREAD_SIZE-1))
+#define CURRENT_MASK (~(THREAD_SIZE - 1))
#define EXCEPTION_STACK_ORDER 0
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
@@ -48,10 +48,10 @@
#define __VIRTUAL_MASK_SHIFT 48
/*
- * Kernel image size is limited to 128 MB (see level2_kernel_pgt in
+ * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
* arch/x86/kernel/head_64.S), and it is mapped here:
*/
-#define KERNEL_IMAGE_SIZE (128*1024*1024)
+#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
#ifndef __ASSEMBLY__
@@ -59,7 +59,6 @@ void clear_page(void *page);
void copy_page(void *to, void *from);
extern unsigned long end_pfn;
-extern unsigned long end_pfn_map;
extern unsigned long phys_base;
extern unsigned long __phys_addr(unsigned long);
@@ -81,6 +80,9 @@ typedef struct { pteval_t pte; } pte_t;
#define vmemmap ((struct page *)VMEMMAP_START)
+extern unsigned long init_memory_mapping(unsigned long start,
+ unsigned long end);
+
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_FLATMEM
diff --git a/include/asm-x86/param.h b/include/asm-x86/param.h
index c996ec4da0c8..6f0d0422f4ca 100644
--- a/include/asm-x86/param.h
+++ b/include/asm-x86/param.h
@@ -3,8 +3,8 @@
#ifdef __KERNEL__
# define HZ CONFIG_HZ /* Internal kernel timer frequency */
-# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
-# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
+# define USER_HZ 100 /* some user interfaces are */
+# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */
#endif
#ifndef HZ
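
USER_HZ stays pinned at 100 no matter what CONFIG_HZ is, because times() and /proc report in these user-visible ticks; portable userspace reads the value via sysconf(_SC_CLK_TCK) rather than hard-coding it. A standalone sketch:

    #include <stdio.h>
    #include <sys/times.h>
    #include <unistd.h>

    int main(void)
    {
            struct tms t;
            long ticks = sysconf(_SC_CLK_TCK);  /* == USER_HZ, typically 100 */

            times(&t);
            printf("user CPU time: %.2f s\n", (double)t.tms_utime / ticks);
            return 0;
    }
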
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index d6236eb46466..3d419398499b 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -231,7 +231,8 @@ struct pv_mmu_ops {
void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval);
void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
- void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+ void (*pte_update)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
void (*pte_update_defer)(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
@@ -246,7 +247,8 @@ struct pv_mmu_ops {
void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
- void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+ void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
void (*pmd_clear)(pmd_t *pmdp);
#endif /* CONFIG_X86_PAE */
@@ -274,8 +276,7 @@ struct pv_mmu_ops {
/* This contains all the paravirt structures: we get a convenient
* number for each function using the offset which we use to indicate
* what to patch. */
-struct paravirt_patch_template
-{
+struct paravirt_patch_template {
struct pv_init_ops pv_init_ops;
struct pv_time_ops pv_time_ops;
struct pv_cpu_ops pv_cpu_ops;
@@ -660,43 +661,56 @@ static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
}
/* These should all do BUG_ON(_err), but our headers are too tangled. */
-#define rdmsr(msr,val1,val2) do { \
+#define rdmsr(msr, val1, val2) \
+do { \
int _err; \
u64 _l = paravirt_read_msr(msr, &_err); \
val1 = (u32)_l; \
val2 = _l >> 32; \
-} while(0)
+} while (0)
-#define wrmsr(msr,val1,val2) do { \
+#define wrmsr(msr, val1, val2) \
+do { \
paravirt_write_msr(msr, val1, val2); \
-} while(0)
+} while (0)
-#define rdmsrl(msr,val) do { \
+#define rdmsrl(msr, val) \
+do { \
int _err; \
val = paravirt_read_msr(msr, &_err); \
-} while(0)
+} while (0)
-#define wrmsrl(msr,val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
-#define wrmsr_safe(msr,a,b) paravirt_write_msr(msr, a, b)
+#define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
+#define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b)
/* rdmsr with exception handling */
-#define rdmsr_safe(msr,a,b) ({ \
+#define rdmsr_safe(msr, a, b) \
+({ \
int _err; \
u64 _l = paravirt_read_msr(msr, &_err); \
(*a) = (u32)_l; \
(*b) = _l >> 32; \
- _err; })
+ _err; \
+})
+
+static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+{
+ int err;
+ *p = paravirt_read_msr(msr, &err);
+ return err;
+}
static inline u64 paravirt_read_tsc(void)
{
return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}
-#define rdtscl(low) do { \
+#define rdtscl(low) \
+do { \
u64 _l = paravirt_read_tsc(); \
low = (int)_l; \
-} while(0)
+} while (0)
#define rdtscll(val) (val = paravirt_read_tsc())
@@ -711,11 +725,12 @@ static inline unsigned long long paravirt_read_pmc(int counter)
return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}
-#define rdpmc(counter,low,high) do { \
+#define rdpmc(counter, low, high) \
+do { \
u64 _l = paravirt_read_pmc(counter); \
low = (u32)_l; \
high = _l >> 32; \
-} while(0)
+} while (0)
static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
@@ -794,7 +809,8 @@ static inline void set_iopl_mask(unsigned mask)
}
/* The paravirtualized I/O functions */
-static inline void slow_down_io(void) {
+static inline void slow_down_io(void)
+{
pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
pv_cpu_ops.io_delay();
diff --git a/include/asm-x86/parport.h b/include/asm-x86/parport.h
index 019cbca24a38..3c4ffeb467e9 100644
--- a/include/asm-x86/parport.h
+++ b/include/asm-x86/parport.h
@@ -1,10 +1,10 @@
#ifndef _ASM_X86_PARPORT_H
#define _ASM_X86_PARPORT_H
-static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
-static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
+static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma);
+static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
{
- return parport_pc_find_isa_ports (autoirq, autodma);
+ return parport_pc_find_isa_ports(autoirq, autodma);
}
#endif /* _ASM_X86_PARPORT_H */
diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h
new file mode 100644
index 000000000000..8b822b5a1786
--- /dev/null
+++ b/include/asm-x86/pat.h
@@ -0,0 +1,16 @@
+
+#ifndef _ASM_PAT_H
+#define _ASM_PAT_H 1
+
+#include <linux/types.h>
+
+extern int pat_wc_enabled;
+
+extern void pat_init(void);
+
+extern int reserve_memtype(u64 start, u64 end,
+ unsigned long req_type, unsigned long *ret_type);
+extern int free_memtype(u64 start, u64 end);
+
+#endif
+
diff --git a/include/asm-x86/pci-direct.h b/include/asm-x86/pci-direct.h
index 6823fa4f1afa..5b21485be573 100644
--- a/include/asm-x86/pci-direct.h
+++ b/include/asm-x86/pci-direct.h
@@ -4,7 +4,7 @@
#include <linux/types.h>
/* Direct PCI access. This is used for PCI accesses in early boot before
- the PCI subsystem works. */
+ the PCI subsystem works. */
extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset);
extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset);
diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h
index c61190cb9e12..ddd8e248fc0a 100644
--- a/include/asm-x86/pci.h
+++ b/include/asm-x86/pci.h
@@ -8,14 +8,13 @@
#include <asm/scatterlist.h>
#include <asm/io.h>
-
#ifdef __KERNEL__
struct pci_sysdata {
int domain; /* PCI domain */
int node; /* NUMA node */
#ifdef CONFIG_X86_64
- void* iommu; /* IOMMU private data */
+ void *iommu; /* IOMMU private data */
#endif
};
@@ -52,7 +51,7 @@ extern unsigned long pci_mem_start;
#define PCIBIOS_MIN_CARDBUS_IO 0x4000
void pcibios_config_init(void);
-struct pci_bus * pcibios_scan_root(int bus);
+struct pci_bus *pcibios_scan_root(int bus);
void pcibios_set_master(struct pci_dev *dev);
void pcibios_penalize_isa_irq(int irq, int active);
@@ -62,7 +61,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
+ enum pci_mmap_state mmap_state,
+ int write_combine);
#ifdef CONFIG_PCI
diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h
index 374690314539..df867e5d80b1 100644
--- a/include/asm-x86/pci_64.h
+++ b/include/asm-x86/pci_64.h
@@ -1,12 +1,10 @@
#ifndef __x8664_PCI_H
#define __x8664_PCI_H
-
#ifdef __KERNEL__
-
#ifdef CONFIG_CALGARY_IOMMU
-static inline void* pci_iommu(struct pci_bus *bus)
+static inline void *pci_iommu(struct pci_bus *bus)
{
struct pci_sysdata *sd = bus->sysdata;
return sd->iommu;
@@ -19,11 +17,10 @@ static inline void set_pci_iommu(struct pci_bus *bus, void *val)
}
#endif /* CONFIG_CALGARY_IOMMU */
-
-extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
-extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
-
-
+extern int (*pci_config_read)(int seg, int bus, int dev, int fn,
+ int reg, int len, u32 *value);
+extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
+ int reg, int len, u32 value);
extern void pci_iommu_alloc(void);
@@ -65,5 +62,4 @@ extern void pci_iommu_alloc(void);
#endif /* __KERNEL__ */
-
#endif /* __x8664_PCI_H */
diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h
index c0305bff0f19..101fb9e11954 100644
--- a/include/asm-x86/pda.h
+++ b/include/asm-x86/pda.h
@@ -22,7 +22,6 @@ struct x8664_pda {
offset 40!!! */
#endif
char *irqstackptr;
- unsigned int nodenumber; /* number of current node */
unsigned int __softirq_pending;
unsigned int __nmi_count; /* number of NMI on this CPUs */
short mmu_state;
@@ -58,34 +57,36 @@ extern struct x8664_pda _proxy_pda;
#define pda_offset(field) offsetof(struct x8664_pda, field)
-#define pda_to_op(op, field, val) do { \
- typedef typeof(_proxy_pda.field) T__; \
- if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
- switch (sizeof(_proxy_pda.field)) { \
- case 2: \
- asm(op "w %1,%%gs:%c2" : \
- "+m" (_proxy_pda.field) : \
- "ri" ((T__)val), \
- "i"(pda_offset(field))); \
- break; \
- case 4: \
- asm(op "l %1,%%gs:%c2" : \
- "+m" (_proxy_pda.field) : \
- "ri" ((T__)val), \
- "i" (pda_offset(field))); \
- break; \
- case 8: \
- asm(op "q %1,%%gs:%c2": \
- "+m" (_proxy_pda.field) : \
- "ri" ((T__)val), \
- "i"(pda_offset(field))); \
- break; \
- default: \
- __bad_pda_field(); \
- } \
- } while (0)
+#define pda_to_op(op, field, val) \
+do { \
+ typedef typeof(_proxy_pda.field) T__; \
+ if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
+ switch (sizeof(_proxy_pda.field)) { \
+ case 2: \
+ asm(op "w %1,%%gs:%c2" : \
+ "+m" (_proxy_pda.field) : \
+ "ri" ((T__)val), \
+ "i"(pda_offset(field))); \
+ break; \
+ case 4: \
+ asm(op "l %1,%%gs:%c2" : \
+ "+m" (_proxy_pda.field) : \
+ "ri" ((T__)val), \
+ "i" (pda_offset(field))); \
+ break; \
+ case 8: \
+ asm(op "q %1,%%gs:%c2": \
+ "+m" (_proxy_pda.field) : \
+ "ri" ((T__)val), \
+ "i"(pda_offset(field))); \
+ break; \
+ default: \
+ __bad_pda_field(); \
+ } \
+} while (0)
-#define pda_from_op(op,field) ({ \
+#define pda_from_op(op, field) \
+({ \
typeof(_proxy_pda.field) ret__; \
switch (sizeof(_proxy_pda.field)) { \
case 2: \
@@ -93,23 +94,24 @@ extern struct x8664_pda _proxy_pda;
"=r" (ret__) : \
"i" (pda_offset(field)), \
"m" (_proxy_pda.field)); \
- break; \
+ break; \
case 4: \
asm(op "l %%gs:%c1,%0": \
"=r" (ret__): \
"i" (pda_offset(field)), \
"m" (_proxy_pda.field)); \
- break; \
+ break; \
case 8: \
asm(op "q %%gs:%c1,%0": \
"=r" (ret__) : \
"i" (pda_offset(field)), \
"m" (_proxy_pda.field)); \
- break; \
+ break; \
default: \
__bad_pda_field(); \
- } \
- ret__; })
+ } \
+ ret__; \
+})
#define read_pda(field) pda_from_op("mov", field)
#define write_pda(field, val) pda_to_op("mov", field, val)
@@ -118,12 +120,13 @@ extern struct x8664_pda _proxy_pda;
#define or_pda(field, val) pda_to_op("or", field, val)
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
-#define test_and_clear_bit_pda(bit, field) ({ \
- int old__; \
- asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
- : "=r" (old__), "+m" (_proxy_pda.field) \
- : "dIr" (bit), "i" (pda_offset(field)) : "memory"); \
- old__; \
+#define test_and_clear_bit_pda(bit, field) \
+({ \
+ int old__; \
+ asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
+ : "=r" (old__), "+m" (_proxy_pda.field) \
+ : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
+ old__; \
})
#endif
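
Each pda accessor above compiles to a single %gs-relative instruction sized to the field. A sketch of typical use, mirroring the enter_lazy_tlb() hunk from mmu_context_64.h earlier in this diff (the TLBSTATE_* constants are assumed from the tlbflush definitions):

    static inline void enter_lazy_sketch(void)
    {
            if (read_pda(mmu_state) == TLBSTATE_OK)
                    write_pda(mmu_state, TLBSTATE_LAZY);
    }
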
diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h
index 0dec00f27eb4..736fc3bb8e1e 100644
--- a/include/asm-x86/percpu.h
+++ b/include/asm-x86/percpu.h
@@ -85,58 +85,62 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
* don't give an lvalue though). */
extern void __bad_percpu_size(void);
-#define percpu_to_op(op,var,val) \
- do { \
- typedef typeof(var) T__; \
- if (0) { T__ tmp__; tmp__ = (val); } \
- switch (sizeof(var)) { \
- case 1: \
- asm(op "b %1,"__percpu_seg"%0" \
- : "+m" (var) \
- :"ri" ((T__)val)); \
- break; \
- case 2: \
- asm(op "w %1,"__percpu_seg"%0" \
- : "+m" (var) \
- :"ri" ((T__)val)); \
- break; \
- case 4: \
- asm(op "l %1,"__percpu_seg"%0" \
- : "+m" (var) \
- :"ri" ((T__)val)); \
- break; \
- default: __bad_percpu_size(); \
- } \
- } while (0)
-
-#define percpu_from_op(op,var) \
- ({ \
- typeof(var) ret__; \
- switch (sizeof(var)) { \
- case 1: \
- asm(op "b "__percpu_seg"%1,%0" \
- : "=r" (ret__) \
- : "m" (var)); \
- break; \
- case 2: \
- asm(op "w "__percpu_seg"%1,%0" \
- : "=r" (ret__) \
- : "m" (var)); \
- break; \
- case 4: \
- asm(op "l "__percpu_seg"%1,%0" \
- : "=r" (ret__) \
- : "m" (var)); \
- break; \
- default: __bad_percpu_size(); \
- } \
- ret__; })
+#define percpu_to_op(op, var, val) \
+do { \
+ typedef typeof(var) T__; \
+ if (0) { \
+ T__ tmp__; \
+ tmp__ = (val); \
+ } \
+ switch (sizeof(var)) { \
+ case 1: \
+ asm(op "b %1,"__percpu_seg"%0" \
+ : "+m" (var) \
+ : "ri" ((T__)val)); \
+ break; \
+ case 2: \
+ asm(op "w %1,"__percpu_seg"%0" \
+ : "+m" (var) \
+ : "ri" ((T__)val)); \
+ break; \
+ case 4: \
+ asm(op "l %1,"__percpu_seg"%0" \
+ : "+m" (var) \
+ : "ri" ((T__)val)); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+} while (0)
+
+#define percpu_from_op(op, var) \
+({ \
+ typeof(var) ret__; \
+ switch (sizeof(var)) { \
+ case 1: \
+ asm(op "b "__percpu_seg"%1,%0" \
+ : "=r" (ret__) \
+ : "m" (var)); \
+ break; \
+ case 2: \
+ asm(op "w "__percpu_seg"%1,%0" \
+ : "=r" (ret__) \
+ : "m" (var)); \
+ break; \
+ case 4: \
+ asm(op "l "__percpu_seg"%1,%0" \
+ : "=r" (ret__) \
+ : "m" (var)); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+ ret__; \
+})
#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
-#define x86_write_percpu(var,val) percpu_to_op("mov", per_cpu__##var, val)
-#define x86_add_percpu(var,val) percpu_to_op("add", per_cpu__##var, val)
-#define x86_sub_percpu(var,val) percpu_to_op("sub", per_cpu__##var, val)
-#define x86_or_percpu(var,val) percpu_to_op("or", per_cpu__##var, val)
+#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)
+#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val)
+#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
+#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
#endif /* !__ASSEMBLY__ */
#endif /* !CONFIG_X86_64 */
#endif /* _ASM_X86_PERCPU_H_ */
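
percpu_to_op()/percpu_from_op() pick the operand-size suffix by switching on
sizeof(var); the default arm calls the deliberately undefined
__bad_percpu_size(), so an unsupported size becomes a link-time error once the
compiler eliminates the dead branches. A standalone sketch of that pattern in
GNU C (bad_size() is a hypothetical stand-in, and like the kernel variant this
needs -O1 or higher so the dead call is dropped before linking):

        #include <stdio.h>

        extern void bad_size(void);     /* intentionally undefined */

        #define demo_read(var)                          \
        ({                                              \
                typeof(var) ret__;                      \
                switch (sizeof(var)) {                  \
                case 1:                                 \
                case 2:                                 \
                case 4:                                 \
                        ret__ = (var);                  \
                        break;                          \
                default:                                \
                        bad_size();                     \
                }                                       \
                ret__;                                  \
        })

        int main(void)
        {
                int x = 42;

                printf("%d\n", demo_read(x));   /* prints 42 */
                return 0;
        }
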
diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h
index 701404fab308..46bc52c0eae1 100644
--- a/include/asm-x86/pgtable-2level.h
+++ b/include/asm-x86/pgtable-2level.h
@@ -26,7 +26,8 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
native_set_pte(ptep, pte);
}
-static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr,
+static inline void native_set_pte_present(struct mm_struct *mm,
+ unsigned long addr,
pte_t *ptep, pte_t pte)
{
native_set_pte(ptep, pte);
@@ -37,7 +38,8 @@ static inline void native_pmd_clear(pmd_t *pmdp)
native_set_pmd(pmdp, __pmd(0));
}
-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
+static inline void native_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *xp)
{
*xp = native_make_pte(0);
}
@@ -61,16 +63,18 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
*/
#define PTE_FILE_MAX_BITS 29
-#define pte_to_pgoff(pte) \
- ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
+#define pte_to_pgoff(pte) \
+ ((((pte).pte_low >> 1) & 0x1f) + (((pte).pte_low >> 8) << 5))
-#define pgoff_to_pte(off) \
- ((pte_t) { .pte_low = (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
+#define pgoff_to_pte(off) \
+ ((pte_t) { .pte_low = (((off) & 0x1f) << 1) + \
+ (((off) >> 5) << 8) + _PAGE_FILE })
/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 1) & 0x1f)
#define __swp_offset(x) ((x).val >> 8)
-#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+#define __swp_entry(type, offset) \
+ ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
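
The reformatted pte_to_pgoff()/pgoff_to_pte() pair encodes a 29-bit file
offset into a non-present PTE while keeping bit 0 (_PAGE_PRESENT) clear and
skipping bits 6 and 7 (_PAGE_FILE and _PAGE_PROTNONE): five offset bits sit at
PTE bits 1-5 and the remaining bits start at bit 8. A quick round-trip check
of the same arithmetic in plain C:

        #include <assert.h>
        #include <stdint.h>

        static uint32_t pgoff_to_ptelow(uint32_t off)
        {
                return ((off & 0x1f) << 1) + ((off >> 5) << 8);
        }

        static uint32_t ptelow_to_pgoff(uint32_t pte_low)
        {
                return ((pte_low >> 1) & 0x1f) + ((pte_low >> 8) << 5);
        }

        int main(void)
        {
                /* Offsets survive the encoding for anything below 2^29. */
                for (uint32_t off = 0; off < (1u << 29); off += 98765u)
                        assert(ptelow_to_pgoff(pgoff_to_ptelow(off)) == off);
                return 0;
        }
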
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index 1d763eec740f..8b4a9d44b7f4 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -8,22 +8,26 @@
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*/
-#define pte_ERROR(e) \
- printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
-#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
-#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
-
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %p(%08lx%08lx).\n", \
+ __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %p(%016Lx).\n", \
+ __FILE__, __LINE__, &(e), pmd_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %p(%016Lx).\n", \
+ __FILE__, __LINE__, &(e), pgd_val(e))
static inline int pud_none(pud_t pud)
{
return pud_val(pud) == 0;
}
+
static inline int pud_bad(pud_t pud)
{
return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
}
+
static inline int pud_present(pud_t pud)
{
return pud_val(pud) & _PAGE_PRESENT;
@@ -48,7 +52,8 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
* we are justified in merely clearing the PTE present bit, followed
* by a set. The ordering here is important.
*/
-static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr,
+static inline void native_set_pte_present(struct mm_struct *mm,
+ unsigned long addr,
pte_t *ptep, pte_t pte)
{
ptep->pte_low = 0;
@@ -60,15 +65,17 @@ static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr,
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
- set_64bit((unsigned long long *)(ptep),native_pte_val(pte));
+ set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}
+
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
- set_64bit((unsigned long long *)(pmdp),native_pmd_val(pmd));
+ set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}
+
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
- set_64bit((unsigned long long *)(pudp),native_pud_val(pud));
+ set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}
/*
@@ -76,7 +83,8 @@ static inline void native_set_pud(pud_t *pudp, pud_t pud)
* entry, so clear the bottom half first and enforce ordering with a compiler
* barrier.
*/
-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
{
ptep->pte_low = 0;
smp_wmb();
@@ -107,20 +115,19 @@ static inline void pud_clear(pud_t *pudp)
* current pgd to avoid unnecessary TLB flushes.
*/
pgd = read_cr3();
- if (__pa(pudp) >= pgd && __pa(pudp) < (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
+ if (__pa(pudp) >= pgd && __pa(pudp) <
+ (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
write_cr3(pgd);
}
-#define pud_page(pud) \
-((struct page *) __va(pud_val(pud) & PAGE_MASK))
+#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PAGE_MASK))
-#define pud_page_vaddr(pud) \
-((unsigned long) __va(pud_val(pud) & PAGE_MASK))
+#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
/* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
- pmd_index(address))
+#define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) + \
+ pmd_index(address))
#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
@@ -161,7 +168,8 @@ static inline unsigned long pte_pfn(pte_t pte)
* put the 32 bits of offset into the high part.
*/
#define pte_to_pgoff(pte) ((pte).pte_high)
-#define pgoff_to_pte(off) ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
+#define pgoff_to_pte(off) \
+ ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
#define PTE_FILE_MAX_BITS 32
/* Encode and de-code a swap entry */
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 9cf472aeb9ce..f1d9f4a03f6f 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -4,13 +4,13 @@
#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
#define FIRST_USER_ADDRESS 0
-#define _PAGE_BIT_PRESENT 0
-#define _PAGE_BIT_RW 1
-#define _PAGE_BIT_USER 2
-#define _PAGE_BIT_PWT 3
-#define _PAGE_BIT_PCD 4
-#define _PAGE_BIT_ACCESSED 5
-#define _PAGE_BIT_DIRTY 6
+#define _PAGE_BIT_PRESENT 0 /* is present */
+#define _PAGE_BIT_RW 1 /* writeable */
+#define _PAGE_BIT_USER 2 /* userspace addressable */
+#define _PAGE_BIT_PWT 3 /* page write through */
+#define _PAGE_BIT_PCD 4 /* page cache disabled */
+#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
+#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
#define _PAGE_BIT_FILE 6
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT 7 /* on 4KB pages */
@@ -48,24 +48,39 @@
#endif
/* If _PAGE_PRESENT is clear, we use these: */
-#define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping, saved PTE; unset:swap */
+#define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping,
+ * saved PTE; unset:swap */
#define _PAGE_PROTNONE _PAGE_PSE /* if the user mapped it with PROT_NONE;
pte_present gives true */
-#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
+ _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
+#define _PAGE_CACHE_WB (0)
+#define _PAGE_CACHE_WC (_PAGE_PWT)
+#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD)
+#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT)
-#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_NX)
+
+#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \
+ _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED)
#define PAGE_COPY PAGE_COPY_NOEXEC
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED)
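
The new _PAGE_CACHE_* values spell out how the PWT/PCD bit pair selects a
caching mode; treating PWT-alone as write-combining assumes the PAT MSR is
reprogrammed accordingly, which is what this _PAGE_CACHE_WC plumbing prepares
for. A compilable sketch of the decoding (constants mirror _PAGE_PWT and
_PAGE_PCD at bits 3 and 4 above):

        #include <stdio.h>

        #define PAGE_PWT (1u << 3)
        #define PAGE_PCD (1u << 4)

        static const char *cache_mode(unsigned int pte)
        {
                switch (pte & (PAGE_PCD | PAGE_PWT)) {
                case 0:                   return "WB  (write-back)";
                case PAGE_PWT:            return "WC  (write-combining, via PAT)";
                case PAGE_PCD:            return "UC- (uncached, overridable)";
                case PAGE_PCD | PAGE_PWT: return "UC  (strongly uncached)";
                }
                return "unreachable";
        }

        int main(void)
        {
                printf("%s\n", cache_mode(PAGE_PCD | PAGE_PWT));  /* UC */
                return 0;
        }
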
#ifdef CONFIG_X86_32
#define _PAGE_KERNEL_EXEC \
@@ -84,6 +99,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
+#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
@@ -101,6 +117,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX MAKE_GLOBAL(__PAGE_KERNEL_RX)
+#define PAGE_KERNEL_WC MAKE_GLOBAL(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS MAKE_GLOBAL(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE)
@@ -134,7 +151,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern spinlock_t pgd_lock;
@@ -144,30 +161,101 @@ extern struct list_head pgd_list;
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
-static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; }
-static inline int pte_global(pte_t pte) { return pte_val(pte) & _PAGE_GLOBAL; }
-static inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_NX); }
-
-static inline int pmd_large(pmd_t pte) {
- return (pmd_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) ==
- (_PAGE_PSE|_PAGE_PRESENT);
+static inline int pte_dirty(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_DIRTY;
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+static inline int pte_write(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_RW;
+}
+
+static inline int pte_file(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_FILE;
+}
+
+static inline int pte_huge(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_PSE;
+}
+
+static inline int pte_global(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_GLOBAL;
+}
+
+static inline int pte_exec(pte_t pte)
+{
+ return !(pte_val(pte) & _PAGE_NX);
+}
+
+static inline int pmd_large(pmd_t pte)
+{
+ return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+ (_PAGE_PSE | _PAGE_PRESENT);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW);
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_ACCESSED);
}
-static inline pte_t pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY); }
-static inline pte_t pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED); }
-static inline pte_t pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW); }
-static inline pte_t pte_mkexec(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX); }
-static inline pte_t pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | _PAGE_DIRTY); }
-static inline pte_t pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | _PAGE_ACCESSED); }
-static inline pte_t pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | _PAGE_RW); }
-static inline pte_t pte_mkhuge(pte_t pte) { return __pte(pte_val(pte) | _PAGE_PSE); }
-static inline pte_t pte_clrhuge(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE); }
-static inline pte_t pte_mkglobal(pte_t pte) { return __pte(pte_val(pte) | _PAGE_GLOBAL); }
-static inline pte_t pte_clrglobal(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); }
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_PSE);
+}
+
+static inline pte_t pte_clrhuge(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE);
+}
+
+static inline pte_t pte_mkglobal(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_GLOBAL);
+}
+
+static inline pte_t pte_clrglobal(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL);
+}
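
The ~(pteval_t) casts in the clear-helpers are load-bearing, not cosmetic: if
a flag constant has a 32-bit unsigned type, complementing it before widening
produces a mask whose high bits are zero, silently stripping NX (bit 63) and
the other high PTE bits on PAE/64-bit. A self-contained demonstration (the
PAGE_DIRTY value mirrors _PAGE_DIRTY above; its unsigned-int type is the
assumption being illustrated):

        #include <stdint.h>
        #include <stdio.h>

        typedef uint64_t pteval_t;
        #define PAGE_DIRTY 0x40u        /* bit 6, as a 32-bit unsigned */

        int main(void)
        {
                pteval_t pte = (1ull << 63) | PAGE_DIRTY;      /* NX + dirty */

                /* 32-bit complement, zero-extended: NX is lost */
                pteval_t bad  = pte & ~PAGE_DIRTY;
                /* 64-bit complement: NX survives */
                pteval_t good = pte & ~(pteval_t)PAGE_DIRTY;

                printf("bad=%#llx good=%#llx\n",
                       (unsigned long long)bad, (unsigned long long)good);
                return 0;
        }
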
extern pteval_t __supported_pte_mask;
@@ -334,7 +422,8 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
})
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
{
pte_t pte = native_ptep_get_and_clear(ptep);
pte_update(mm, addr, ptep);
@@ -342,7 +431,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep,
+ int full)
{
pte_t pte;
if (full) {
@@ -358,7 +449,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline void ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
{
clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
pte_update(mm, addr, ptep);
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 4e6a0fca0b47..c4a643674458 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -40,13 +40,13 @@ void paging_init(void);
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level-defs.h>
# define PMD_SIZE (1UL << PMD_SHIFT)
-# define PMD_MASK (~(PMD_SIZE-1))
+# define PMD_MASK (~(PMD_SIZE - 1))
#else
# include <asm/pgtable-2level-defs.h>
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
@@ -58,21 +58,22 @@ void paging_init(void);
* The vmalloc() routines leaves a hole of 4kB between each vmalloced
* area for the same reason. ;)
*/
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long) high_memory + \
- 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
+#define VMALLOC_OFFSET (8 * 1024 * 1024)
+#define VMALLOC_START (((unsigned long)high_memory + 2 * VMALLOC_OFFSET - 1) \
+ & ~(VMALLOC_OFFSET - 1))
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif
-#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
+#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
+ & PMD_MASK)
#ifdef CONFIG_HIGHMEM
-# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
+# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
#else
-# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
+# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
#endif
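
The VMALLOC_START expression means "round high_memory up to the next
VMALLOC_OFFSET boundary while guaranteeing at least one full 8 MB guard hole
below the vmalloc area": adding 2*OFFSET-1 before masking leaves a gap of
between one and two OFFSETs. A sketch of the arithmetic (function name
invented, addresses picked for illustration):

        #include <assert.h>

        #define OFF (8ul * 1024 * 1024)         /* VMALLOC_OFFSET */

        static unsigned long vmalloc_start(unsigned long high_memory)
        {
                return (high_memory + 2 * OFF - 1) & ~(OFF - 1);
        }

        int main(void)
        {
                /* 896 MB of lowmem: the hole is exactly 8 MB */
                assert(vmalloc_start(0x38000000ul) == 0x38800000ul);
                /* slightly less lowmem: the hole grows, alignment holds */
                assert(vmalloc_start(0x37f00000ul) == 0x38800000ul);
                return 0;
        }
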
/*
@@ -88,10 +89,16 @@ extern unsigned long pg0[];
#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
-#define pmd_none(x) (!(unsigned long)pmd_val(x))
-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
-#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+#define pmd_none(x) (!(unsigned long)pmd_val((x)))
+#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
+extern int pmd_bad(pmd_t pmd);
+
+#define pmd_bad_v1(x) \
+ (_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER)))
+#define pmd_bad_v2(x) \
+ (_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER | \
+ _PAGE_PSE | _PAGE_NX)))
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
@@ -117,17 +124,18 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
}
/*
- * Macro to mark a page protection value as "uncacheable". On processors which do not support
- * it, this is a no-op.
+ * Macro to mark a page protection value as "uncacheable".
+ * On processors which do not support it, this is a no-op.
*/
-#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \
- ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
+#define pgprot_noncached(prot) \
+ ((boot_cpu_data.x86 > 3) \
+ ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) \
+ : (prot))
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
-
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
/*
@@ -136,20 +144,20 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
* this macro returns the index of the entry in the pgd page which would
* control the given virtual address
*/
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_index_k(addr) pgd_index(addr)
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#define pgd_index_k(addr) pgd_index((addr))
/*
* pgd_offset() returns a (pgd_t *)
* pgd_index() is used get the offset into the pgd page's array of pgd_t's;
*/
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
* a shortcut which implies the use of the kernel's pgd, instead
* of a process's
*/
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
static inline int pud_large(pud_t pud) { return 0; }
@@ -159,8 +167,8 @@ static inline int pud_large(pud_t pud) { return 0; }
* this macro returns the index of the entry in the pmd page which would
* control the given virtual address
*/
-#define pmd_index(address) \
- (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+#define pmd_index(address) \
+ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
/*
* the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
@@ -168,43 +176,45 @@ static inline int pud_large(pud_t pud) { return 0; }
* this macro returns the index of the entry in the pte page which would
* control the given virtual address
*/
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
+#define pte_index(address) \
+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+ ((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address)))
-#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
-#define pmd_page_vaddr(pmd) \
- ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page_vaddr(pmd) \
+ ((unsigned long)__va(pmd_val((pmd)) & PAGE_MASK))
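
The index macros are pure bit-slicing of the virtual address. With the
two-level (non-PAE) 10+10+12 split, pgd_index() takes the top ten bits and
pte_index() the middle ten; a checkable sketch using those non-PAE constants:

        #include <assert.h>

        #define PAGE_SHIFT   12
        #define PGDIR_SHIFT  22
        #define PTRS_PER_PGD 1024
        #define PTRS_PER_PTE 1024

        #define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
        #define pte_index(a) (((a) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

        int main(void)
        {
                unsigned long addr = 0xc0101234ul;      /* arbitrary example */

                assert(pgd_index(addr) == 0x300);       /* bits 31..22 */
                assert(pte_index(addr) == 0x101);       /* bits 21..12 */
                return 0;
        }
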
#if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+#define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \
+ pte_index((address)))
+#define pte_offset_map_nested(dir, address) \
+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \
+ pte_index((address)))
+#define pte_unmap(pte) kunmap_atomic((pte), KM_PTE0)
+#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
#else
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
+#define pte_offset_map(dir, address) \
+ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
+#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif
/* Clear a kernel PTE and flush it from the TLB */
-#define kpte_clear_flush(ptep, vaddr) \
-do { \
- pte_clear(&init_mm, vaddr, ptep); \
- __flush_tlb_one(vaddr); \
+#define kpte_clear_flush(ptep, vaddr) \
+do { \
+ pte_clear(&init_mm, (vaddr), (ptep)); \
+ __flush_tlb_one((vaddr)); \
} while (0)
/*
* The i386 doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
*/
-#define update_mmu_cache(vma,address,pte) do { } while (0)
+#define update_mmu_cache(vma, address, pte) do { } while (0)
void native_pagetable_setup_start(pgd_t *base);
void native_pagetable_setup_done(pgd_t *base);
@@ -233,7 +243,7 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base)
#define kern_addr_valid(kaddr) (0)
#endif
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif /* _I386_PGTABLE_H */
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 0a0b77bc736a..9fd87d0b6477 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -52,14 +52,18 @@ extern void paging_init(void);
#ifndef __ASSEMBLY__
-#define pte_ERROR(e) \
- printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
-#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
-#define pud_ERROR(e) \
- printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
-#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %p(%016lx).\n", \
+ __FILE__, __LINE__, &(e), pte_val(e))
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %p(%016lx).\n", \
+ __FILE__, __LINE__, &(e), pmd_val(e))
+#define pud_ERROR(e) \
+ printk("%s:%d: bad pud %p(%016lx).\n", \
+ __FILE__, __LINE__, &(e), pud_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %p(%016lx).\n", \
+ __FILE__, __LINE__, &(e), pgd_val(e))
#define pgd_none(x) (!pgd_val(x))
#define pud_none(x) (!pud_val(x))
@@ -87,7 +91,8 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#ifdef CONFIG_SMP
return native_make_pte(xchg(&xp->pte, 0));
#else
- /* native_local_ptep_get_and_clear, but duplicated because of cyclic dependency */
+ /* native_local_ptep_get_and_clear,
+ but duplicated because of cyclic dependency */
pte_t ret = *xp;
native_pte_clear(NULL, 0, xp);
return ret;
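
The SMP path has to be a single atomic exchange because the CPU may set the
accessed/dirty bits in the PTE concurrently; x86's xchg with a memory operand
is implicitly locked. A portable userspace equivalent of "fetch the old value
and clear", using the GCC/Clang atomic builtin rather than raw asm:

        #include <stdint.h>

        static inline uint64_t get_and_clear_demo(uint64_t *p)
        {
                /* Atomically read *p and store 0, returning the old value. */
                return __atomic_exchange_n(p, 0, __ATOMIC_SEQ_CST);
        }
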
@@ -119,7 +124,7 @@ static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
*pgdp = pgd;
}
-static inline void native_pgd_clear(pgd_t * pgd)
+static inline void native_pgd_clear(pgd_t *pgd)
{
native_set_pgd(pgd, native_make_pgd(0));
}
@@ -128,19 +133,19 @@ static inline void native_pgd_clear(pgd_t * pgd)
#endif /* !__ASSEMBLY__ */
-#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
-#define PUD_MASK (~(PUD_SIZE-1))
-#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE - 1))
+#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE - 1))
+#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
-#define MAXMEM _AC(0x3fffffffffff, UL)
+#define MAXMEM _AC(0x00003fffffffffff, UL)
#define VMALLOC_START _AC(0xffffc20000000000, UL)
#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
#define VMEMMAP_START _AC(0xffffe20000000000, UL)
-#define MODULES_VADDR _AC(0xffffffff88000000, UL)
+#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
#define MODULES_END _AC(0xfffffffffff00000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
@@ -153,26 +158,28 @@ static inline unsigned long pgd_bad(pgd_t pgd)
static inline unsigned long pud_bad(pud_t pud)
{
- return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+ return pud_val(pud) &
+ ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX);
}
static inline unsigned long pmd_bad(pmd_t pmd)
{
- return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+ return pmd_val(pmd) &
+ ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX);
}
-#define pte_none(x) (!pte_val(x))
-#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_none(x) (!pte_val((x)))
+#define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this right? */
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
+#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */
+#define pte_page(x) pfn_to_page(pte_pfn((x)))
+#define pte_pfn(x) ((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
/*
* Macro to mark a page protection value as "uncacheable".
*/
-#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
-
+#define pgprot_noncached(prot) \
+ (__pgprot(pgprot_val((prot)) | _PAGE_PCD | _PAGE_PWT))
/*
* Conversion functions: convert a page and protection to a page entry,
@@ -182,75 +189,81 @@ static inline unsigned long pmd_bad(pmd_t pmd)
/*
* Level 4 access.
*/
-#define pgd_page_vaddr(pgd) ((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK))
-#define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT))
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-#define pgd_offset_k(address) (init_level4_pgt + pgd_index(address))
+#define pgd_page_vaddr(pgd) \
+ ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_MASK))
+#define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
+#define pgd_offset_k(address) (init_level4_pgt + pgd_index((address)))
#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE })
/* PUD - Level3 access */
/* to find an entry in a page-table-directory. */
-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
-#define pud_page(pud) (pfn_to_page(pud_val(pud) >> PAGE_SHIFT))
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address))
-#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
+#define pud_page_vaddr(pud) \
+ ((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK))
+#define pud_page(pud) (pfn_to_page(pud_val((pud)) >> PAGE_SHIFT))
+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+#define pud_offset(pgd, address) \
+ ((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address)))
+#define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT)
static inline int pud_large(pud_t pte)
{
- return (pud_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) ==
- (_PAGE_PSE|_PAGE_PRESENT);
+ return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+ (_PAGE_PSE | _PAGE_PRESENT);
}
/* PMD - Level 2 access */
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
-#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-#define pmd_offset(dir, address) ((pmd_t *) pud_page_vaddr(*(dir)) + \
- pmd_index(address))
-#define pmd_none(x) (!pmd_val(x))
-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
-#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
-#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-
-#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
-#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | _PAGE_FILE })
+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_MASK))
+#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
+
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \
+ pmd_index(address))
+#define pmd_none(x) (!pmd_val((x)))
+#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
+#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot))))
+#define pmd_pfn(x) ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
+
+#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
+#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | \
+ _PAGE_FILE })
#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
/* PTE - Level 1 access. */
/* page, protection -> pte */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn((page)), (pgprot))
+
+#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
- pte_index(address))
+ pte_index((address)))
/* x86-64 always has all page tables mapped. */
-#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
-#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
+#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
+#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) /* NOP */
-#define pte_unmap_nested(pte) /* NOP */
+#define pte_unmap_nested(pte) /* NOP */
+
+#define update_mmu_cache(vma, address, pte) do { } while (0)
-#define update_mmu_cache(vma,address,pte) do { } while (0)
+extern int direct_gbpages;
/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 1) & 0x3f)
#define __swp_offset(x) ((x).val >> 8)
-#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | \
+ ((offset) << 8) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
-extern int kern_addr_valid(unsigned long addr);
+extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
@@ -263,8 +276,10 @@ extern void cleanup_highmap(void);
/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
-#define kc_offset_to_vaddr(o) \
- (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
+#define kc_offset_to_vaddr(o) \
+ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT - 1))) \
+ ? ((o) | ~__VIRTUAL_MASK) \
+ : (o))
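
kc_offset_to_vaddr() is the inverse of kc_vaddr_to_offset(): a /proc/kcore
offset with the top implemented virtual-address bit set must be a kernel
address, so the canonical upper bits are filled back in with ones. The same
sign-extension in a standalone sketch (48 implemented bits assumed for
illustration):

        #include <assert.h>

        #define VSHIFT 48
        #define VMASK  ((1ul << VSHIFT) - 1)

        static unsigned long off_to_vaddr(unsigned long o)
        {
                return (o & (1ul << (VSHIFT - 1))) ? (o | ~VMASK) : o;
        }

        int main(void)
        {
                /* a kernel address round-trips through the offset form */
                assert(off_to_vaddr(0xffffc20000000000ul & VMASK) ==
                       0xffffc20000000000ul);
                /* a low offset is returned unchanged */
                assert(off_to_vaddr(0x1234ul) == 0x1234ul);
                return 0;
        }
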
#define __HAVE_ARCH_PTE_SAME
#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-x86/posix_types.h b/include/asm-x86/posix_types.h
index bb7133dc155d..fe312a5ba204 100644
--- a/include/asm-x86/posix_types.h
+++ b/include/asm-x86/posix_types.h
@@ -1,11 +1,5 @@
#ifdef __KERNEL__
-# ifdef CONFIG_X86_32
-# include "posix_types_32.h"
-# else
-# include "posix_types_64.h"
-# endif
-#else
-# ifdef __i386__
+# if defined(CONFIG_X86_32) || defined(__i386__)
# include "posix_types_32.h"
# else
# include "posix_types_64.h"
diff --git a/include/asm-x86/posix_types_32.h b/include/asm-x86/posix_types_32.h
index 015e539cdef5..b031efda37ec 100644
--- a/include/asm-x86/posix_types_32.h
+++ b/include/asm-x86/posix_types_32.h
@@ -45,32 +45,39 @@ typedef struct {
#if defined(__KERNEL__)
#undef __FD_SET
-#define __FD_SET(fd,fdsetp) \
- __asm__ __volatile__("btsl %1,%0": \
- "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+#define __FD_SET(fd,fdsetp) \
+ asm volatile("btsl %1,%0": \
+ "+m" (*(__kernel_fd_set *)(fdsetp)) \
+ : "r" ((int)(fd)))
#undef __FD_CLR
-#define __FD_CLR(fd,fdsetp) \
- __asm__ __volatile__("btrl %1,%0": \
- "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+#define __FD_CLR(fd,fdsetp) \
+ asm volatile("btrl %1,%0": \
+ "+m" (*(__kernel_fd_set *)(fdsetp)) \
+ : "r" ((int) (fd)))
#undef __FD_ISSET
-#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
- unsigned char __result; \
- __asm__ __volatile__("btl %1,%2 ; setb %0" \
- :"=q" (__result) :"r" ((int) (fd)), \
- "m" (*(__kernel_fd_set *) (fdsetp))); \
- __result; }))
+#define __FD_ISSET(fd,fdsetp) \
+ (__extension__ \
+ ({ \
+ unsigned char __result; \
+ asm volatile("btl %1,%2 ; setb %0" \
+ : "=q" (__result) \
+ : "r" ((int)(fd)), \
+ "m" (*(__kernel_fd_set *)(fdsetp))); \
+ __result; \
+}))
#undef __FD_ZERO
-#define __FD_ZERO(fdsetp) \
-do { \
- int __d0, __d1; \
- __asm__ __volatile__("cld ; rep ; stosl" \
- :"=m" (*(__kernel_fd_set *) (fdsetp)), \
- "=&c" (__d0), "=&D" (__d1) \
- :"a" (0), "1" (__FDSET_LONGS), \
- "2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \
+#define __FD_ZERO(fdsetp) \
+do { \
+ int __d0, __d1; \
+ asm volatile("cld ; rep ; stosl" \
+ : "=m" (*(__kernel_fd_set *)(fdsetp)), \
+ "=&c" (__d0), "=&D" (__d1) \
+ : "a" (0), "1" (__FDSET_LONGS), \
+ "2" ((__kernel_fd_set *)(fdsetp)) \
+ : "memory"); \
} while (0)
#endif /* defined(__KERNEL__) */
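
__FD_ISSET's bt/setb pairing is the read-only sibling of the bts/btr macros
above it: "bt" loads the addressed bit into CF and "setb" materialises CF as a
byte. A userspace sketch of the same idiom (x86 with GNU inline asm; like the
kernel macro, the "m" constraint nominally covers only the first word, so
treat this as an illustration rather than a general bitmap API):

        static inline int fd_isset_demo(int fd, const unsigned long *bits)
        {
                unsigned char result;

                asm volatile("btl %1,%2\n\tsetb %0"
                             : "=q" (result)
                             : "r" (fd), "m" (*bits)
                             : "cc");
                return result;
        }
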
diff --git a/include/asm-x86/posix_types_64.h b/include/asm-x86/posix_types_64.h
index 9926aa43775b..d6624c95854a 100644
--- a/include/asm-x86/posix_types_64.h
+++ b/include/asm-x86/posix_types_64.h
@@ -46,7 +46,7 @@ typedef unsigned long __kernel_old_dev_t;
#ifdef __KERNEL__
#undef __FD_SET
-static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
+static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
{
unsigned long _tmp = fd / __NFDBITS;
unsigned long _rem = fd % __NFDBITS;
@@ -54,7 +54,7 @@ static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
}
#undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
+static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
{
unsigned long _tmp = fd / __NFDBITS;
unsigned long _rem = fd % __NFDBITS;
@@ -62,7 +62,7 @@ static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
}
#undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
+static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
{
unsigned long _tmp = fd / __NFDBITS;
unsigned long _rem = fd % __NFDBITS;
@@ -74,36 +74,36 @@ static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
* for 256 and 1024-bit fd_sets respectively)
*/
#undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *p)
+static inline void __FD_ZERO(__kernel_fd_set *p)
{
unsigned long *tmp = p->fds_bits;
int i;
if (__builtin_constant_p(__FDSET_LONGS)) {
switch (__FDSET_LONGS) {
- case 32:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
- tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
- tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
- tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
- tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
- tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
- return;
- case 16:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
- tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
- return;
- case 8:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- return;
- case 4:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- return;
+ case 32:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+ tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
+ tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
+ tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
+ tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
+ tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
+ tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
+ return;
+ case 16:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+ tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
+ tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
+ return;
+ case 8:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+ return;
+ case 4:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ return;
}
}
i = __FDSET_LONGS;
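
The unrolled cases in __FD_ZERO only exist for sizes the compiler can prove at
build time: __builtin_constant_p(__FDSET_LONGS) guards the switch, and the
generic loop following this hunk handles everything else. The same dispatch
shape in a standalone sketch (LONGS and the function are invented for
illustration):

        #include <string.h>

        #define LONGS 16        /* stand-in for __FDSET_LONGS */

        static void zero_demo(unsigned long *p, unsigned int n)
        {
                if (__builtin_constant_p(n) && n == LONGS) {
                        /* constant-size path: trivially unrollable */
                        for (unsigned int i = 0; i < LONGS; i++)
                                p[i] = 0;
                } else {
                        memset(p, 0, n * sizeof(*p));   /* generic fallback */
                }
        }
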
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 45a2f0ab33d0..6e26c7c717a2 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -3,8 +3,7 @@
#include <asm/processor-flags.h>
-/* migration helpers, for KVM - will be removed in 2.6.25: */
-#include <asm/vm86.h>
+/* migration helper, for KVM - will be removed in 2.6.25: */
#define Xgt_desc_struct desc_ptr
/* Forward declaration, a strange C thing */
@@ -24,6 +23,7 @@ struct mm_struct;
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
+
#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
@@ -37,16 +37,18 @@ struct mm_struct;
static inline void *current_text_addr(void)
{
void *pc;
- asm volatile("mov $1f,%0\n1:":"=r" (pc));
+
+ asm volatile("mov $1f, %0; 1:":"=r" (pc));
+
return pc;
}
#ifdef CONFIG_X86_VSMP
-#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
-#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
+# define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
+# define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
#else
-#define ARCH_MIN_TASKALIGN 16
-#define ARCH_MIN_MMSTRUCT_ALIGN 0
+# define ARCH_MIN_TASKALIGN 16
+# define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
/*
@@ -56,69 +58,82 @@ static inline void *current_text_addr(void)
*/
struct cpuinfo_x86 {
- __u8 x86; /* CPU family */
- __u8 x86_vendor; /* CPU vendor */
- __u8 x86_model;
- __u8 x86_mask;
+ __u8 x86; /* CPU family */
+ __u8 x86_vendor; /* CPU vendor */
+ __u8 x86_model;
+ __u8 x86_mask;
#ifdef CONFIG_X86_32
- char wp_works_ok; /* It doesn't on 386's */
- char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
- char hard_math;
- char rfu;
- char fdiv_bug;
- char f00f_bug;
- char coma_bug;
- char pad0;
+ char wp_works_ok; /* It doesn't on 386's */
+
+ /* Problems on some 486Dx4's and old 386's: */
+ char hlt_works_ok;
+ char hard_math;
+ char rfu;
+ char fdiv_bug;
+ char f00f_bug;
+ char coma_bug;
+ char pad0;
#else
- /* number of 4K pages in DTLB/ITLB combined(in pages)*/
- int x86_tlbsize;
- __u8 x86_virt_bits, x86_phys_bits;
- /* cpuid returned core id bits */
- __u8 x86_coreid_bits;
- /* Max extended CPUID function supported */
- __u32 extended_cpuid_level;
+	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
+ int x86_tlbsize;
+ __u8 x86_virt_bits;
+ __u8 x86_phys_bits;
+ /* CPUID returned core id bits: */
+ __u8 x86_coreid_bits;
+ /* Max extended CPUID function supported: */
+ __u32 extended_cpuid_level;
#endif
- int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
- __u32 x86_capability[NCAPINTS];
- char x86_vendor_id[16];
- char x86_model_id[64];
- int x86_cache_size; /* in KB - valid for CPUS which support this
- call */
- int x86_cache_alignment; /* In bytes */
- int x86_power;
- unsigned long loops_per_jiffy;
+ /* Maximum supported CPUID level, -1=no CPUID: */
+ int cpuid_level;
+ __u32 x86_capability[NCAPINTS];
+ char x86_vendor_id[16];
+ char x86_model_id[64];
+ /* in KB - valid for CPUS which support this call: */
+ int x86_cache_size;
+ int x86_cache_alignment; /* In bytes */
+ int x86_power;
+ unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
- cpumask_t llc_shared_map; /* cpus sharing the last level cache */
+ /* cpus sharing the last level cache: */
+ cpumask_t llc_shared_map;
#endif
- u16 x86_max_cores; /* cpuid returned max cores value */
- u16 apicid;
- u16 x86_clflush_size;
+ /* cpuid returned max cores value: */
+ u16 x86_max_cores;
+ u16 apicid;
+ u16 initial_apicid;
+ u16 x86_clflush_size;
#ifdef CONFIG_SMP
- u16 booted_cores; /* number of cores as seen by OS */
- u16 phys_proc_id; /* Physical processor id. */
- u16 cpu_core_id; /* Core id */
- u16 cpu_index; /* index into per_cpu list */
+ /* number of cores as seen by the OS: */
+ u16 booted_cores;
+ /* Physical processor id: */
+ u16 phys_proc_id;
+ /* Core id: */
+ u16 cpu_core_id;
+ /* Index into per_cpu list: */
+ u16 cpu_index;
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-#define X86_VENDOR_INTEL 0
-#define X86_VENDOR_CYRIX 1
-#define X86_VENDOR_AMD 2
-#define X86_VENDOR_UMC 3
-#define X86_VENDOR_NEXGEN 4
-#define X86_VENDOR_CENTAUR 5
-#define X86_VENDOR_TRANSMETA 7
-#define X86_VENDOR_NSC 8
-#define X86_VENDOR_NUM 9
-#define X86_VENDOR_UNKNOWN 0xff
+#define X86_VENDOR_INTEL 0
+#define X86_VENDOR_CYRIX 1
+#define X86_VENDOR_AMD 2
+#define X86_VENDOR_UMC 3
+#define X86_VENDOR_NEXGEN 4
+#define X86_VENDOR_CENTAUR 5
+#define X86_VENDOR_TRANSMETA 7
+#define X86_VENDOR_NSC 8
+#define X86_VENDOR_NUM 9
+
+#define X86_VENDOR_UNKNOWN 0xff
/*
* capabilities of CPUs
*/
-extern struct cpuinfo_x86 boot_cpu_data;
-extern struct cpuinfo_x86 new_cpu_data;
-extern struct tss_struct doublefault_tss;
-extern __u32 cleared_cpu_caps[NCAPINTS];
+extern struct cpuinfo_x86 boot_cpu_data;
+extern struct cpuinfo_x86 new_cpu_data;
+
+extern struct tss_struct doublefault_tss;
+extern __u32 cleared_cpu_caps[NCAPINTS];
#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
@@ -129,7 +144,18 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define current_cpu_data boot_cpu_data
#endif
-void cpu_detect(struct cpuinfo_x86 *c);
+static inline int hlt_works(int cpu)
+{
+#ifdef CONFIG_X86_32
+ return cpu_data(cpu).hlt_works_ok;
+#else
+ return 1;
+#endif
+}
+
+#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
+
+extern void cpu_detect(struct cpuinfo_x86 *c);
extern void identify_cpu(struct cpuinfo_x86 *);
extern void identify_boot_cpu(void);
@@ -146,15 +172,15 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
+ unsigned int *ecx, unsigned int *edx)
{
/* ecx is often an input as well as an output. */
- __asm__("cpuid"
- : "=a" (*eax),
- "=b" (*ebx),
- "=c" (*ecx),
- "=d" (*edx)
- : "0" (*eax), "2" (*ecx));
+ asm("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (*eax), "2" (*ecx));
}
static inline void load_cr3(pgd_t *pgdir)
@@ -165,54 +191,67 @@ static inline void load_cr3(pgd_t *pgdir)
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
- unsigned short back_link, __blh;
- unsigned long sp0;
- unsigned short ss0, __ss0h;
- unsigned long sp1;
- unsigned short ss1, __ss1h; /* ss1 caches MSR_IA32_SYSENTER_CS */
- unsigned long sp2;
- unsigned short ss2, __ss2h;
- unsigned long __cr3;
- unsigned long ip;
- unsigned long flags;
- unsigned long ax, cx, dx, bx;
- unsigned long sp, bp, si, di;
- unsigned short es, __esh;
- unsigned short cs, __csh;
- unsigned short ss, __ssh;
- unsigned short ds, __dsh;
- unsigned short fs, __fsh;
- unsigned short gs, __gsh;
- unsigned short ldt, __ldth;
- unsigned short trace, io_bitmap_base;
+ unsigned short back_link, __blh;
+ unsigned long sp0;
+ unsigned short ss0, __ss0h;
+ unsigned long sp1;
+ /* ss1 caches MSR_IA32_SYSENTER_CS: */
+ unsigned short ss1, __ss1h;
+ unsigned long sp2;
+ unsigned short ss2, __ss2h;
+ unsigned long __cr3;
+ unsigned long ip;
+ unsigned long flags;
+ unsigned long ax;
+ unsigned long cx;
+ unsigned long dx;
+ unsigned long bx;
+ unsigned long sp;
+ unsigned long bp;
+ unsigned long si;
+ unsigned long di;
+ unsigned short es, __esh;
+ unsigned short cs, __csh;
+ unsigned short ss, __ssh;
+ unsigned short ds, __dsh;
+ unsigned short fs, __fsh;
+ unsigned short gs, __gsh;
+ unsigned short ldt, __ldth;
+ unsigned short trace;
+ unsigned short io_bitmap_base;
+
} __attribute__((packed));
#else
struct x86_hw_tss {
- u32 reserved1;
- u64 sp0;
- u64 sp1;
- u64 sp2;
- u64 reserved2;
- u64 ist[7];
- u32 reserved3;
- u32 reserved4;
- u16 reserved5;
- u16 io_bitmap_base;
+ u32 reserved1;
+ u64 sp0;
+ u64 sp1;
+ u64 sp2;
+ u64 reserved2;
+ u64 ist[7];
+ u32 reserved3;
+ u32 reserved4;
+ u16 reserved5;
+ u16 io_bitmap_base;
+
} __attribute__((packed)) ____cacheline_aligned;
#endif
/*
- * Size of io_bitmap.
+ * IO-bitmap sizes:
*/
-#define IO_BITMAP_BITS 65536
-#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
-#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
-#define INVALID_IO_BITMAP_OFFSET 0x8000
-#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
+#define IO_BITMAP_BITS 65536
+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
+#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
+#define INVALID_IO_BITMAP_OFFSET 0x8000
+#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
struct tss_struct {
- struct x86_hw_tss x86_tss;
+ /*
+ * The hardware state:
+ */
+ struct x86_hw_tss x86_tss;
/*
* The extra 1 is there because the CPU will access an
@@ -220,90 +259,108 @@ struct tss_struct {
* bitmap. The extra byte must be all 1 bits, and must
* be within the limit.
*/
- unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
+ unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
/*
* Cache the current maximum and the last task that used the bitmap:
*/
- unsigned long io_bitmap_max;
- struct thread_struct *io_bitmap_owner;
+ unsigned long io_bitmap_max;
+ struct thread_struct *io_bitmap_owner;
+
/*
- * pads the TSS to be cacheline-aligned (size is 0x100)
+ * Pad the TSS to be cacheline-aligned (size is 0x100):
*/
- unsigned long __cacheline_filler[35];
+ unsigned long __cacheline_filler[35];
/*
- * .. and then another 0x100 bytes for emergency kernel stack
+ * .. and then another 0x100 bytes for the emergency kernel stack:
*/
- unsigned long stack[64];
+ unsigned long stack[64];
+
} __attribute__((packed));
DECLARE_PER_CPU(struct tss_struct, init_tss);
-/* Save the original ist values for checking stack pointers during debugging */
+/*
+ * Save the original ist values for checking stack pointers during debugging
+ */
struct orig_ist {
- unsigned long ist[7];
+ unsigned long ist[7];
};
#define MXCSR_DEFAULT 0x1f80
struct i387_fsave_struct {
- u32 cwd;
- u32 swd;
- u32 twd;
- u32 fip;
- u32 fcs;
- u32 foo;
- u32 fos;
- u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
- u32 status; /* software status information */
+ u32 cwd; /* FPU Control Word */
+ u32 swd; /* FPU Status Word */
+ u32 twd; /* FPU Tag Word */
+ u32 fip; /* FPU IP Offset */
+ u32 fcs; /* FPU IP Selector */
+ u32 foo; /* FPU Operand Pointer Offset */
+ u32 fos; /* FPU Operand Pointer Selector */
+
+ /* 8*10 bytes for each FP-reg = 80 bytes: */
+ u32 st_space[20];
+
+	/* Software status information [not touched by FSAVE]: */
+ u32 status;
};
struct i387_fxsave_struct {
- u16 cwd;
- u16 swd;
- u16 twd;
- u16 fop;
+ u16 cwd; /* Control Word */
+ u16 swd; /* Status Word */
+ u16 twd; /* Tag Word */
+ u16 fop; /* Last Instruction Opcode */
union {
struct {
- u64 rip;
- u64 rdp;
+ u64 rip; /* Instruction Pointer */
+ u64 rdp; /* Data Pointer */
};
struct {
- u32 fip;
- u32 fcs;
- u32 foo;
- u32 fos;
+ u32 fip; /* FPU IP Offset */
+ u32 fcs; /* FPU IP Selector */
+ u32 foo; /* FPU Operand Offset */
+ u32 fos; /* FPU Operand Selector */
};
};
- u32 mxcsr;
- u32 mxcsr_mask;
- u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
- u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
- u32 padding[24];
+ u32 mxcsr; /* MXCSR Register State */
+ u32 mxcsr_mask; /* MXCSR Mask */
+
+ /* 8*16 bytes for each FP-reg = 128 bytes: */
+ u32 st_space[32];
+
+ /* 16*16 bytes for each XMM-reg = 256 bytes: */
+ u32 xmm_space[64];
+
+ u32 padding[24];
+
} __attribute__((aligned(16)));
struct i387_soft_struct {
- u32 cwd;
- u32 swd;
- u32 twd;
- u32 fip;
- u32 fcs;
- u32 foo;
- u32 fos;
- u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
- u8 ftop, changed, lookahead, no_update, rm, alimit;
- struct info *info;
- u32 entry_eip;
+ u32 cwd;
+ u32 swd;
+ u32 twd;
+ u32 fip;
+ u32 fcs;
+ u32 foo;
+ u32 fos;
+ /* 8*10 bytes for each FP-reg = 80 bytes: */
+ u32 st_space[20];
+ u8 ftop;
+ u8 changed;
+ u8 lookahead;
+ u8 no_update;
+ u8 rm;
+ u8 alimit;
+ struct info *info;
+ u32 entry_eip;
};
union i387_union {
struct i387_fsave_struct fsave;
struct i387_fxsave_struct fxsave;
- struct i387_soft_struct soft;
+ struct i387_soft_struct soft;
};
-#ifdef CONFIG_X86_32
-DECLARE_PER_CPU(u8, cpu_llc_id);
-#else
+#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);
#endif
@@ -313,42 +370,50 @@ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;
struct thread_struct {
-/* cached TLS descriptors. */
- struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
- unsigned long sp0;
- unsigned long sp;
+ /* Cached TLS descriptors: */
+ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
+ unsigned long sp0;
+ unsigned long sp;
#ifdef CONFIG_X86_32
- unsigned long sysenter_cs;
+ unsigned long sysenter_cs;
#else
- unsigned long usersp; /* Copy from PDA */
- unsigned short es, ds, fsindex, gsindex;
+ unsigned long usersp; /* Copy from PDA */
+ unsigned short es;
+ unsigned short ds;
+ unsigned short fsindex;
+ unsigned short gsindex;
#endif
- unsigned long ip;
- unsigned long fs;
- unsigned long gs;
-/* Hardware debugging registers */
- unsigned long debugreg0;
- unsigned long debugreg1;
- unsigned long debugreg2;
- unsigned long debugreg3;
- unsigned long debugreg6;
- unsigned long debugreg7;
-/* fault info */
- unsigned long cr2, trap_no, error_code;
-/* floating point info */
+ unsigned long ip;
+ unsigned long fs;
+ unsigned long gs;
+ /* Hardware debugging registers: */
+ unsigned long debugreg0;
+ unsigned long debugreg1;
+ unsigned long debugreg2;
+ unsigned long debugreg3;
+ unsigned long debugreg6;
+ unsigned long debugreg7;
+ /* Fault info: */
+ unsigned long cr2;
+ unsigned long trap_no;
+ unsigned long error_code;
+ /* Floating point info: */
union i387_union i387 __attribute__((aligned(16)));;
#ifdef CONFIG_X86_32
-/* virtual 86 mode info */
+ /* Virtual 86 mode info */
struct vm86_struct __user *vm86_info;
unsigned long screen_bitmap;
- unsigned long v86flags, v86mask, saved_sp0;
- unsigned int saved_fs, saved_gs;
+ unsigned long v86flags;
+ unsigned long v86mask;
+ unsigned long saved_sp0;
+ unsigned int saved_fs;
+ unsigned int saved_gs;
#endif
-/* IO permissions */
- unsigned long *io_bitmap_ptr;
- unsigned long iopl;
-/* max allowed port in the bitmap, in bytes: */
- unsigned io_bitmap_max;
+ /* IO permissions: */
+ unsigned long *io_bitmap_ptr;
+ unsigned long iopl;
+ /* Max allowed port in the bitmap, in bytes: */
+ unsigned io_bitmap_max;
/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
unsigned long debugctlmsr;
/* Debug Store - if not 0 points to a DS Save Area configuration;
@@ -358,21 +423,27 @@ struct thread_struct {
static inline unsigned long native_get_debugreg(int regno)
{
- unsigned long val = 0; /* Damn you, gcc! */
+ unsigned long val = 0; /* Damn you, gcc! */
switch (regno) {
case 0:
- asm("mov %%db0, %0" :"=r" (val)); break;
+ asm("mov %%db0, %0" :"=r" (val));
+ break;
case 1:
- asm("mov %%db1, %0" :"=r" (val)); break;
+ asm("mov %%db1, %0" :"=r" (val));
+ break;
case 2:
- asm("mov %%db2, %0" :"=r" (val)); break;
+ asm("mov %%db2, %0" :"=r" (val));
+ break;
case 3:
- asm("mov %%db3, %0" :"=r" (val)); break;
+ asm("mov %%db3, %0" :"=r" (val));
+ break;
case 6:
- asm("mov %%db6, %0" :"=r" (val)); break;
+ asm("mov %%db6, %0" :"=r" (val));
+ break;
case 7:
- asm("mov %%db7, %0" :"=r" (val)); break;
+ asm("mov %%db7, %0" :"=r" (val));
+ break;
default:
BUG();
}
@@ -383,22 +454,22 @@ static inline void native_set_debugreg(int regno, unsigned long value)
{
switch (regno) {
case 0:
- asm("mov %0,%%db0" : /* no output */ :"r" (value));
+ asm("mov %0, %%db0" ::"r" (value));
break;
case 1:
- asm("mov %0,%%db1" : /* no output */ :"r" (value));
+ asm("mov %0, %%db1" ::"r" (value));
break;
case 2:
- asm("mov %0,%%db2" : /* no output */ :"r" (value));
+ asm("mov %0, %%db2" ::"r" (value));
break;
case 3:
- asm("mov %0,%%db3" : /* no output */ :"r" (value));
+ asm("mov %0, %%db3" ::"r" (value));
break;
case 6:
- asm("mov %0,%%db6" : /* no output */ :"r" (value));
+ asm("mov %0, %%db6" ::"r" (value));
break;
case 7:
- asm("mov %0,%%db7" : /* no output */ :"r" (value));
+ asm("mov %0, %%db7" ::"r" (value));
break;
default:
BUG();
@@ -412,23 +483,24 @@ static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
unsigned int reg;
- __asm__ __volatile__ ("pushfl;"
- "popl %0;"
- "andl %1, %0;"
- "orl %2, %0;"
- "pushl %0;"
- "popfl"
- : "=&r" (reg)
- : "i" (~X86_EFLAGS_IOPL), "r" (mask));
+
+ asm volatile ("pushfl;"
+ "popl %0;"
+ "andl %1, %0;"
+ "orl %2, %0;"
+ "pushl %0;"
+ "popfl"
+ : "=&r" (reg)
+ : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}
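
native_set_iopl_mask() edits the IOPL field by round-tripping EFLAGS through
the stack; only the read half of that trick works outside ring 0. A
userspace-runnable cousin (function name invented; on x86-64 the instructions
assemble as pushfq/popq):

        static inline unsigned long read_flags_demo(void)
        {
                unsigned long flags;

                asm volatile("pushf\n\tpop %0" : "=r" (flags));
                return flags;
        }
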
-static inline void native_load_sp0(struct tss_struct *tss,
- struct thread_struct *thread)
+static inline void
+native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
- /* Only happens when SEP is enabled, no need to test "SEP"arately */
+ /* Only happens when SEP is enabled, no need to test "SEP"arately: */
if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
tss->x86_tss.ss1 = thread->sysenter_cs;
wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
@@ -446,8 +518,8 @@ static inline void native_swapgs(void)
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
-#define __cpuid native_cpuid
-#define paravirt_enabled() 0
+#define __cpuid native_cpuid
+#define paravirt_enabled() 0
/*
* These special macros can be used to get or set a debugging register
@@ -473,11 +545,12 @@ static inline void load_sp0(struct tss_struct *tss,
* enable), so that any CPU's that boot up
* after us can get the correct flags.
*/
-extern unsigned long mmu_cr4_features;
+extern unsigned long mmu_cr4_features;
static inline void set_in_cr4(unsigned long mask)
{
unsigned cr4;
+
mmu_cr4_features |= mask;
cr4 = read_cr4();
cr4 |= mask;
@@ -487,6 +560,7 @@ static inline void set_in_cr4(unsigned long mask)
static inline void clear_in_cr4(unsigned long mask)
{
unsigned cr4;
+
mmu_cr4_features &= ~mask;
cr4 = read_cr4();
cr4 &= ~mask;
@@ -494,42 +568,42 @@ static inline void clear_in_cr4(unsigned long mask)
}
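
For illustration only (hedged, not from the patch): enabling a CR4 feature bit at boot, assuming X86_CR4_PGE from <asm/processor-flags.h>; the helper also records the bit in mmu_cr4_features so CPUs that boot later pick it up.

	set_in_cr4(X86_CR4_PGE);	/* global pages on, remembered for later CPUs */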
struct microcode_header {
- unsigned int hdrver;
- unsigned int rev;
- unsigned int date;
- unsigned int sig;
- unsigned int cksum;
- unsigned int ldrver;
- unsigned int pf;
- unsigned int datasize;
- unsigned int totalsize;
- unsigned int reserved[3];
+ unsigned int hdrver;
+ unsigned int rev;
+ unsigned int date;
+ unsigned int sig;
+ unsigned int cksum;
+ unsigned int ldrver;
+ unsigned int pf;
+ unsigned int datasize;
+ unsigned int totalsize;
+ unsigned int reserved[3];
};
struct microcode {
- struct microcode_header hdr;
- unsigned int bits[0];
+ struct microcode_header hdr;
+ unsigned int bits[0];
};
-typedef struct microcode microcode_t;
-typedef struct microcode_header microcode_header_t;
+typedef struct microcode microcode_t;
+typedef struct microcode_header microcode_header_t;
 /* microcode format is extended from Prescott processors */
struct extended_signature {
- unsigned int sig;
- unsigned int pf;
- unsigned int cksum;
+ unsigned int sig;
+ unsigned int pf;
+ unsigned int cksum;
};
struct extended_sigtable {
- unsigned int count;
- unsigned int cksum;
- unsigned int reserved[3];
+ unsigned int count;
+ unsigned int cksum;
+ unsigned int reserved[3];
struct extended_signature sigs[0];
};
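
A hedged sketch of how these headers are typically walked, assuming the Intel convention that a datasize of 0 means the default 2000-byte data area; ext_table() is a hypothetical helper, not part of this patch.

	static struct extended_sigtable *ext_table(struct microcode *mc)
	{
		unsigned int data = mc->hdr.datasize ? mc->hdr.datasize : 2000;

		if (mc->hdr.totalsize <= data + sizeof(struct microcode_header))
			return NULL;	/* no extended signature table present */
		return (struct extended_sigtable *)((u8 *)mc->bits + data);
	}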
typedef struct {
- unsigned long seg;
+ unsigned long seg;
} mm_segment_t;
@@ -541,7 +615,7 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
-/* Prepare to copy thread state - unlazy all lazy status */
+/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);
unsigned long get_wchan(struct task_struct *p);
@@ -578,118 +652,137 @@ static inline unsigned int cpuid_eax(unsigned int op)
unsigned int eax, ebx, ecx, edx;
cpuid(op, &eax, &ebx, &ecx, &edx);
+
return eax;
}
+
static inline unsigned int cpuid_ebx(unsigned int op)
{
unsigned int eax, ebx, ecx, edx;
cpuid(op, &eax, &ebx, &ecx, &edx);
+
return ebx;
}
+
static inline unsigned int cpuid_ecx(unsigned int op)
{
unsigned int eax, ebx, ecx, edx;
cpuid(op, &eax, &ebx, &ecx, &edx);
+
return ecx;
}
+
static inline unsigned int cpuid_edx(unsigned int op)
{
unsigned int eax, ebx, ecx, edx;
cpuid(op, &eax, &ebx, &ecx, &edx);
+
return edx;
}
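
Illustrative only: a hedged usage sketch, assuming the architectural CPUID leaf-1 layout in which EDX bit 26 reports SSE2.

	static int cpu_has_sse2(void)
	{
		return (cpuid_edx(1) >> 26) & 1;	/* leaf 1, EDX bit 26 = SSE2 */
	}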
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
- __asm__ __volatile__("rep;nop": : :"memory");
+ asm volatile("rep; nop" ::: "memory");
}
-/* Stop speculative execution */
+static inline void cpu_relax(void)
+{
+ rep_nop();
+}
+
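
A hedged sketch of the busy-wait pattern these helpers exist for; 'flag' is a hypothetical variable stored to by another CPU.

	while (!flag)
		cpu_relax();	/* PAUSE: be polite to the sibling hyperthread */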
+/* Stop speculative execution: */
static inline void sync_core(void)
{
int tmp;
+
asm volatile("cpuid" : "=a" (tmp) : "0" (1)
- : "ebx", "ecx", "edx", "memory");
+ : "ebx", "ecx", "edx", "memory");
}
-#define cpu_relax() rep_nop()
-
static inline void __monitor(const void *eax, unsigned long ecx,
- unsigned long edx)
+ unsigned long edx)
{
- /* "monitor %eax,%ecx,%edx;" */
- asm volatile(
- ".byte 0x0f,0x01,0xc8;"
- : :"a" (eax), "c" (ecx), "d"(edx));
+ /* "monitor %eax, %ecx, %edx;" */
+ asm volatile(".byte 0x0f, 0x01, 0xc8;"
+ :: "a" (eax), "c" (ecx), "d"(edx));
}
static inline void __mwait(unsigned long eax, unsigned long ecx)
{
- /* "mwait %eax,%ecx;" */
- asm volatile(
- ".byte 0x0f,0x01,0xc9;"
- : :"a" (eax), "c" (ecx));
+ /* "mwait %eax, %ecx;" */
+ asm volatile(".byte 0x0f, 0x01, 0xc9;"
+ :: "a" (eax), "c" (ecx));
}
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
- /* "mwait %eax,%ecx;" */
- asm volatile(
- "sti; .byte 0x0f,0x01,0xc9;"
- : :"a" (eax), "c" (ecx));
+ /* "mwait %eax, %ecx;" */
+ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+ :: "a" (eax), "c" (ecx));
}
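
Hedged illustration of the canonical MONITOR/MWAIT wait loop, assuming a write-monitored 'trigger' word that another CPU stores to; wait_for_store() is hypothetical, not part of this patch.

	static void wait_for_store(volatile int *trigger)
	{
		while (!*trigger) {
			__monitor((const void *)trigger, 0, 0);
			if (!*trigger)		/* re-check: the store may beat MWAIT */
				__mwait(0, 0);
		}
	}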
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
-extern int force_mwait;
+extern int force_mwait;
extern void select_idle_routine(const struct cpuinfo_x86 *c);
-extern unsigned long boot_option_idle_override;
+extern unsigned long boot_option_idle_override;
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
/* Defined in head.S */
-extern struct desc_ptr early_gdt_descr;
+extern struct desc_ptr early_gdt_descr;
extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void);
extern void cpu_init(void);
extern void init_gdt(int cpu);
-/* from system description table in BIOS. Mostly for MCA use, but
- * others may find it useful. */
-extern unsigned int machine_id;
-extern unsigned int machine_submodel_id;
-extern unsigned int BIOS_revision;
+static inline void update_debugctlmsr(unsigned long debugctlmsr)
+{
+#ifndef CONFIG_X86_DEBUGCTLMSR
+ if (boot_cpu_data.x86 < 6)
+ return;
+#endif
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
+}
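
Illustrative and hedged: setting the branch-trap bit via the new helper, assuming DEBUGCTLMSR_BTF (bit 1) is defined in the MSR headers; tsk is a hypothetical task pointer.

	update_debugctlmsr(tsk->thread.debugctlmsr | DEBUGCTLMSR_BTF);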
-/* Boot loader type from the setup header */
-extern int bootloader_type;
+/*
+ * from system description table in BIOS. Mostly for MCA use, but
+ * others may find it useful:
+ */
+extern unsigned int machine_id;
+extern unsigned int machine_submodel_id;
+extern unsigned int BIOS_revision;
+
+/* Boot loader type from the setup header: */
+extern int bootloader_type;
-extern char ignore_fpu_irq;
-#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
+extern char ignore_fpu_irq;
#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#ifdef CONFIG_X86_32
-#define BASE_PREFETCH ASM_NOP4
-#define ARCH_HAS_PREFETCH
+# define BASE_PREFETCH ASM_NOP4
+# define ARCH_HAS_PREFETCH
#else
-#define BASE_PREFETCH "prefetcht0 (%1)"
+# define BASE_PREFETCH "prefetcht0 (%1)"
#endif
-/* Prefetch instructions for Pentium III and AMD Athlon */
-/* It's not worth to care about 3dnow! prefetches for the K6
- because they are microcoded there and very slow.
- However we don't do prefetches for pre XP Athlons currently
- That should be fixed. */
+/*
+ * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
+ *
+ * It's not worth caring about 3dnow prefetches for the K6
+ * because they are microcoded there and very slow.
+ */
static inline void prefetch(const void *x)
{
alternative_input(BASE_PREFETCH,
@@ -698,8 +791,11 @@ static inline void prefetch(const void *x)
"r" (x));
}
-/* 3dnow! prefetch to get an exclusive cache line. Useful for
- spinlocks to avoid one state transition in the cache coherency protocol. */
+/*
+ * 3dnow prefetch to get an exclusive cache line.
+ * Useful for spinlocks to avoid one state transition in the
+ * cache coherency protocol:
+ */
static inline void prefetchw(const void *x)
{
alternative_input(BASE_PREFETCH,
@@ -708,21 +804,25 @@ static inline void prefetchw(const void *x)
"r" (x));
}
-#define spin_lock_prefetch(x) prefetchw(x)
+static inline void spin_lock_prefetch(const void *x)
+{
+ prefetchw(x);
+}
+
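
A hedged usage sketch: prefetching the next element while working on the current one, a common pattern for these helpers; node, head, and process() are hypothetical.

	struct item *node;	/* hypothetical list node with a 'next' field */

	for (node = head; node; node = node->next) {
		prefetch(node->next);	/* warm the cache before we need it */
		process(node);
	}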
#ifdef CONFIG_X86_32
/*
* User space process size: 3GB (default).
*/
-#define TASK_SIZE (PAGE_OFFSET)
-#define STACK_TOP TASK_SIZE
-#define STACK_TOP_MAX STACK_TOP
-
-#define INIT_THREAD { \
- .sp0 = sizeof(init_stack) + (long)&init_stack, \
- .vm86_info = NULL, \
- .sysenter_cs = __KERNEL_CS, \
- .io_bitmap_ptr = NULL, \
- .fs = __KERNEL_PERCPU, \
+#define TASK_SIZE PAGE_OFFSET
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+
+#define INIT_THREAD { \
+ .sp0 = sizeof(init_stack) + (long)&init_stack, \
+ .vm86_info = NULL, \
+ .sysenter_cs = __KERNEL_CS, \
+ .io_bitmap_ptr = NULL, \
+ .fs = __KERNEL_PERCPU, \
}
/*
@@ -731,28 +831,15 @@ static inline void prefetchw(const void *x)
* permission bitmap. The extra byte must be all 1 bits, and must
* be within the limit.
*/
-#define INIT_TSS { \
- .x86_tss = { \
+#define INIT_TSS { \
+ .x86_tss = { \
.sp0 = sizeof(init_stack) + (long)&init_stack, \
- .ss0 = __KERNEL_DS, \
- .ss1 = __KERNEL_CS, \
- .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
- }, \
- .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \
-}
-
-#define start_thread(regs, new_eip, new_esp) do { \
- __asm__("movl %0,%%gs": :"r" (0)); \
- regs->fs = 0; \
- set_fs(USER_DS); \
- regs->ds = __USER_DS; \
- regs->es = __USER_DS; \
- regs->ss = __USER_DS; \
- regs->cs = __USER_CS; \
- regs->ip = new_eip; \
- regs->sp = new_esp; \
-} while (0)
-
+ .ss0 = __KERNEL_DS, \
+ .ss1 = __KERNEL_CS, \
+ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
+ }, \
+ .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \
+}
extern unsigned long thread_saved_pc(struct task_struct *tsk);
@@ -780,24 +867,24 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
__regs__ - 1; \
})
-#define KSTK_ESP(task) (task_pt_regs(task)->sp)
+#define KSTK_ESP(task) (task_pt_regs(task)->sp)
#else
/*
 * User space process size: 47 bits minus one guard page.
*/
-#define TASK_SIZE64 (0x800000000000UL - 4096)
+#define TASK_SIZE64 ((1UL << 47) - PAGE_SIZE)
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
-#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
- 0xc0000000 : 0xFFFFe000)
+#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
+ 0xc0000000 : 0xFFFFe000)
-#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
- IA32_PAGE_OFFSET : TASK_SIZE64)
-#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \
- IA32_PAGE_OFFSET : TASK_SIZE64)
+#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
+ IA32_PAGE_OFFSET : TASK_SIZE64)
+#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \
+ IA32_PAGE_OFFSET : TASK_SIZE64)
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX TASK_SIZE64
@@ -810,33 +897,25 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}
-#define start_thread(regs, new_rip, new_rsp) do { \
- asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
- load_gs_index(0); \
- (regs)->ip = (new_rip); \
- (regs)->sp = (new_rsp); \
- write_pda(oldrsp, (new_rsp)); \
- (regs)->cs = __USER_CS; \
- (regs)->ss = __USER_DS; \
- (regs)->flags = 0x200; \
- set_fs(USER_DS); \
-} while (0)
-
/*
* Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
*/
-#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
+#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
-#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
-#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
+#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
+#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
#endif /* CONFIG_X86_64 */
-/* This decides where the kernel will search for a free chunk of vm
+extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+ unsigned long new_sp);
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
-#define KSTK_EIP(task) (task_pt_regs(task)->ip)
+#define KSTK_EIP(task) (task_pt_regs(task)->ip)
#endif
diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h
index 68563c0709ac..1e17bcce450e 100644
--- a/include/asm-x86/proto.h
+++ b/include/asm-x86/proto.h
@@ -7,8 +7,6 @@
extern void early_idt_handler(void);
-extern void init_memory_mapping(unsigned long start, unsigned long end);
-
extern void system_call(void);
extern void syscall_init(void);
@@ -26,7 +24,7 @@ extern int reboot_force;
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
-#define round_up(x,y) (((x) + (y) - 1) & ~((y)-1))
-#define round_down(x,y) ((x) & ~((y)-1))
+#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))
+#define round_down(x, y) ((x) & ~((y) - 1))
#endif
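
A worked, hedged example of the (power-of-two) rounding macros above, runnable as plain userland C:

	#include <assert.h>

	#define round_up(x, y)   (((x) + (y) - 1) & ~((y) - 1))
	#define round_down(x, y) ((x) & ~((y) - 1))

	int main(void)
	{
		assert(round_up(13, 8)   == 16);	/* (13 + 7) & ~7 */
		assert(round_up(16, 8)   == 16);	/* already aligned */
		assert(round_down(13, 8) == 8);
		return 0;
	}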
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index d9e04b46a440..24ec061566c5 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -36,23 +36,23 @@ struct pt_regs {
#else /* __KERNEL__ */
struct pt_regs {
- long bx;
- long cx;
- long dx;
- long si;
- long di;
- long bp;
- long ax;
- int ds;
- int es;
- int fs;
+ unsigned long bx;
+ unsigned long cx;
+ unsigned long dx;
+ unsigned long si;
+ unsigned long di;
+ unsigned long bp;
+ unsigned long ax;
+ unsigned long ds;
+ unsigned long es;
+ unsigned long fs;
/* int gs; */
- long orig_ax;
- long ip;
- int cs;
- long flags;
- long sp;
- int ss;
+ unsigned long orig_ax;
+ unsigned long ip;
+ unsigned long cs;
+ unsigned long flags;
+ unsigned long sp;
+ unsigned long ss;
};
#include <asm/vm86.h>
@@ -140,12 +140,16 @@ extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
#ifdef CONFIG_X86_32
-extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
+extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+ int error_code);
#else
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
#endif
-#define regs_return_value(regs) ((regs)->ax)
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+ return regs->ax;
+}
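
Hedged usage sketch: reading a traced child's syscall return value, assuming task_pt_regs() as declared earlier in this header; child is hypothetical.

	unsigned long ret = regs_return_value(task_pt_regs(child));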
/*
* user_mode_vm(regs) determines whether a register set came from user mode.
@@ -166,8 +170,8 @@ static inline int user_mode(struct pt_regs *regs)
static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
- return ((regs->cs & SEGMENT_RPL_MASK) |
- (regs->flags & VM_MASK)) >= USER_RPL;
+ return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
+ USER_RPL;
#else
return user_mode(regs);
#endif
@@ -176,7 +180,7 @@ static inline int user_mode_vm(struct pt_regs *regs)
static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
- return (regs->flags & VM_MASK);
+ return (regs->flags & X86_VM_MASK);
#else
return 0; /* No V86 mode support in long mode */
#endif
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h
index e9e3ffc22c07..6b5233b4f84b 100644
--- a/include/asm-x86/reboot.h
+++ b/include/asm-x86/reboot.h
@@ -3,8 +3,7 @@
struct pt_regs;
-struct machine_ops
-{
+struct machine_ops {
void (*restart)(char *cmd);
void (*halt)(void);
void (*power_off)(void);
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h
index 46f725b0bc82..2557514d7ef6 100644
--- a/include/asm-x86/resume-trace.h
+++ b/include/asm-x86/resume-trace.h
@@ -3,16 +3,17 @@
#include <asm/asm.h>
-#define TRACE_RESUME(user) do { \
+#define TRACE_RESUME(user) \
+do { \
if (pm_trace_enabled) { \
void *tracedata; \
asm volatile(_ASM_MOV_UL " $1f,%0\n" \
- ".section .tracedata,\"a\"\n" \
- "1:\t.word %c1\n\t" \
- _ASM_PTR " %c2\n" \
- ".previous" \
- :"=r" (tracedata) \
- : "i" (__LINE__), "i" (__FILE__)); \
+ ".section .tracedata,\"a\"\n" \
+ "1:\t.word %c1\n\t" \
+ _ASM_PTR " %c2\n" \
+ ".previous" \
+ :"=r" (tracedata) \
+ : "i" (__LINE__), "i" (__FILE__)); \
generate_resume_trace(tracedata, user); \
} \
} while (0)
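
Hedged illustration of how this macro is invoked from resume paths; the integer argument is simply forwarded to generate_resume_trace().

	TRACE_RESUME(0);	/* records this point when pm_trace is enabled */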
diff --git a/include/asm-x86/rio.h b/include/asm-x86/rio.h
index 97cdcc9887ba..3451c576e6af 100644
--- a/include/asm-x86/rio.h
+++ b/include/asm-x86/rio.h
@@ -11,53 +11,53 @@
#define RIO_TABLE_VERSION 3
struct rio_table_hdr {
- u8 version; /* Version number of this data structure */
- u8 num_scal_dev; /* # of Scalability devices */
- u8 num_rio_dev; /* # of RIO I/O devices */
+ u8 version; /* Version number of this data structure */
+ u8 num_scal_dev; /* # of Scalability devices */
+ u8 num_rio_dev; /* # of RIO I/O devices */
} __attribute__((packed));
struct scal_detail {
- u8 node_id; /* Scalability Node ID */
- u32 CBAR; /* Address of 1MB register space */
- u8 port0node; /* Node ID port connected to: 0xFF=None */
- u8 port0port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 port1node; /* Node ID port connected to: 0xFF = None */
- u8 port1port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 port2node; /* Node ID port connected to: 0xFF = None */
- u8 port2port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 chassis_num; /* 1 based Chassis number (1 = boot node) */
+ u8 node_id; /* Scalability Node ID */
+ u32 CBAR; /* Address of 1MB register space */
+ u8 port0node; /* Node ID port connected to: 0xFF=None */
+ u8 port0port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 port1node; /* Node ID port connected to: 0xFF = None */
+ u8 port1port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 port2node; /* Node ID port connected to: 0xFF = None */
+ u8 port2port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 chassis_num; /* 1 based Chassis number (1 = boot node) */
} __attribute__((packed));
struct rio_detail {
- u8 node_id; /* RIO Node ID */
- u32 BBAR; /* Address of 1MB register space */
- u8 type; /* Type of device */
- u8 owner_id; /* Node ID of Hurricane that owns this */
- /* node */
- u8 port0node; /* Node ID port connected to: 0xFF=None */
- u8 port0port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 port1node; /* Node ID port connected to: 0xFF=None */
- u8 port1port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 first_slot; /* Lowest slot number below this Calgary */
- u8 status; /* Bit 0 = 1 : the XAPIC is used */
- /* = 0 : the XAPIC is not used, ie: */
- /* ints fwded to another XAPIC */
- /* Bits1:7 Reserved */
- u8 WP_index; /* instance index - lower ones have */
- /* lower slot numbers/PCI bus numbers */
- u8 chassis_num; /* 1 based Chassis number */
+ u8 node_id; /* RIO Node ID */
+ u32 BBAR; /* Address of 1MB register space */
+ u8 type; /* Type of device */
+ u8 owner_id; /* Node ID of Hurricane that owns this */
+ /* node */
+ u8 port0node; /* Node ID port connected to: 0xFF=None */
+ u8 port0port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 port1node; /* Node ID port connected to: 0xFF=None */
+ u8 port1port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 first_slot; /* Lowest slot number below this Calgary */
+ u8 status; /* Bit 0 = 1 : the XAPIC is used */
+ /* = 0 : the XAPIC is not used, ie: */
+ /* ints fwded to another XAPIC */
+ /* Bits1:7 Reserved */
+ u8 WP_index; /* instance index - lower ones have */
+ /* lower slot numbers/PCI bus numbers */
+ u8 chassis_num; /* 1 based Chassis number */
} __attribute__((packed));
enum {
- HURR_SCALABILTY = 0, /* Hurricane Scalability info */
- HURR_RIOIB = 2, /* Hurricane RIOIB info */
- COMPAT_CALGARY = 4, /* Compatibility Calgary */
- ALT_CALGARY = 5, /* Second Planar Calgary */
+ HURR_SCALABILTY = 0, /* Hurricane Scalability info */
+ HURR_RIOIB = 2, /* Hurricane RIOIB info */
+ COMPAT_CALGARY = 4, /* Compatibility Calgary */
+ ALT_CALGARY = 5, /* Second Planar Calgary */
};
/*
diff --git a/include/asm-x86/rwsem.h b/include/asm-x86/rwsem.h
index 520a379f4b80..750f2a3542b3 100644
--- a/include/asm-x86/rwsem.h
+++ b/include/asm-x86/rwsem.h
@@ -56,14 +56,16 @@ extern asmregparm struct rw_semaphore *
/*
* the semaphore definition
*/
-struct rw_semaphore {
- signed long count;
+
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define R