From 96a388de5dc53a8b234b3fd41f3ae2cedc9ffd42 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Thu, 11 Oct 2007 11:20:03 +0200
Subject: i386/x86_64: move headers to include/asm-x86

Move the headers to include/asm-x86 and fixup the header install make rules

Signed-off-by: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 include/asm-x86/8253pit.h | 5 +
 include/asm-x86/8253pit_32.h | 12 +
 include/asm-x86/8253pit_64.h | 10 +
 include/asm-x86/Kbuild | 88 ++
 include/asm-x86/a.out.h | 13 +
 include/asm-x86/a.out_32.h | 27 +
 include/asm-x86/a.out_64.h | 28 +
 include/asm-x86/acpi.h | 5 +
 include/asm-x86/acpi_32.h | 147 +++
 include/asm-x86/acpi_64.h | 153 +++
 include/asm-x86/agp.h | 5 +
 include/asm-x86/agp_32.h | 36 +
 include/asm-x86/agp_64.h | 34 +
 include/asm-x86/alternative-asm.i | 5 +
 include/asm-x86/alternative-asm_32.i | 12 +
 include/asm-x86/alternative-asm_64.i | 12 +
 include/asm-x86/alternative.h | 5 +
 include/asm-x86/alternative_32.h | 154 +++
 include/asm-x86/alternative_64.h | 159 +++
 include/asm-x86/apic.h | 5 +
 include/asm-x86/apic_32.h | 126 +++
 include/asm-x86/apic_64.h | 107 ++
 include/asm-x86/apicdef.h | 5 +
 include/asm-x86/apicdef_32.h | 375 +++++++
 include/asm-x86/apicdef_64.h | 392 ++++++++
 include/asm-x86/arch_hooks.h | 30 +
 include/asm-x86/atomic.h | 5 +
 include/asm-x86/atomic_32.h | 266 +++++
 include/asm-x86/atomic_64.h | 466 +++++++++
 include/asm-x86/auxvec.h | 13 +
 include/asm-x86/auxvec_32.h | 11 +
 include/asm-x86/auxvec_64.h | 6 +
 include/asm-x86/bitops.h | 5 +
 include/asm-x86/bitops_32.h | 423 ++++++++
 include/asm-x86/bitops_64.h | 427 ++++++++
 include/asm-x86/boot.h | 20 +
 include/asm-x86/bootparam.h | 86 ++
 include/asm-x86/bootsetup.h | 40 +
 include/asm-x86/bug.h | 5 +
 include/asm-x86/bug_32.h | 37 +
 include/asm-x86/bug_64.h | 34 +
 include/asm-x86/bugs.h | 5 +
 include/asm-x86/bugs_32.h | 12 +
 include/asm-x86/bugs_64.h | 6 +
 include/asm-x86/byteorder.h | 13 +
 include/asm-x86/byteorder_32.h | 58 ++
 include/asm-x86/byteorder_64.h | 33 +
 include/asm-x86/cache.h | 5 +
 include/asm-x86/cache_32.h | 14 +
 include/asm-x86/cache_64.h | 26 +
 include/asm-x86/cacheflush.h | 5 +
 include/asm-x86/cacheflush_32.h | 39 +
 include/asm-x86/cacheflush_64.h | 35 +
 include/asm-x86/calgary.h | 72 ++
 include/asm-x86/calling.h | 162 +++
 include/asm-x86/checksum.h | 5 +
 include/asm-x86/checksum_32.h | 191 ++++
 include/asm-x86/checksum_64.h | 195 ++++
 include/asm-x86/cmpxchg.h | 5 +
 include/asm-x86/cmpxchg_32.h | 289 ++++++
 include/asm-x86/cmpxchg_64.h | 134 +++
 include/asm-x86/compat.h | 212 ++++
 include/asm-x86/cpu.h | 22 +
 include/asm-x86/cpufeature.h | 5 +
 include/asm-x86/cpufeature_32.h | 175 ++++
 include/asm-x86/cpufeature_64.h | 30 +
 include/asm-x86/cputime.h | 5 +
 include/asm-x86/cputime_32.h | 6 +
 include/asm-x86/cputime_64.h | 6 +
 include/asm-x86/current.h | 5 +
 include/asm-x86/current_32.h | 17 +
 include/asm-x86/current_64.h | 27 +
 include/asm-x86/debugreg.h | 13 +
 include/asm-x86/debugreg_32.h | 64 ++
 include/asm-x86/debugreg_64.h | 65 ++
 include/asm-x86/delay.h | 5 +
 include/asm-x86/delay_32.h | 31 +
 include/asm-x86/delay_64.h | 30 +
 include/asm-x86/desc.h | 5 +
 include/asm-x86/desc_32.h | 244 +++++
 include/asm-x86/desc_64.h | 174 ++++
 include/asm-x86/desc_defs.h | 69 ++
 include/asm-x86/device.h | 5 +
 include/asm-x86/device_32.h | 15 +
 include/asm-x86/device_64.h | 15 +
 include/asm-x86/div64.h | 5 +
 include/asm-x86/div64_32.h | 52 +
 include/asm-x86/div64_64.h | 1 +
 include/asm-x86/dma-mapping.h | 5 +
 include/asm-x86/dma-mapping_32.h | 186 ++++
 include/asm-x86/dma-mapping_64.h | 203 ++++
include/asm-x86/dma.h | 5 + include/asm-x86/dma_32.h | 297 ++++++ include/asm-x86/dma_64.h | 304 ++++++ include/asm-x86/dmi.h | 5 + include/asm-x86/dmi_32.h | 11 + include/asm-x86/dmi_64.h | 24 + include/asm-x86/dwarf2.h | 5 + include/asm-x86/dwarf2_32.h | 61 ++ include/asm-x86/dwarf2_64.h | 57 ++ include/asm-x86/e820.h | 5 + include/asm-x86/e820_32.h | 60 ++ include/asm-x86/e820_64.h | 61 ++ include/asm-x86/edac.h | 5 + include/asm-x86/edac_32.h | 18 + include/asm-x86/edac_64.h | 18 + include/asm-x86/elf.h | 13 + include/asm-x86/elf_32.h | 163 ++++ include/asm-x86/elf_64.h | 180 ++++ include/asm-x86/emergency-restart.h | 6 + include/asm-x86/errno.h | 13 + include/asm-x86/errno_32.h | 6 + include/asm-x86/errno_64.h | 6 + include/asm-x86/fb.h | 5 + include/asm-x86/fb_32.h | 17 + include/asm-x86/fb_64.h | 19 + include/asm-x86/fcntl.h | 1 + include/asm-x86/fixmap.h | 5 + include/asm-x86/fixmap_32.h | 157 +++ include/asm-x86/fixmap_64.h | 92 ++ include/asm-x86/floppy.h | 5 + include/asm-x86/floppy_32.h | 284 ++++++ include/asm-x86/floppy_64.h | 283 ++++++ include/asm-x86/fpu32.h | 10 + include/asm-x86/frame.i | 23 + include/asm-x86/futex.h | 5 + include/asm-x86/futex_32.h | 135 +++ include/asm-x86/futex_64.h | 125 +++ include/asm-x86/genapic.h | 5 + include/asm-x86/genapic_32.h | 127 +++ include/asm-x86/genapic_64.h | 37 + include/asm-x86/geode.h | 159 +++ include/asm-x86/hardirq.h | 5 + include/asm-x86/hardirq_32.h | 23 + include/asm-x86/hardirq_64.h | 23 + include/asm-x86/highmem.h | 85 ++ include/asm-x86/hpet.h | 5 + include/asm-x86/hpet_32.h | 90 ++ include/asm-x86/hpet_64.h | 18 + include/asm-x86/hw_irq.h | 5 + include/asm-x86/hw_irq_32.h | 66 ++ include/asm-x86/hw_irq_64.h | 175 ++++ include/asm-x86/hypertransport.h | 42 + include/asm-x86/i387.h | 5 + include/asm-x86/i387_32.h | 151 +++ include/asm-x86/i387_64.h | 209 ++++ include/asm-x86/i8253.h | 5 + include/asm-x86/i8253_32.h | 17 + include/asm-x86/i8253_64.h | 6 + include/asm-x86/i8259.h | 17 + include/asm-x86/ia32.h | 178 ++++ include/asm-x86/ia32_unistd.h | 18 + include/asm-x86/ide.h | 78 ++ include/asm-x86/idle.h | 14 + include/asm-x86/intel_arch_perfmon.h | 5 + include/asm-x86/intel_arch_perfmon_32.h | 31 + include/asm-x86/intel_arch_perfmon_64.h | 31 + include/asm-x86/io.h | 5 + include/asm-x86/io_32.h | 349 +++++++ include/asm-x86/io_64.h | 276 ++++++ include/asm-x86/io_apic.h | 5 + include/asm-x86/io_apic_32.h | 155 +++ include/asm-x86/io_apic_64.h | 136 +++ include/asm-x86/ioctl.h | 1 + include/asm-x86/ioctls.h | 13 + include/asm-x86/ioctls_32.h | 87 ++ include/asm-x86/ioctls_64.h | 86 ++ include/asm-x86/iommu.h | 29 + include/asm-x86/ipc.h | 1 + include/asm-x86/ipcbuf.h | 13 + include/asm-x86/ipcbuf_32.h | 29 + include/asm-x86/ipcbuf_64.h | 29 + include/asm-x86/ipi.h | 128 +++ include/asm-x86/irq.h | 5 + include/asm-x86/irq_32.h | 48 + include/asm-x86/irq_64.h | 51 + include/asm-x86/irq_regs.h | 5 + include/asm-x86/irq_regs_32.h | 29 + include/asm-x86/irq_regs_64.h | 1 + include/asm-x86/irqflags.h | 5 + include/asm-x86/irqflags_32.h | 163 ++++ include/asm-x86/irqflags_64.h | 142 +++ include/asm-x86/ist.h | 34 + include/asm-x86/k8.h | 14 + include/asm-x86/kdebug.h | 5 + include/asm-x86/kdebug_32.h | 33 + include/asm-x86/kdebug_64.h | 36 + include/asm-x86/kexec.h | 5 + include/asm-x86/kexec_32.h | 99 ++ include/asm-x86/kexec_64.h | 94 ++ include/asm-x86/kmap_types.h | 5 + include/asm-x86/kmap_types_32.h | 30 + include/asm-x86/kmap_types_64.h | 19 + include/asm-x86/kprobes.h | 5 + include/asm-x86/kprobes_32.h | 92 ++ 
include/asm-x86/kprobes_64.h | 90 ++ include/asm-x86/ldt.h | 13 + include/asm-x86/ldt_32.h | 32 + include/asm-x86/ldt_64.h | 36 + include/asm-x86/linkage.h | 5 + include/asm-x86/linkage_32.h | 15 + include/asm-x86/linkage_64.h | 6 + include/asm-x86/local.h | 5 + include/asm-x86/local_32.h | 233 +++++ include/asm-x86/local_64.h | 222 +++++ include/asm-x86/mach-bigsmp/mach_apic.h | 158 +++ include/asm-x86/mach-bigsmp/mach_apicdef.h | 13 + include/asm-x86/mach-bigsmp/mach_ipi.h | 25 + include/asm-x86/mach-bigsmp/mach_mpspec.h | 8 + include/asm-x86/mach-default/apm.h | 75 ++ include/asm-x86/mach-default/bios_ebda.h | 15 + include/asm-x86/mach-default/do_timer.h | 16 + include/asm-x86/mach-default/entry_arch.h | 34 + include/asm-x86/mach-default/io_ports.h | 25 + include/asm-x86/mach-default/irq_vectors.h | 96 ++ include/asm-x86/mach-default/irq_vectors_limits.h | 16 + include/asm-x86/mach-default/mach_apic.h | 131 +++ include/asm-x86/mach-default/mach_apicdef.h | 13 + include/asm-x86/mach-default/mach_ipi.h | 54 + include/asm-x86/mach-default/mach_mpparse.h | 28 + include/asm-x86/mach-default/mach_mpspec.h | 12 + include/asm-x86/mach-default/mach_reboot.h | 61 ++ include/asm-x86/mach-default/mach_time.h | 111 +++ include/asm-x86/mach-default/mach_timer.h | 50 + include/asm-x86/mach-default/mach_traps.h | 41 + include/asm-x86/mach-default/mach_wakecpu.h | 42 + include/asm-x86/mach-default/pci-functions.h | 19 + include/asm-x86/mach-default/setup_arch.h | 7 + include/asm-x86/mach-default/smpboot_hooks.h | 44 + include/asm-x86/mach-es7000/mach_apic.h | 206 ++++ include/asm-x86/mach-es7000/mach_apicdef.h | 13 + include/asm-x86/mach-es7000/mach_ipi.h | 24 + include/asm-x86/mach-es7000/mach_mpparse.h | 40 + include/asm-x86/mach-es7000/mach_mpspec.h | 8 + include/asm-x86/mach-es7000/mach_wakecpu.h | 59 ++ include/asm-x86/mach-generic/irq_vectors_limits.h | 14 + include/asm-x86/mach-generic/mach_apic.h | 33 + include/asm-x86/mach-generic/mach_apicdef.h | 11 + include/asm-x86/mach-generic/mach_ipi.h | 10 + include/asm-x86/mach-generic/mach_mpparse.h | 12 + include/asm-x86/mach-generic/mach_mpspec.h | 10 + include/asm-x86/mach-numaq/mach_apic.h | 149 +++ include/asm-x86/mach-numaq/mach_apicdef.h | 14 + include/asm-x86/mach-numaq/mach_ipi.h | 25 + include/asm-x86/mach-numaq/mach_mpparse.h | 29 + include/asm-x86/mach-numaq/mach_mpspec.h | 8 + include/asm-x86/mach-numaq/mach_wakecpu.h | 43 + include/asm-x86/mach-summit/irq_vectors_limits.h | 14 + include/asm-x86/mach-summit/mach_apic.h | 197 ++++ include/asm-x86/mach-summit/mach_apicdef.h | 13 + include/asm-x86/mach-summit/mach_ipi.h | 25 + include/asm-x86/mach-summit/mach_mpparse.h | 121 +++ include/asm-x86/mach-summit/mach_mpspec.h | 9 + include/asm-x86/mach-visws/cobalt.h | 125 +++ include/asm-x86/mach-visws/entry_arch.h | 23 + include/asm-x86/mach-visws/irq_vectors.h | 62 ++ include/asm-x86/mach-visws/lithium.h | 53 + include/asm-x86/mach-visws/mach_apic.h | 103 ++ include/asm-x86/mach-visws/mach_apicdef.h | 12 + include/asm-x86/mach-visws/piix4.h | 107 ++ include/asm-x86/mach-visws/setup_arch.h | 8 + include/asm-x86/mach-visws/smpboot_hooks.h | 24 + include/asm-x86/mach-voyager/do_timer.h | 18 + include/asm-x86/mach-voyager/entry_arch.h | 26 + include/asm-x86/mach-voyager/irq_vectors.h | 79 ++ include/asm-x86/mach-voyager/setup_arch.h | 10 + include/asm-x86/mach_apic.h | 29 + include/asm-x86/math_emu.h | 36 + include/asm-x86/mc146818rtc.h | 5 + include/asm-x86/mc146818rtc_32.h | 97 ++ include/asm-x86/mc146818rtc_64.h | 29 + include/asm-x86/mca.h | 43 
+ include/asm-x86/mca_dma.h | 201 ++++ include/asm-x86/mce.h | 5 + include/asm-x86/mce_32.h | 11 + include/asm-x86/mce_64.h | 115 +++ include/asm-x86/mman.h | 13 + include/asm-x86/mman_32.h | 17 + include/asm-x86/mman_64.h | 19 + include/asm-x86/mmsegment.h | 8 + include/asm-x86/mmu.h | 5 + include/asm-x86/mmu_32.h | 18 + include/asm-x86/mmu_64.h | 21 + include/asm-x86/mmu_context.h | 5 + include/asm-x86/mmu_context_32.h | 86 ++ include/asm-x86/mmu_context_64.h | 74 ++ include/asm-x86/mmx.h | 14 + include/asm-x86/mmzone.h | 5 + include/asm-x86/mmzone_32.h | 145 +++ include/asm-x86/mmzone_64.h | 56 ++ include/asm-x86/module.h | 5 + include/asm-x86/module_32.h | 75 ++ include/asm-x86/module_64.h | 10 + include/asm-x86/mpspec.h | 5 + include/asm-x86/mpspec_32.h | 81 ++ include/asm-x86/mpspec_64.h | 233 +++++ include/asm-x86/mpspec_def.h | 186 ++++ include/asm-x86/msgbuf.h | 13 + include/asm-x86/msgbuf_32.h | 31 + include/asm-x86/msgbuf_64.h | 27 + include/asm-x86/msidef.h | 47 + include/asm-x86/msr-index.h | 278 ++++++ include/asm-x86/msr.h | 13 + include/asm-x86/msr_32.h | 161 +++ include/asm-x86/msr_64.h | 187 ++++ include/asm-x86/mtrr.h | 13 + include/asm-x86/mtrr_32.h | 115 +++ include/asm-x86/mtrr_64.h | 152 +++ include/asm-x86/mutex.h | 5 + include/asm-x86/mutex_32.h | 130 +++ include/asm-x86/mutex_64.h | 105 ++ include/asm-x86/namei.h | 5 + include/asm-x86/namei_32.h | 17 + include/asm-x86/namei_64.h | 11 + include/asm-x86/nmi.h | 5 + include/asm-x86/nmi_32.h | 64 ++ include/asm-x86/nmi_64.h | 95 ++ include/asm-x86/numa.h | 5 + include/asm-x86/numa_32.h | 3 + include/asm-x86/numa_64.h | 38 + include/asm-x86/numaq.h | 164 ++++ include/asm-x86/page.h | 13 + include/asm-x86/page_32.h | 206 ++++ include/asm-x86/page_64.h | 143 +++ include/asm-x86/param.h | 13 + include/asm-x86/param_32.h | 22 + include/asm-x86/param_64.h | 22 + include/asm-x86/paravirt.h | 1085 +++++++++++++++++++++ include/asm-x86/parport.h | 5 + include/asm-x86/parport_32.h | 18 + include/asm-x86/parport_64.h | 18 + include/asm-x86/pci-direct.h | 17 + include/asm-x86/pci.h | 5 + include/asm-x86/pci_32.h | 90 ++ include/asm-x86/pci_64.h | 126 +++ include/asm-x86/pda.h | 125 +++ include/asm-x86/percpu.h | 5 + include/asm-x86/percpu_32.h | 154 +++ include/asm-x86/percpu_64.h | 68 ++ include/asm-x86/pgalloc.h | 5 + include/asm-x86/pgalloc_32.h | 68 ++ include/asm-x86/pgalloc_64.h | 119 +++ include/asm-x86/pgtable-2level-defs.h | 20 + include/asm-x86/pgtable-2level.h | 86 ++ include/asm-x86/pgtable-3level-defs.h | 28 + include/asm-x86/pgtable-3level.h | 192 ++++ include/asm-x86/pgtable.h | 5 + include/asm-x86/pgtable_32.h | 512 ++++++++++ include/asm-x86/pgtable_64.h | 432 ++++++++ include/asm-x86/poll.h | 1 + include/asm-x86/posix_types.h | 13 + include/asm-x86/posix_types_32.h | 82 ++ include/asm-x86/posix_types_64.h | 119 +++ include/asm-x86/prctl.h | 10 + include/asm-x86/processor-cyrix.h | 30 + include/asm-x86/processor-flags.h | 91 ++ include/asm-x86/processor.h | 5 + include/asm-x86/processor_32.h | 755 ++++++++++++++ include/asm-x86/processor_64.h | 439 +++++++++ include/asm-x86/proto.h | 104 ++ include/asm-x86/ptrace-abi.h | 13 + include/asm-x86/ptrace-abi_32.h | 39 + include/asm-x86/ptrace-abi_64.h | 51 + include/asm-x86/ptrace.h | 13 + include/asm-x86/ptrace_32.h | 63 ++ include/asm-x86/ptrace_64.h | 78 ++ include/asm-x86/reboot.h | 20 + include/asm-x86/reboot_fixups.h | 6 + include/asm-x86/required-features.h | 5 + include/asm-x86/required-features_32.h | 55 ++ include/asm-x86/required-features_64.h | 46 + 
include/asm-x86/resource.h | 13 + include/asm-x86/resource_32.h | 6 + include/asm-x86/resource_64.h | 6 + include/asm-x86/resume-trace.h | 5 + include/asm-x86/resume-trace_32.h | 13 + include/asm-x86/resume-trace_64.h | 13 + include/asm-x86/rio.h | 74 ++ include/asm-x86/rtc.h | 5 + include/asm-x86/rtc_32.h | 10 + include/asm-x86/rtc_64.h | 10 + include/asm-x86/rwlock.h | 5 + include/asm-x86/rwlock_32.h | 25 + include/asm-x86/rwlock_64.h | 26 + include/asm-x86/rwsem.h | 258 +++++ include/asm-x86/scatterlist.h | 5 + include/asm-x86/scatterlist_32.h | 23 + include/asm-x86/scatterlist_64.h | 24 + include/asm-x86/seccomp.h | 5 + include/asm-x86/seccomp_32.h | 16 + include/asm-x86/seccomp_64.h | 24 + include/asm-x86/sections.h | 5 + include/asm-x86/sections_32.h | 7 + include/asm-x86/sections_64.h | 7 + include/asm-x86/segment.h | 5 + include/asm-x86/segment_32.h | 148 +++ include/asm-x86/segment_64.h | 53 + include/asm-x86/semaphore.h | 5 + include/asm-x86/semaphore_32.h | 176 ++++ include/asm-x86/semaphore_64.h | 181 ++++ include/asm-x86/sembuf.h | 13 + include/asm-x86/sembuf_32.h | 25 + include/asm-x86/sembuf_64.h | 25 + include/asm-x86/serial.h | 5 + include/asm-x86/serial_32.h | 29 + include/asm-x86/serial_64.h | 29 + include/asm-x86/setup.h | 13 + include/asm-x86/setup_32.h | 92 ++ include/asm-x86/setup_64.h | 6 + include/asm-x86/shmbuf.h | 13 + include/asm-x86/shmbuf_32.h | 42 + include/asm-x86/shmbuf_64.h | 38 + include/asm-x86/shmparam.h | 13 + include/asm-x86/shmparam_32.h | 6 + include/asm-x86/shmparam_64.h | 6 + include/asm-x86/sigcontext.h | 13 + include/asm-x86/sigcontext32.h | 71 ++ include/asm-x86/sigcontext_32.h | 85 ++ include/asm-x86/sigcontext_64.h | 55 ++ include/asm-x86/siginfo.h | 13 + include/asm-x86/siginfo_32.h | 6 + include/asm-x86/siginfo_64.h | 8 + include/asm-x86/signal.h | 13 + include/asm-x86/signal_32.h | 232 +++++ include/asm-x86/signal_64.h | 181 ++++ include/asm-x86/smp.h | 5 + include/asm-x86/smp_32.h | 182 ++++ include/asm-x86/smp_64.h | 117 +++ include/asm-x86/socket.h | 55 ++ include/asm-x86/sockios.h | 13 + include/asm-x86/sockios_32.h | 13 + include/asm-x86/sockios_64.h | 13 + include/asm-x86/sparsemem.h | 5 + include/asm-x86/sparsemem_32.h | 31 + include/asm-x86/sparsemem_64.h | 26 + include/asm-x86/spinlock.h | 5 + include/asm-x86/spinlock_32.h | 221 +++++ include/asm-x86/spinlock_64.h | 167 ++++ include/asm-x86/spinlock_types.h | 20 + include/asm-x86/srat.h | 37 + include/asm-x86/stacktrace.h | 20 + include/asm-x86/stat.h | 13 + include/asm-x86/stat_32.h | 77 ++ include/asm-x86/stat_64.h | 44 + include/asm-x86/statfs.h | 13 + include/asm-x86/statfs_32.h | 6 + include/asm-x86/statfs_64.h | 58 ++ include/asm-x86/string.h | 5 + include/asm-x86/string_32.h | 276 ++++++ include/asm-x86/string_64.h | 60 ++ include/asm-x86/suspend.h | 5 + include/asm-x86/suspend_32.h | 46 + include/asm-x86/suspend_64.h | 55 ++ include/asm-x86/swiotlb.h | 56 ++ include/asm-x86/sync_bitops.h | 156 +++ include/asm-x86/system.h | 5 + include/asm-x86/system_32.h | 313 ++++++ include/asm-x86/system_64.h | 180 ++++ include/asm-x86/tce.h | 48 + include/asm-x86/termbits.h | 13 + include/asm-x86/termbits_32.h | 198 ++++ include/asm-x86/termbits_64.h | 198 ++++ include/asm-x86/termios.h | 13 + include/asm-x86/termios_32.h | 90 ++ include/asm-x86/termios_64.h | 90 ++ include/asm-x86/therm_throt.h | 9 + include/asm-x86/thread_info.h | 5 + include/asm-x86/thread_info_32.h | 180 ++++ include/asm-x86/thread_info_64.h | 169 ++++ include/asm-x86/time.h | 44 + include/asm-x86/timer.h | 50 + 
include/asm-x86/timex.h | 5 + include/asm-x86/timex_32.h | 22 + include/asm-x86/timex_64.h | 31 + include/asm-x86/tlb.h | 5 + include/asm-x86/tlb_32.h | 20 + include/asm-x86/tlb_64.h | 13 + include/asm-x86/tlbflush.h | 5 + include/asm-x86/tlbflush_32.h | 175 ++++ include/asm-x86/tlbflush_64.h | 109 +++ include/asm-x86/topology.h | 5 + include/asm-x86/topology_32.h | 121 +++ include/asm-x86/topology_64.h | 71 ++ include/asm-x86/tsc.h | 75 ++ include/asm-x86/types.h | 13 + include/asm-x86/types_32.h | 64 ++ include/asm-x86/types_64.h | 55 ++ include/asm-x86/uaccess.h | 5 + include/asm-x86/uaccess_32.h | 590 +++++++++++ include/asm-x86/uaccess_64.h | 384 ++++++++ include/asm-x86/ucontext.h | 13 + include/asm-x86/ucontext_32.h | 12 + include/asm-x86/ucontext_64.h | 12 + include/asm-x86/unaligned.h | 5 + include/asm-x86/unaligned_32.h | 37 + include/asm-x86/unaligned_64.h | 37 + include/asm-x86/unistd.h | 13 + include/asm-x86/unistd_32.h | 373 +++++++ include/asm-x86/unistd_64.h | 687 +++++++++++++ include/asm-x86/unwind.h | 5 + include/asm-x86/unwind_32.h | 13 + include/asm-x86/unwind_64.h | 12 + include/asm-x86/user.h | 13 + include/asm-x86/user32.h | 69 ++ include/asm-x86/user_32.h | 121 +++ include/asm-x86/user_64.h | 114 +++ include/asm-x86/vga.h | 20 + include/asm-x86/vgtod.h | 29 + include/asm-x86/vic.h | 61 ++ include/asm-x86/vm86.h | 215 ++++ include/asm-x86/vmi.h | 263 +++++ include/asm-x86/vmi_time.h | 98 ++ include/asm-x86/voyager.h | 517 ++++++++++ include/asm-x86/vsyscall.h | 44 + include/asm-x86/vsyscall32.h | 20 + include/asm-x86/xen/hypercall.h | 413 ++++++++ include/asm-x86/xen/hypervisor.h | 73 ++ include/asm-x86/xen/interface.h | 188 ++++ include/asm-x86/xor.h | 5 + include/asm-x86/xor_32.h | 883 +++++++++++++++++ include/asm-x86/xor_64.h | 354 +++++++ 521 files changed, 37717 insertions(+) create mode 100644 include/asm-x86/8253pit.h create mode 100644 include/asm-x86/8253pit_32.h create mode 100644 include/asm-x86/8253pit_64.h create mode 100644 include/asm-x86/Kbuild create mode 100644 include/asm-x86/a.out.h create mode 100644 include/asm-x86/a.out_32.h create mode 100644 include/asm-x86/a.out_64.h create mode 100644 include/asm-x86/acpi.h create mode 100644 include/asm-x86/acpi_32.h create mode 100644 include/asm-x86/acpi_64.h create mode 100644 include/asm-x86/agp.h create mode 100644 include/asm-x86/agp_32.h create mode 100644 include/asm-x86/agp_64.h create mode 100644 include/asm-x86/alternative-asm.i create mode 100644 include/asm-x86/alternative-asm_32.i create mode 100644 include/asm-x86/alternative-asm_64.i create mode 100644 include/asm-x86/alternative.h create mode 100644 include/asm-x86/alternative_32.h create mode 100644 include/asm-x86/alternative_64.h create mode 100644 include/asm-x86/apic.h create mode 100644 include/asm-x86/apic_32.h create mode 100644 include/asm-x86/apic_64.h create mode 100644 include/asm-x86/apicdef.h create mode 100644 include/asm-x86/apicdef_32.h create mode 100644 include/asm-x86/apicdef_64.h create mode 100644 include/asm-x86/arch_hooks.h create mode 100644 include/asm-x86/atomic.h create mode 100644 include/asm-x86/atomic_32.h create mode 100644 include/asm-x86/atomic_64.h create mode 100644 include/asm-x86/auxvec.h create mode 100644 include/asm-x86/auxvec_32.h create mode 100644 include/asm-x86/auxvec_64.h create mode 100644 include/asm-x86/bitops.h create mode 100644 include/asm-x86/bitops_32.h create mode 100644 include/asm-x86/bitops_64.h create mode 100644 include/asm-x86/boot.h create mode 100644 
include/asm-x86/bootparam.h create mode 100644 include/asm-x86/bootsetup.h create mode 100644 include/asm-x86/bug.h create mode 100644 include/asm-x86/bug_32.h create mode 100644 include/asm-x86/bug_64.h create mode 100644 include/asm-x86/bugs.h create mode 100644 include/asm-x86/bugs_32.h create mode 100644 include/asm-x86/bugs_64.h create mode 100644 include/asm-x86/byteorder.h create mode 100644 include/asm-x86/byteorder_32.h create mode 100644 include/asm-x86/byteorder_64.h create mode 100644 include/asm-x86/cache.h create mode 100644 include/asm-x86/cache_32.h create mode 100644 include/asm-x86/cache_64.h create mode 100644 include/asm-x86/cacheflush.h create mode 100644 include/asm-x86/cacheflush_32.h create mode 100644 include/asm-x86/cacheflush_64.h create mode 100644 include/asm-x86/calgary.h create mode 100644 include/asm-x86/calling.h create mode 100644 include/asm-x86/checksum.h create mode 100644 include/asm-x86/checksum_32.h create mode 100644 include/asm-x86/checksum_64.h create mode 100644 include/asm-x86/cmpxchg.h create mode 100644 include/asm-x86/cmpxchg_32.h create mode 100644 include/asm-x86/cmpxchg_64.h create mode 100644 include/asm-x86/compat.h create mode 100644 include/asm-x86/cpu.h create mode 100644 include/asm-x86/cpufeature.h create mode 100644 include/asm-x86/cpufeature_32.h create mode 100644 include/asm-x86/cpufeature_64.h create mode 100644 include/asm-x86/cputime.h create mode 100644 include/asm-x86/cputime_32.h create mode 100644 include/asm-x86/cputime_64.h create mode 100644 include/asm-x86/current.h create mode 100644 include/asm-x86/current_32.h create mode 100644 include/asm-x86/current_64.h create mode 100644 include/asm-x86/debugreg.h create mode 100644 include/asm-x86/debugreg_32.h create mode 100644 include/asm-x86/debugreg_64.h create mode 100644 include/asm-x86/delay.h create mode 100644 include/asm-x86/delay_32.h create mode 100644 include/asm-x86/delay_64.h create mode 100644 include/asm-x86/desc.h create mode 100644 include/asm-x86/desc_32.h create mode 100644 include/asm-x86/desc_64.h create mode 100644 include/asm-x86/desc_defs.h create mode 100644 include/asm-x86/device.h create mode 100644 include/asm-x86/device_32.h create mode 100644 include/asm-x86/device_64.h create mode 100644 include/asm-x86/div64.h create mode 100644 include/asm-x86/div64_32.h create mode 100644 include/asm-x86/div64_64.h create mode 100644 include/asm-x86/dma-mapping.h create mode 100644 include/asm-x86/dma-mapping_32.h create mode 100644 include/asm-x86/dma-mapping_64.h create mode 100644 include/asm-x86/dma.h create mode 100644 include/asm-x86/dma_32.h create mode 100644 include/asm-x86/dma_64.h create mode 100644 include/asm-x86/dmi.h create mode 100644 include/asm-x86/dmi_32.h create mode 100644 include/asm-x86/dmi_64.h create mode 100644 include/asm-x86/dwarf2.h create mode 100644 include/asm-x86/dwarf2_32.h create mode 100644 include/asm-x86/dwarf2_64.h create mode 100644 include/asm-x86/e820.h create mode 100644 include/asm-x86/e820_32.h create mode 100644 include/asm-x86/e820_64.h create mode 100644 include/asm-x86/edac.h create mode 100644 include/asm-x86/edac_32.h create mode 100644 include/asm-x86/edac_64.h create mode 100644 include/asm-x86/elf.h create mode 100644 include/asm-x86/elf_32.h create mode 100644 include/asm-x86/elf_64.h create mode 100644 include/asm-x86/emergency-restart.h create mode 100644 include/asm-x86/errno.h create mode 100644 include/asm-x86/errno_32.h create mode 100644 include/asm-x86/errno_64.h create mode 100644 
include/asm-x86/fb.h create mode 100644 include/asm-x86/fb_32.h create mode 100644 include/asm-x86/fb_64.h create mode 100644 include/asm-x86/fcntl.h create mode 100644 include/asm-x86/fixmap.h create mode 100644 include/asm-x86/fixmap_32.h create mode 100644 include/asm-x86/fixmap_64.h create mode 100644 include/asm-x86/floppy.h create mode 100644 include/asm-x86/floppy_32.h create mode 100644 include/asm-x86/floppy_64.h create mode 100644 include/asm-x86/fpu32.h create mode 100644 include/asm-x86/frame.i create mode 100644 include/asm-x86/futex.h create mode 100644 include/asm-x86/futex_32.h create mode 100644 include/asm-x86/futex_64.h create mode 100644 include/asm-x86/genapic.h create mode 100644 include/asm-x86/genapic_32.h create mode 100644 include/asm-x86/genapic_64.h create mode 100644 include/asm-x86/geode.h create mode 100644 include/asm-x86/hardirq.h create mode 100644 include/asm-x86/hardirq_32.h create mode 100644 include/asm-x86/hardirq_64.h create mode 100644 include/asm-x86/highmem.h create mode 100644 include/asm-x86/hpet.h create mode 100644 include/asm-x86/hpet_32.h create mode 100644 include/asm-x86/hpet_64.h create mode 100644 include/asm-x86/hw_irq.h create mode 100644 include/asm-x86/hw_irq_32.h create mode 100644 include/asm-x86/hw_irq_64.h create mode 100644 include/asm-x86/hypertransport.h create mode 100644 include/asm-x86/i387.h create mode 100644 include/asm-x86/i387_32.h create mode 100644 include/asm-x86/i387_64.h create mode 100644 include/asm-x86/i8253.h create mode 100644 include/asm-x86/i8253_32.h create mode 100644 include/asm-x86/i8253_64.h create mode 100644 include/asm-x86/i8259.h create mode 100644 include/asm-x86/ia32.h create mode 100644 include/asm-x86/ia32_unistd.h create mode 100644 include/asm-x86/ide.h create mode 100644 include/asm-x86/idle.h create mode 100644 include/asm-x86/intel_arch_perfmon.h create mode 100644 include/asm-x86/intel_arch_perfmon_32.h create mode 100644 include/asm-x86/intel_arch_perfmon_64.h create mode 100644 include/asm-x86/io.h create mode 100644 include/asm-x86/io_32.h create mode 100644 include/asm-x86/io_64.h create mode 100644 include/asm-x86/io_apic.h create mode 100644 include/asm-x86/io_apic_32.h create mode 100644 include/asm-x86/io_apic_64.h create mode 100644 include/asm-x86/ioctl.h create mode 100644 include/asm-x86/ioctls.h create mode 100644 include/asm-x86/ioctls_32.h create mode 100644 include/asm-x86/ioctls_64.h create mode 100644 include/asm-x86/iommu.h create mode 100644 include/asm-x86/ipc.h create mode 100644 include/asm-x86/ipcbuf.h create mode 100644 include/asm-x86/ipcbuf_32.h create mode 100644 include/asm-x86/ipcbuf_64.h create mode 100644 include/asm-x86/ipi.h create mode 100644 include/asm-x86/irq.h create mode 100644 include/asm-x86/irq_32.h create mode 100644 include/asm-x86/irq_64.h create mode 100644 include/asm-x86/irq_regs.h create mode 100644 include/asm-x86/irq_regs_32.h create mode 100644 include/asm-x86/irq_regs_64.h create mode 100644 include/asm-x86/irqflags.h create mode 100644 include/asm-x86/irqflags_32.h create mode 100644 include/asm-x86/irqflags_64.h create mode 100644 include/asm-x86/ist.h create mode 100644 include/asm-x86/k8.h create mode 100644 include/asm-x86/kdebug.h create mode 100644 include/asm-x86/kdebug_32.h create mode 100644 include/asm-x86/kdebug_64.h create mode 100644 include/asm-x86/kexec.h create mode 100644 include/asm-x86/kexec_32.h create mode 100644 include/asm-x86/kexec_64.h create mode 100644 include/asm-x86/kmap_types.h create mode 100644 
include/asm-x86/kmap_types_32.h create mode 100644 include/asm-x86/kmap_types_64.h create mode 100644 include/asm-x86/kprobes.h create mode 100644 include/asm-x86/kprobes_32.h create mode 100644 include/asm-x86/kprobes_64.h create mode 100644 include/asm-x86/ldt.h create mode 100644 include/asm-x86/ldt_32.h create mode 100644 include/asm-x86/ldt_64.h create mode 100644 include/asm-x86/linkage.h create mode 100644 include/asm-x86/linkage_32.h create mode 100644 include/asm-x86/linkage_64.h create mode 100644 include/asm-x86/local.h create mode 100644 include/asm-x86/local_32.h create mode 100644 include/asm-x86/local_64.h create mode 100644 include/asm-x86/mach-bigsmp/mach_apic.h create mode 100644 include/asm-x86/mach-bigsmp/mach_apicdef.h create mode 100644 include/asm-x86/mach-bigsmp/mach_ipi.h create mode 100644 include/asm-x86/mach-bigsmp/mach_mpspec.h create mode 100644 include/asm-x86/mach-default/apm.h create mode 100644 include/asm-x86/mach-default/bios_ebda.h create mode 100644 include/asm-x86/mach-default/do_timer.h create mode 100644 include/asm-x86/mach-default/entry_arch.h create mode 100644 include/asm-x86/mach-default/io_ports.h create mode 100644 include/asm-x86/mach-default/irq_vectors.h create mode 100644 include/asm-x86/mach-default/irq_vectors_limits.h create mode 100644 include/asm-x86/mach-default/mach_apic.h create mode 100644 include/asm-x86/mach-default/mach_apicdef.h create mode 100644 include/asm-x86/mach-default/mach_ipi.h create mode 100644 include/asm-x86/mach-default/mach_mpparse.h create mode 100644 include/asm-x86/mach-default/mach_mpspec.h create mode 100644 include/asm-x86/mach-default/mach_reboot.h create mode 100644 include/asm-x86/mach-default/mach_time.h create mode 100644 include/asm-x86/mach-default/mach_timer.h create mode 100644 include/asm-x86/mach-default/mach_traps.h create mode 100644 include/asm-x86/mach-default/mach_wakecpu.h create mode 100644 include/asm-x86/mach-default/pci-functions.h create mode 100644 include/asm-x86/mach-default/setup_arch.h create mode 100644 include/asm-x86/mach-default/smpboot_hooks.h create mode 100644 include/asm-x86/mach-es7000/mach_apic.h create mode 100644 include/asm-x86/mach-es7000/mach_apicdef.h create mode 100644 include/asm-x86/mach-es7000/mach_ipi.h create mode 100644 include/asm-x86/mach-es7000/mach_mpparse.h create mode 100644 include/asm-x86/mach-es7000/mach_mpspec.h create mode 100644 include/asm-x86/mach-es7000/mach_wakecpu.h create mode 100644 include/asm-x86/mach-generic/irq_vectors_limits.h create mode 100644 include/asm-x86/mach-generic/mach_apic.h create mode 100644 include/asm-x86/mach-generic/mach_apicdef.h create mode 100644 include/asm-x86/mach-generic/mach_ipi.h create mode 100644 include/asm-x86/mach-generic/mach_mpparse.h create mode 100644 include/asm-x86/mach-generic/mach_mpspec.h create mode 100644 include/asm-x86/mach-numaq/mach_apic.h create mode 100644 include/asm-x86/mach-numaq/mach_apicdef.h create mode 100644 include/asm-x86/mach-numaq/mach_ipi.h create mode 100644 include/asm-x86/mach-numaq/mach_mpparse.h create mode 100644 include/asm-x86/mach-numaq/mach_mpspec.h create mode 100644 include/asm-x86/mach-numaq/mach_wakecpu.h create mode 100644 include/asm-x86/mach-summit/irq_vectors_limits.h create mode 100644 include/asm-x86/mach-summit/mach_apic.h create mode 100644 include/asm-x86/mach-summit/mach_apicdef.h create mode 100644 include/asm-x86/mach-summit/mach_ipi.h create mode 100644 include/asm-x86/mach-summit/mach_mpparse.h create mode 100644 
include/asm-x86/mach-summit/mach_mpspec.h create mode 100644 include/asm-x86/mach-visws/cobalt.h create mode 100644 include/asm-x86/mach-visws/entry_arch.h create mode 100644 include/asm-x86/mach-visws/irq_vectors.h create mode 100644 include/asm-x86/mach-visws/lithium.h create mode 100644 include/asm-x86/mach-visws/mach_apic.h create mode 100644 include/asm-x86/mach-visws/mach_apicdef.h create mode 100644 include/asm-x86/mach-visws/piix4.h create mode 100644 include/asm-x86/mach-visws/setup_arch.h create mode 100644 include/asm-x86/mach-visws/smpboot_hooks.h create mode 100644 include/asm-x86/mach-voyager/do_timer.h create mode 100644 include/asm-x86/mach-voyager/entry_arch.h create mode 100644 include/asm-x86/mach-voyager/irq_vectors.h create mode 100644 include/asm-x86/mach-voyager/setup_arch.h create mode 100644 include/asm-x86/mach_apic.h create mode 100644 include/asm-x86/math_emu.h create mode 100644 include/asm-x86/mc146818rtc.h create mode 100644 include/asm-x86/mc146818rtc_32.h create mode 100644 include/asm-x86/mc146818rtc_64.h create mode 100644 include/asm-x86/mca.h create mode 100644 include/asm-x86/mca_dma.h create mode 100644 include/asm-x86/mce.h create mode 100644 include/asm-x86/mce_32.h create mode 100644 include/asm-x86/mce_64.h create mode 100644 include/asm-x86/mman.h create mode 100644 include/asm-x86/mman_32.h create mode 100644 include/asm-x86/mman_64.h create mode 100644 include/asm-x86/mmsegment.h create mode 100644 include/asm-x86/mmu.h create mode 100644 include/asm-x86/mmu_32.h create mode 100644 include/asm-x86/mmu_64.h create mode 100644 include/asm-x86/mmu_context.h create mode 100644 include/asm-x86/mmu_context_32.h create mode 100644 include/asm-x86/mmu_context_64.h create mode 100644 include/asm-x86/mmx.h create mode 100644 include/asm-x86/mmzone.h create mode 100644 include/asm-x86/mmzone_32.h create mode 100644 include/asm-x86/mmzone_64.h create mode 100644 include/asm-x86/module.h create mode 100644 include/asm-x86/module_32.h create mode 100644 include/asm-x86/module_64.h create mode 100644 include/asm-x86/mpspec.h create mode 100644 include/asm-x86/mpspec_32.h create mode 100644 include/asm-x86/mpspec_64.h create mode 100644 include/asm-x86/mpspec_def.h create mode 100644 include/asm-x86/msgbuf.h create mode 100644 include/asm-x86/msgbuf_32.h create mode 100644 include/asm-x86/msgbuf_64.h create mode 100644 include/asm-x86/msidef.h create mode 100644 include/asm-x86/msr-index.h create mode 100644 include/asm-x86/msr.h create mode 100644 include/asm-x86/msr_32.h create mode 100644 include/asm-x86/msr_64.h create mode 100644 include/asm-x86/mtrr.h create mode 100644 include/asm-x86/mtrr_32.h create mode 100644 include/asm-x86/mtrr_64.h create mode 100644 include/asm-x86/mutex.h create mode 100644 include/asm-x86/mutex_32.h create mode 100644 include/asm-x86/mutex_64.h create mode 100644 include/asm-x86/namei.h create mode 100644 include/asm-x86/namei_32.h create mode 100644 include/asm-x86/namei_64.h create mode 100644 include/asm-x86/nmi.h create mode 100644 include/asm-x86/nmi_32.h create mode 100644 include/asm-x86/nmi_64.h create mode 100644 include/asm-x86/numa.h create mode 100644 include/asm-x86/numa_32.h create mode 100644 include/asm-x86/numa_64.h create mode 100644 include/asm-x86/numaq.h create mode 100644 include/asm-x86/page.h create mode 100644 include/asm-x86/page_32.h create mode 100644 include/asm-x86/page_64.h create mode 100644 include/asm-x86/param.h create mode 100644 include/asm-x86/param_32.h create mode 100644 
include/asm-x86/param_64.h create mode 100644 include/asm-x86/paravirt.h create mode 100644 include/asm-x86/parport.h create mode 100644 include/asm-x86/parport_32.h create mode 100644 include/asm-x86/parport_64.h create mode 100644 include/asm-x86/pci-direct.h create mode 100644 include/asm-x86/pci.h create mode 100644 include/asm-x86/pci_32.h create mode 100644 include/asm-x86/pci_64.h create mode 100644 include/asm-x86/pda.h create mode 100644 include/asm-x86/percpu.h create mode 100644 include/asm-x86/percpu_32.h create mode 100644 include/asm-x86/percpu_64.h create mode 100644 include/asm-x86/pgalloc.h create mode 100644 include/asm-x86/pgalloc_32.h create mode 100644 include/asm-x86/pgalloc_64.h create mode 100644 include/asm-x86/pgtable-2level-defs.h create mode 100644 include/asm-x86/pgtable-2level.h create mode 100644 include/asm-x86/pgtable-3level-defs.h create mode 100644 include/asm-x86/pgtable-3level.h create mode 100644 include/asm-x86/pgtable.h create mode 100644 include/asm-x86/pgtable_32.h create mode 100644 include/asm-x86/pgtable_64.h create mode 100644 include/asm-x86/poll.h create mode 100644 include/asm-x86/posix_types.h create mode 100644 include/asm-x86/posix_types_32.h create mode 100644 include/asm-x86/posix_types_64.h create mode 100644 include/asm-x86/prctl.h create mode 100644 include/asm-x86/processor-cyrix.h create mode 100644 include/asm-x86/processor-flags.h create mode 100644 include/asm-x86/processor.h create mode 100644 include/asm-x86/processor_32.h create mode 100644 include/asm-x86/processor_64.h create mode 100644 include/asm-x86/proto.h create mode 100644 include/asm-x86/ptrace-abi.h create mode 100644 include/asm-x86/ptrace-abi_32.h create mode 100644 include/asm-x86/ptrace-abi_64.h create mode 100644 include/asm-x86/ptrace.h create mode 100644 include/asm-x86/ptrace_32.h create mode 100644 include/asm-x86/ptrace_64.h create mode 100644 include/asm-x86/reboot.h create mode 100644 include/asm-x86/reboot_fixups.h create mode 100644 include/asm-x86/required-features.h create mode 100644 include/asm-x86/required-features_32.h create mode 100644 include/asm-x86/required-features_64.h create mode 100644 include/asm-x86/resource.h create mode 100644 include/asm-x86/resource_32.h create mode 100644 include/asm-x86/resource_64.h create mode 100644 include/asm-x86/resume-trace.h create mode 100644 include/asm-x86/resume-trace_32.h create mode 100644 include/asm-x86/resume-trace_64.h create mode 100644 include/asm-x86/rio.h create mode 100644 include/asm-x86/rtc.h create mode 100644 include/asm-x86/rtc_32.h create mode 100644 include/asm-x86/rtc_64.h create mode 100644 include/asm-x86/rwlock.h create mode 100644 include/asm-x86/rwlock_32.h create mode 100644 include/asm-x86/rwlock_64.h create mode 100644 include/asm-x86/rwsem.h create mode 100644 include/asm-x86/scatterlist.h create mode 100644 include/asm-x86/scatterlist_32.h create mode 100644 include/asm-x86/scatterlist_64.h create mode 100644 include/asm-x86/seccomp.h create mode 100644 include/asm-x86/seccomp_32.h create mode 100644 include/asm-x86/seccomp_64.h create mode 100644 include/asm-x86/sections.h create mode 100644 include/asm-x86/sections_32.h create mode 100644 include/asm-x86/sections_64.h create mode 100644 include/asm-x86/segment.h create mode 100644 include/asm-x86/segment_32.h create mode 100644 include/asm-x86/segment_64.h create mode 100644 include/asm-x86/semaphore.h create mode 100644 include/asm-x86/semaphore_32.h create mode 100644 include/asm-x86/semaphore_64.h create mode 100644 
include/asm-x86/sembuf.h create mode 100644 include/asm-x86/sembuf_32.h create mode 100644 include/asm-x86/sembuf_64.h create mode 100644 include/asm-x86/serial.h create mode 100644 include/asm-x86/serial_32.h create mode 100644 include/asm-x86/serial_64.h create mode 100644 include/asm-x86/setup.h create mode 100644 include/asm-x86/setup_32.h create mode 100644 include/asm-x86/setup_64.h create mode 100644 include/asm-x86/shmbuf.h create mode 100644 include/asm-x86/shmbuf_32.h create mode 100644 include/asm-x86/shmbuf_64.h create mode 100644 include/asm-x86/shmparam.h create mode 100644 include/asm-x86/shmparam_32.h create mode 100644 include/asm-x86/shmparam_64.h create mode 100644 include/asm-x86/sigcontext.h create mode 100644 include/asm-x86/sigcontext32.h create mode 100644 include/asm-x86/sigcontext_32.h create mode 100644 include/asm-x86/sigcontext_64.h create mode 100644 include/asm-x86/siginfo.h create mode 100644 include/asm-x86/siginfo_32.h create mode 100644 include/asm-x86/siginfo_64.h create mode 100644 include/asm-x86/signal.h create mode 100644 include/asm-x86/signal_32.h create mode 100644 include/asm-x86/signal_64.h create mode 100644 include/asm-x86/smp.h create mode 100644 include/asm-x86/smp_32.h create mode 100644 include/asm-x86/smp_64.h create mode 100644 include/asm-x86/socket.h create mode 100644 include/asm-x86/sockios.h create mode 100644 include/asm-x86/sockios_32.h create mode 100644 include/asm-x86/sockios_64.h create mode 100644 include/asm-x86/sparsemem.h create mode 100644 include/asm-x86/sparsemem_32.h create mode 100644 include/asm-x86/sparsemem_64.h create mode 100644 include/asm-x86/spinlock.h create mode 100644 include/asm-x86/spinlock_32.h create mode 100644 include/asm-x86/spinlock_64.h create mode 100644 include/asm-x86/spinlock_types.h create mode 100644 include/asm-x86/srat.h create mode 100644 include/asm-x86/stacktrace.h create mode 100644 include/asm-x86/stat.h create mode 100644 include/asm-x86/stat_32.h create mode 100644 include/asm-x86/stat_64.h create mode 100644 include/asm-x86/statfs.h create mode 100644 include/asm-x86/statfs_32.h create mode 100644 include/asm-x86/statfs_64.h create mode 100644 include/asm-x86/string.h create mode 100644 include/asm-x86/string_32.h create mode 100644 include/asm-x86/string_64.h create mode 100644 include/asm-x86/suspend.h create mode 100644 include/asm-x86/suspend_32.h create mode 100644 include/asm-x86/suspend_64.h create mode 100644 include/asm-x86/swiotlb.h create mode 100644 include/asm-x86/sync_bitops.h create mode 100644 include/asm-x86/system.h create mode 100644 include/asm-x86/system_32.h create mode 100644 include/asm-x86/system_64.h create mode 100644 include/asm-x86/tce.h create mode 100644 include/asm-x86/termbits.h create mode 100644 include/asm-x86/termbits_32.h create mode 100644 include/asm-x86/termbits_64.h create mode 100644 include/asm-x86/termios.h create mode 100644 include/asm-x86/termios_32.h create mode 100644 include/asm-x86/termios_64.h create mode 100644 include/asm-x86/therm_throt.h create mode 100644 include/asm-x86/thread_info.h create mode 100644 include/asm-x86/thread_info_32.h create mode 100644 include/asm-x86/thread_info_64.h create mode 100644 include/asm-x86/time.h create mode 100644 include/asm-x86/timer.h create mode 100644 include/asm-x86/timex.h create mode 100644 include/asm-x86/timex_32.h create mode 100644 include/asm-x86/timex_64.h create mode 100644 include/asm-x86/tlb.h create mode 100644 include/asm-x86/tlb_32.h create mode 100644 
include/asm-x86/tlb_64.h create mode 100644 include/asm-x86/tlbflush.h create mode 100644 include/asm-x86/tlbflush_32.h create mode 100644 include/asm-x86/tlbflush_64.h create mode 100644 include/asm-x86/topology.h create mode 100644 include/asm-x86/topology_32.h create mode 100644 include/asm-x86/topology_64.h create mode 100644 include/asm-x86/tsc.h create mode 100644 include/asm-x86/types.h create mode 100644 include/asm-x86/types_32.h create mode 100644 include/asm-x86/types_64.h create mode 100644 include/asm-x86/uaccess.h create mode 100644 include/asm-x86/uaccess_32.h create mode 100644 include/asm-x86/uaccess_64.h create mode 100644 include/asm-x86/ucontext.h create mode 100644 include/asm-x86/ucontext_32.h create mode 100644 include/asm-x86/ucontext_64.h create mode 100644 include/asm-x86/unaligned.h create mode 100644 include/asm-x86/unaligned_32.h create mode 100644 include/asm-x86/unaligned_64.h create mode 100644 include/asm-x86/unistd.h create mode 100644 include/asm-x86/unistd_32.h create mode 100644 include/asm-x86/unistd_64.h create mode 100644 include/asm-x86/unwind.h create mode 100644 include/asm-x86/unwind_32.h create mode 100644 include/asm-x86/unwind_64.h create mode 100644 include/asm-x86/user.h create mode 100644 include/asm-x86/user32.h create mode 100644 include/asm-x86/user_32.h create mode 100644 include/asm-x86/user_64.h create mode 100644 include/asm-x86/vga.h create mode 100644 include/asm-x86/vgtod.h create mode 100644 include/asm-x86/vic.h create mode 100644 include/asm-x86/vm86.h create mode 100644 include/asm-x86/vmi.h create mode 100644 include/asm-x86/vmi_time.h create mode 100644 include/asm-x86/voyager.h create mode 100644 include/asm-x86/vsyscall.h create mode 100644 include/asm-x86/vsyscall32.h create mode 100644 include/asm-x86/xen/hypercall.h create mode 100644 include/asm-x86/xen/hypervisor.h create mode 100644 include/asm-x86/xen/interface.h create mode 100644 include/asm-x86/xor.h create mode 100644 include/asm-x86/xor_32.h create mode 100644 include/asm-x86/xor_64.h (limited to 'include/asm-x86') diff --git a/include/asm-x86/8253pit.h b/include/asm-x86/8253pit.h new file mode 100644 index 000000000000..d3c2b38a6618 --- /dev/null +++ b/include/asm-x86/8253pit.h @@ -0,0 +1,5 @@ +#ifdef CONFIG_X86_32 +# include "8253pit_32.h" +#else +# include "8253pit_64.h" +#endif diff --git a/include/asm-x86/8253pit_32.h b/include/asm-x86/8253pit_32.h new file mode 100644 index 000000000000..96c7c3592daf --- /dev/null +++ b/include/asm-x86/8253pit_32.h @@ -0,0 +1,12 @@ +/* + * 8253/8254 Programmable Interval Timer + */ + +#ifndef _8253PIT_H +#define _8253PIT_H + +#include + +#define PIT_TICK_RATE CLOCK_TICK_RATE + +#endif diff --git a/include/asm-x86/8253pit_64.h b/include/asm-x86/8253pit_64.h new file mode 100644 index 000000000000..285f78488ccb --- /dev/null +++ b/include/asm-x86/8253pit_64.h @@ -0,0 +1,10 @@ +/* + * 8253/8254 Programmable Interval Timer + */ + +#ifndef _8253PIT_H +#define _8253PIT_H + +#define PIT_TICK_RATE 1193182UL + +#endif diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild new file mode 100644 index 000000000000..c5e43cb39874 --- /dev/null +++ b/include/asm-x86/Kbuild @@ -0,0 +1,88 @@ +include include/asm-generic/Kbuild.asm + +header-y += boot.h +header-y += bootsetup.h +header-y += debugreg_32.h +header-y += debugreg_64.h +header-y += debugreg.h +header-y += ldt_32.h +header-y += ldt_64.h +header-y += ldt.h +header-y += msr-index.h +header-y += prctl.h +header-y += ptrace-abi_32.h +header-y += ptrace-abi_64.h +header-y += 
ptrace-abi.h +header-y += sigcontext32.h +header-y += ucontext_32.h +header-y += ucontext_64.h +header-y += ucontext.h +header-y += vsyscall32.h + +unifdef-y += a.out_32.h +unifdef-y += a.out_64.h +unifdef-y += auxvec_32.h +unifdef-y += auxvec_64.h +unifdef-y += byteorder_32.h +unifdef-y += byteorder_64.h +unifdef-y += elf_32.h +unifdef-y += elf_64.h +unifdef-y += errno_32.h +unifdef-y += errno_64.h +unifdef-y += ioctls_32.h +unifdef-y += ioctls_64.h +unifdef-y += ipcbuf_32.h +unifdef-y += ipcbuf_64.h +unifdef-y += mce.h +unifdef-y += mman_32.h +unifdef-y += mman_64.h +unifdef-y += msgbuf_32.h +unifdef-y += msgbuf_64.h +unifdef-y += msr_32.h +unifdef-y += msr_64.h +unifdef-y += msr.h +unifdef-y += mtrr_32.h +unifdef-y += mtrr_64.h +unifdef-y += mtrr.h +unifdef-y += page_32.h +unifdef-y += page_64.h +unifdef-y += param_32.h +unifdef-y += param_64.h +unifdef-y += posix_types_32.h +unifdef-y += posix_types_64.h +unifdef-y += ptrace_32.h +unifdef-y += ptrace_64.h +unifdef-y += resource_32.h +unifdef-y += resource_64.h +unifdef-y += sembuf_32.h +unifdef-y += sembuf_64.h +unifdef-y += setup_32.h +unifdef-y += setup_64.h +unifdef-y += shmbuf_32.h +unifdef-y += shmbuf_64.h +unifdef-y += shmparam_32.h +unifdef-y += shmparam_64.h +unifdef-y += sigcontext_32.h +unifdef-y += sigcontext_64.h +unifdef-y += siginfo_32.h +unifdef-y += siginfo_64.h +unifdef-y += signal_32.h +unifdef-y += signal_64.h +unifdef-y += sockios_32.h +unifdef-y += sockios_64.h +unifdef-y += stat_32.h +unifdef-y += stat_64.h +unifdef-y += statfs_32.h +unifdef-y += statfs_64.h +unifdef-y += termbits_32.h +unifdef-y += termbits_64.h +unifdef-y += termios_32.h +unifdef-y += termios_64.h +unifdef-y += types_32.h +unifdef-y += types_64.h +unifdef-y += unistd_32.h +unifdef-y += unistd_64.h +unifdef-y += user_32.h +unifdef-y += user_64.h +unifdef-y += vm86.h +unifdef-y += vsyscall.h diff --git a/include/asm-x86/a.out.h b/include/asm-x86/a.out.h new file mode 100644 index 000000000000..5bc9b1d3b227 --- /dev/null +++ b/include/asm-x86/a.out.h @@ -0,0 +1,13 @@ +#ifdef __KERNEL__ +# ifdef CONFIG_X86_32 +# include "a.out_32.h" +# else +# include "a.out_64.h" +# endif +#else +# ifdef __i386__ +# include "a.out_32.h" +# else +# include "a.out_64.h" +# endif +#endif diff --git a/include/asm-x86/a.out_32.h b/include/asm-x86/a.out_32.h new file mode 100644 index 000000000000..851a60f8258c --- /dev/null +++ b/include/asm-x86/a.out_32.h @@ -0,0 +1,27 @@ +#ifndef __I386_A_OUT_H__ +#define __I386_A_OUT_H__ + +struct exec +{ + unsigned long a_info; /* Use macros N_MAGIC, etc for access */ + unsigned a_text; /* length of text, in bytes */ + unsigned a_data; /* length of data, in bytes */ + unsigned a_bss; /* length of uninitialized data area for file, in bytes */ + unsigned a_syms; /* length of symbol table data in file, in bytes */ + unsigned a_entry; /* start address */ + unsigned a_trsize; /* length of relocation info for text, in bytes */ + unsigned a_drsize; /* length of relocation info for data, in bytes */ +}; + +#define N_TRSIZE(a) ((a).a_trsize) +#define N_DRSIZE(a) ((a).a_drsize) +#define N_SYMSIZE(a) ((a).a_syms) + +#ifdef __KERNEL__ + +#define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP + +#endif + +#endif /* __A_OUT_GNU_H__ */ diff --git a/include/asm-x86/a.out_64.h b/include/asm-x86/a.out_64.h new file mode 100644 index 000000000000..e789300e41a5 --- /dev/null +++ b/include/asm-x86/a.out_64.h @@ -0,0 +1,28 @@ +#ifndef __X8664_A_OUT_H__ +#define __X8664_A_OUT_H__ + +/* 32bit a.out */ + +struct exec +{ + unsigned int a_info; /* Use 
macros N_MAGIC, etc for access */ + unsigned a_text; /* length of text, in bytes */ + unsigned a_data; /* length of data, in bytes */ + unsigned a_bss; /* length of uninitialized data area for file, in bytes */ + unsigned a_syms; /* length of symbol table data in file, in bytes */ + unsigned a_entry; /* start address */ + unsigned a_trsize; /* length of relocation info for text, in bytes */ + unsigned a_drsize; /* length of relocation info for data, in bytes */ +}; + +#define N_TRSIZE(a) ((a).a_trsize) +#define N_DRSIZE(a) ((a).a_drsize) +#define N_SYMSIZE(a) ((a).a_syms) + +#ifdef __KERNEL__ +#include +#define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX TASK_SIZE64 +#endif + +#endif /* __A_OUT_GNU_H__ */ diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h new file mode 100644 index 000000000000..0693689d4146 --- /dev/null +++ b/include/asm-x86/acpi.h @@ -0,0 +1,5 @@ +#ifdef CONFIG_X86_32 +# include "acpi_32.h" +#else +# include "acpi_64.h" +#endif diff --git a/include/asm-x86/acpi_32.h b/include/asm-x86/acpi_32.h new file mode 100644 index 000000000000..125179adf044 --- /dev/null +++ b/include/asm-x86/acpi_32.h @@ -0,0 +1,147 @@ +/* + * asm-i386/acpi.h + * + * Copyright (C) 2001 Paul Diefenbaugh + * Copyright (C) 2001 Patrick Mochel + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#ifndef _ASM_ACPI_H +#define _ASM_ACPI_H + +#ifdef __KERNEL__ + +#include + +#include /* defines cmpxchg */ + +#define COMPILER_DEPENDENT_INT64 long long +#define COMPILER_DEPENDENT_UINT64 unsigned long long + +/* + * Calling conventions: + * + * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) + * ACPI_EXTERNAL_XFACE - External ACPI interfaces + * ACPI_INTERNAL_XFACE - Internal ACPI interfaces + * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces + */ +#define ACPI_SYSTEM_XFACE +#define ACPI_EXTERNAL_XFACE +#define ACPI_INTERNAL_XFACE +#define ACPI_INTERNAL_VAR_XFACE + +/* Asm macros */ + +#define ACPI_ASM_MACROS +#define BREAKPOINT3 +#define ACPI_DISABLE_IRQS() local_irq_disable() +#define ACPI_ENABLE_IRQS() local_irq_enable() +#define ACPI_FLUSH_CPU_CACHE() wbinvd() + +int __acpi_acquire_global_lock(unsigned int *lock); +int __acpi_release_global_lock(unsigned int *lock); + +#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_acquire_global_lock(&facs->global_lock)) + +#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_release_global_lock(&facs->global_lock)) + +/* + * Math helper asm macros + */ +#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ + asm("divl %2;" \ + :"=a"(q32), "=d"(r32) \ + :"r"(d32), \ + "0"(n_lo), "1"(n_hi)) + + +#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ + asm("shrl $1,%2;" \ + "rcrl $1,%3;" \ + :"=r"(n_hi), "=r"(n_lo) \ + :"0"(n_hi), "1"(n_lo)) + +#ifdef CONFIG_X86_IO_APIC +extern void check_acpi_pci(void); +#else +static inline void check_acpi_pci(void) { } +#endif + +#ifdef CONFIG_ACPI +extern int acpi_lapic; +extern int acpi_ioapic; +extern int acpi_noirq; +extern int acpi_strict; +extern int acpi_disabled; +extern int acpi_ht; +extern int acpi_pci_disabled; +static inline void disable_acpi(void) +{ + acpi_disabled = 1; + acpi_ht = 0; + acpi_pci_disabled = 1; + acpi_noirq = 1; +} + +/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */ +#define FIX_ACPI_PAGES 4 + +extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); + +#ifdef CONFIG_X86_IO_APIC +extern int acpi_skip_timer_override; +extern int acpi_use_timer_override; +#endif + +static inline void acpi_noirq_set(void) { acpi_noirq = 1; } +static inline void acpi_disable_pci(void) +{ + acpi_pci_disabled = 1; + acpi_noirq_set(); +} +extern int acpi_irq_balance_set(char *str); + +/* routines for saving/restoring kernel state */ +extern int acpi_save_state_mem(void); +extern void acpi_restore_state_mem(void); + +extern unsigned long acpi_wakeup_address; + +/* early initialization routine */ +extern void acpi_reserve_bootmem(void); + +#else /* !CONFIG_ACPI */ + +#define acpi_lapic 0 +#define acpi_ioapic 0 +static inline void acpi_noirq_set(void) { } +static inline void acpi_disable_pci(void) { } +static inline void disable_acpi(void) { } + +#endif /* !CONFIG_ACPI */ + +#define ARCH_HAS_POWER_INIT 1 + +#endif /*__KERNEL__*/ + +#endif /*_ASM_ACPI_H*/ diff --git a/include/asm-x86/acpi_64.h b/include/asm-x86/acpi_64.h new file mode 100644 index 000000000000..98173357dd89 --- /dev/null +++ b/include/asm-x86/acpi_64.h @@ -0,0 +1,153 @@ +/* + * asm-x86_64/acpi.h + * + * Copyright (C) 2001 Paul Diefenbaugh + * Copyright (C) 2001 Patrick Mochel 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#ifndef _ASM_ACPI_H +#define _ASM_ACPI_H + +#ifdef __KERNEL__ + +#include +#include + +#define COMPILER_DEPENDENT_INT64 long long +#define COMPILER_DEPENDENT_UINT64 unsigned long long + +/* + * Calling conventions: + * + * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) + * ACPI_EXTERNAL_XFACE - External ACPI interfaces + * ACPI_INTERNAL_XFACE - Internal ACPI interfaces + * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces + */ +#define ACPI_SYSTEM_XFACE +#define ACPI_EXTERNAL_XFACE +#define ACPI_INTERNAL_XFACE +#define ACPI_INTERNAL_VAR_XFACE + +/* Asm macros */ + +#define ACPI_ASM_MACROS +#define BREAKPOINT3 +#define ACPI_DISABLE_IRQS() local_irq_disable() +#define ACPI_ENABLE_IRQS() local_irq_enable() +#define ACPI_FLUSH_CPU_CACHE() wbinvd() + +int __acpi_acquire_global_lock(unsigned int *lock); +int __acpi_release_global_lock(unsigned int *lock); + +#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_acquire_global_lock(&facs->global_lock)) + +#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_release_global_lock(&facs->global_lock)) + +/* + * Math helper asm macros + */ +#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ + asm("divl %2;" \ + :"=a"(q32), "=d"(r32) \ + :"r"(d32), \ + "0"(n_lo), "1"(n_hi)) + + +#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ + asm("shrl $1,%2;" \ + "rcrl $1,%3;" \ + :"=r"(n_hi), "=r"(n_lo) \ + :"0"(n_hi), "1"(n_lo)) + +#ifdef CONFIG_ACPI +extern int acpi_lapic; +extern int acpi_ioapic; +extern int acpi_noirq; +extern int acpi_strict; +extern int acpi_disabled; +extern int acpi_pci_disabled; +extern int acpi_ht; +static inline void disable_acpi(void) +{ + acpi_disabled = 1; + acpi_ht = 0; + acpi_pci_disabled = 1; + acpi_noirq = 1; +} + +/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */ +#define FIX_ACPI_PAGES 4 + +extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); +static inline void acpi_noirq_set(void) { acpi_noirq = 1; } +static inline void acpi_disable_pci(void) +{ + acpi_pci_disabled = 1; + acpi_noirq_set(); +} +extern int acpi_irq_balance_set(char *str); + +/* routines for saving/restoring kernel state */ +extern int acpi_save_state_mem(void); +extern void acpi_restore_state_mem(void); + +extern unsigned long acpi_wakeup_address; + +/* early initialization routine */ +extern void acpi_reserve_bootmem(void); + +#else /* !CONFIG_ACPI */ + +#define acpi_lapic 0 +#define acpi_ioapic 0 +static inline void acpi_noirq_set(void) { } +static inline void acpi_disable_pci(void) { } + +#endif /* !CONFIG_ACPI */ + +extern int acpi_numa; +extern int acpi_scan_nodes(unsigned long start, unsigned 
long end); +#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) + +extern int acpi_disabled; +extern int acpi_pci_disabled; + +#define ARCH_HAS_POWER_INIT 1 + +extern int acpi_skip_timer_override; +extern int acpi_use_timer_override; + +#ifdef CONFIG_ACPI_NUMA +extern void __init acpi_fake_nodes(const struct bootnode *fake_nodes, + int num_nodes); +#else +static inline void acpi_fake_nodes(const struct bootnode *fake_nodes, + int num_nodes) +{ +} +#endif + +#endif /*__KERNEL__*/ + +#endif /*_ASM_ACPI_H*/ diff --git a/include/asm-x86/agp.h b/include/asm-x86/agp.h new file mode 100644 index 000000000000..9348f1e4f6f1 --- /dev/null +++ b/include/asm-x86/agp.h @@ -0,0 +1,5 @@ +#ifdef CONFIG_X86_32 +# include "agp_32.h" +#else +# include "agp_64.h" +#endif diff --git a/include/asm-x86/agp_32.h b/include/asm-x86/agp_32.h new file mode 100644 index 000000000000..6af173dbf123 --- /dev/null +++ b/include/asm-x86/agp_32.h @@ -0,0 +1,36 @@ +#ifndef AGP_H +#define AGP_H 1 + +#include +#include + +/* + * Functions to keep the agpgart mappings coherent with the MMU. + * The GART gives the CPU a physical alias of pages in memory. The alias region is + * mapped uncacheable. Make sure there are no conflicting mappings + * with different cachability attributes for the same page. This avoids + * data corruption on some CPUs. + */ + +/* Caller's responsibility to call global_flush_tlb() for + * performance reasons */ +#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE) +#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL) +#define flush_agp_mappings() global_flush_tlb() + +/* Could use CLFLUSH here if the cpu supports it. But then it would + need to be called for each cacheline of the whole page so it may not be + worth it. Would need a page for it. */ +#define flush_agp_cache() wbinvd() + +/* Convert a physical address to an address suitable for the GART. */ +#define phys_to_gart(x) (x) +#define gart_to_phys(x) (x) + +/* GATT allocation. Returns/accepts GATT kernel virtual address. */ +#define alloc_gatt_pages(order) \ + ((char *)__get_free_pages(GFP_KERNEL, (order))) +#define free_gatt_pages(table, order) \ + free_pages((unsigned long)(table), (order)) + +#endif diff --git a/include/asm-x86/agp_64.h b/include/asm-x86/agp_64.h new file mode 100644 index 000000000000..de338666f3f9 --- /dev/null +++ b/include/asm-x86/agp_64.h @@ -0,0 +1,34 @@ +#ifndef AGP_H +#define AGP_H 1 + +#include + +/* + * Functions to keep the agpgart mappings coherent. + * The GART gives the CPU a physical alias of memory. The alias is + * mapped uncacheable. Make sure there are no conflicting mappings + * with different cachability attributes for the same page. + */ + +/* Caller's responsibility to call global_flush_tlb() for + * performance reasons */ +#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE) +#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL) +#define flush_agp_mappings() global_flush_tlb() + +/* Could use CLFLUSH here if the cpu supports it. But then it would + need to be called for each cacheline of the whole page so it may not be + worth it. Would need a page for it. */ +#define flush_agp_cache() asm volatile("wbinvd":::"memory") + +/* Convert a physical address to an address suitable for the GART. */ +#define phys_to_gart(x) (x) +#define gart_to_phys(x) (x) + +/* GATT allocation. Returns/accepts GATT kernel virtual address. 
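alloc_gatt_pages(order) below hands its argument straight to __get_free_pages(), which returns 2^order physically contiguous pages. The sketch that follows shows how a driver might turn a table size into such an order; the helper name, the 4 KiB page size, and the 4-byte entry size are assumptions made for this example only.

#include <stdio.h>

#define PAGE_SIZE_ASSUMED 4096UL

/* Hypothetical helper: smallest allocation order whose 2^order pages
 * can hold a GATT with the given number of entries. */
static unsigned int gatt_order(unsigned long entries, unsigned long entry_bytes)
{
        unsigned long bytes = entries * entry_bytes;
        unsigned long pages = (bytes + PAGE_SIZE_ASSUMED - 1) / PAGE_SIZE_ASSUMED;
        unsigned int order = 0;

        while ((1UL << order) < pages)  /* smallest order with enough pages */
                order++;
        return order;
}

int main(void)
{
        /* e.g. a 64 MiB aperture mapped by 4-byte entries: 16384 entries,
         * 64 KiB of table, 16 pages, so order 4 */
        printf("order=%u\n", gatt_order(16384, 4));
        return 0;
}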
*/ +#define alloc_gatt_pages(order) \ + ((char *)__get_free_pages(GFP_KERNEL, (order))) +#define free_gatt_pages(table, order) \ + free_pages((unsigned long)(table), (order)) + +#endif diff --git a/include/asm-x86/alternative-asm.i b/include/asm-x86/alternative-asm.i new file mode 100644 index 000000000000..4f360cd3c888 --- /dev/null +++ b/include/asm-x86/alternative-asm.i @@ -0,0 +1,5 @@ +#ifdef CONFIG_X86_32 +# include "alternative-asm_32.i" +#else +# include "alternative-asm_64.i" +#endif diff --git a/include/asm-x86/alternative-asm_32.i b/include/asm-x86/alternative-asm_32.i new file mode 100644 index 000000000000..f0510209ccbe --- /dev/null +++ b/include/asm-x86/alternative-asm_32.i @@ -0,0 +1,12 @@ +#ifdef CONFIG_SMP + .macro LOCK_PREFIX +1: lock + .section .smp_locks,"a" + .align 4 + .long 1b + .previous + .endm +#else + .macro LOCK_PREFIX + .endm +#endif diff --git a/include/asm-x86/alternative-asm_64.i b/include/asm-x86/alternative-asm_64.i new file mode 100644 index 000000000000..0b3f1a2bb2cb --- /dev/null +++ b/include/asm-x86/alternative-asm_64.i @@ -0,0 +1,12 @@ +#ifdef CONFIG_SMP + .macro LOCK_PREFIX +1: lock + .section .smp_locks,"a" + .align 8 + .quad 1b + .previous + .endm +#else + .macro LOCK_PREFIX + .endm +#endif diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h new file mode 100644 index 000000000000..9eef6a32a130 --- /dev/null +++ b/include/asm-x86/alternative.h @@ -0,0 +1,5 @@ +#ifdef CONFIG_X86_32 +# include "alternative_32.h" +#else +# include "alternative_64.h" +#endif diff --git a/include/asm-x86/alternative_32.h b/include/asm-x86/alternative_32.h new file mode 100644 index 000000000000..bda6c810c0f4 --- /dev/null +++ b/include/asm-x86/alternative_32.h @@ -0,0 +1,154 @@ +#ifndef _I386_ALTERNATIVE_H +#define _I386_ALTERNATIVE_H + +#include +#include +#include + +struct alt_instr { + u8 *instr; /* original instruction */ + u8 *replacement; + u8 cpuid; /* cpuid bit set for replacement */ + u8 instrlen; /* length of original instruction */ + u8 replacementlen; /* length of new instruction, <= instrlen */ + u8 pad; +}; + +extern void alternative_instructions(void); +extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); + +struct module; +#ifdef CONFIG_SMP +extern void alternatives_smp_module_add(struct module *mod, char *name, + void *locks, void *locks_end, + void *text, void *text_end); +extern void alternatives_smp_module_del(struct module *mod); +extern void alternatives_smp_switch(int smp); +#else +static inline void alternatives_smp_module_add(struct module *mod, char *name, + void *locks, void *locks_end, + void *text, void *text_end) {} +static inline void alternatives_smp_module_del(struct module *mod) {} +static inline void alternatives_smp_switch(int smp) {} +#endif /* CONFIG_SMP */ + +/* + * Alternative instructions for different CPU types or capabilities. + * + * This allows to use optimized instructions even on generic binary + * kernels. + * + * length of oldinstr must be longer or equal the length of newinstr + * It can be padded with nops as needed. + * + * For non barrier like inlines please define new variants + * without volatile and memory clobber. 
+ */ +#define alternative(oldinstr, newinstr, feature) \ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .align 4\n" \ + " .long 661b\n" /* label */ \ + " .long 663f\n" /* new instruction */ \ + " .byte %c0\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */\ + ".previous" :: "i" (feature) : "memory") + +/* + * Alternative inline assembly with input. + * + * Pecularities: + * No memory clobber here. + * Argument numbers start with 1. + * Best is to use constraints that are fixed size (like (%1) ... "r") + * If you use variable sized constraints like "m" or "g" in the + * replacement maake sure to pad to the worst case length. + */ +#define alternative_input(oldinstr, newinstr, feature, input...) \ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .align 4\n" \ + " .long 661b\n" /* label */ \ + " .long 663f\n" /* new instruction */ \ + " .byte %c0\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */\ + ".previous" :: "i" (feature), ##input) + +/* Like alternative_input, but with a single output argument */ +#define alternative_io(oldinstr, newinstr, feature, output, input...) \ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .align 4\n" \ + " .long 661b\n" /* label */ \ + " .long 663f\n" /* new instruction */ \ + " .byte %c[feat]\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" : output : [feat] "i" (feature), ##input) + +/* + * use this macro(s) if you need more than one output parameter + * in alternative_io + */ +#define ASM_OUTPUT2(a, b) a, b + +/* + * Alternative inline assembly for SMP. + * + * The LOCK_PREFIX macro defined here replaces the LOCK and + * LOCK_PREFIX macros used everywhere in the source tree. + * + * SMP alternatives use the same data structures as the other + * alternatives and the X86_FEATURE_UP flag to indicate the case of a + * UP system running a SMP kernel. The existing apply_alternatives() + * works fine for patching a SMP kernel for UP. + * + * The SMP alternative tables can be kept after boot and contain both + * UP and SMP versions of the instructions to allow switching back to + * SMP at runtime, when hotplugging in a new CPU, which is especially + * useful in virtualized environments. + * + * The very common lock prefix is handled as special case in a + * separate table which is a pure address list without replacement ptr + * and size information. That keeps the table sizes small. + */ + +#ifdef CONFIG_SMP +#define LOCK_PREFIX \ + ".section .smp_locks,\"a\"\n" \ + " .align 4\n" \ + " .long 661f\n" /* address */ \ + ".previous\n" \ + "661:\n\tlock; " + +#else /* ! 
CONFIG_SMP */ +#define LOCK_PREFIX "" +#endif + +struct paravirt_patch_site; +#ifdef CONFIG_PARAVIRT +void apply_paravirt(struct paravirt_patch_site *start, + struct paravirt_patch_site *end); +#else +static inline void +apply_paravirt(struct paravirt_patch_site *start, + struct paravirt_patch_site *end) +{} +#define __parainstructions NULL +#define __parainstructions_end NULL +#endif + +extern void text_poke(void *addr, unsigned char *opcode, int len); + +#endif /* _I386_ALTERNATIVE_H */ diff --git a/include/asm-x86/alternative_64.h b/include/asm-x86/alternative_64.h new file mode 100644 index 000000000000..ab161e810151 --- /dev/null +++ b/include/asm-x86/alternative_64.h @@ -0,0 +1,159 @@ +#ifndef _X86_64_ALTERNATIVE_H +#define _X86_64_ALTERNATIVE_H + +#ifdef __KERNEL__ + +#include +#include + +/* + * Alternative inline assembly for SMP. + * + * The LOCK_PREFIX macro defined here replaces the LOCK and + * LOCK_PREFIX macros used everywhere in the source tree. + * + * SMP alternatives use the same data structures as the other + * alternatives and the X86_FEATURE_UP flag to indicate the case of a + * UP system running a SMP kernel. The existing apply_alternatives() + * works fine for patching a SMP kernel for UP. + * + * The SMP alternative tables can be kept after boot and contain both + * UP and SMP versions of the instructions to allow switching back to + * SMP at runtime, when hotplugging in a new CPU, which is especially + * useful in virtualized environments. + * + * The very common lock prefix is handled as special case in a + * separate table which is a pure address list without replacement ptr + * and size information. That keeps the table sizes small. + */ + +#ifdef CONFIG_SMP +#define LOCK_PREFIX \ + ".section .smp_locks,\"a\"\n" \ + " .align 8\n" \ + " .quad 661f\n" /* address */ \ + ".previous\n" \ + "661:\n\tlock; " + +#else /* ! CONFIG_SMP */ +#define LOCK_PREFIX "" +#endif + +/* This must be included *after* the definition of LOCK_PREFIX */ +#include + +struct alt_instr { + u8 *instr; /* original instruction */ + u8 *replacement; + u8 cpuid; /* cpuid bit set for replacement */ + u8 instrlen; /* length of original instruction */ + u8 replacementlen; /* length of new instruction, <= instrlen */ + u8 pad[5]; +}; + +extern void alternative_instructions(void); +extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); + +struct module; + +#ifdef CONFIG_SMP +extern void alternatives_smp_module_add(struct module *mod, char *name, + void *locks, void *locks_end, + void *text, void *text_end); +extern void alternatives_smp_module_del(struct module *mod); +extern void alternatives_smp_switch(int smp); +#else +static inline void alternatives_smp_module_add(struct module *mod, char *name, + void *locks, void *locks_end, + void *text, void *text_end) {} +static inline void alternatives_smp_module_del(struct module *mod) {} +static inline void alternatives_smp_switch(int smp) {} +#endif + +#endif + +/* + * Alternative instructions for different CPU types or capabilities. + * + * This allows to use optimized instructions even on generic binary + * kernels. + * + * length of oldinstr must be longer or equal the length of newinstr + * It can be padded with nops as needed. + * + * For non barrier like inlines please define new variants + * without volatile and memory clobber. 
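Both .altinstructions and .smp_locks rely on the same idiom: every use site emits a small record into a named section, and apply_alternatives()/alternatives_smp_module_add() later walk the whole table between start and end markers. A userspace analogue of that table-in-a-section pattern is sketched below; the section name, macro, and struct are invented for the demo, and it relies on GNU ld synthesizing __start_/__stop_ symbols for sections whose names are valid C identifiers.

#include <stdio.h>

struct patch_site {
        const void *addr;       /* location that would be patched */
        const char *what;       /* description, for the demo printout */
};

/* Each "use site" drops one record into the demo_sites section. */
#define RECORD_SITE(label) \
        static const struct patch_site __site_##label \
        __attribute__((used, section("demo_sites"))) = { &__site_##label, #label }

RECORD_SITE(lock_in_foo);
RECORD_SITE(lock_in_bar);

/* Provided by the linker for the demo_sites section. */
extern const struct patch_site __start_demo_sites[];
extern const struct patch_site __stop_demo_sites[];

int main(void)
{
        const struct patch_site *p;

        /* Walk the collected table, the way apply_alternatives() walks
         * the records between its start and end arguments. */
        for (p = __start_demo_sites; p < __stop_demo_sites; p++)
                printf("would patch %p (%s)\n", p->addr, p->what);
        return 0;
}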
+ */ +#define alternative(oldinstr, newinstr, feature) \ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .align 8\n" \ + " .quad 661b\n" /* label */ \ + " .quad 663f\n" /* new instruction */ \ + " .byte %c0\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" :: "i" (feature) : "memory") + +/* + * Alternative inline assembly with input. + * + * Pecularities: + * No memory clobber here. + * Argument numbers start with 1. + * Best is to use constraints that are fixed size (like (%1) ... "r") + * If you use variable sized constraints like "m" or "g" in the + * replacement make sure to pad to the worst case length. + */ +#define alternative_input(oldinstr, newinstr, feature, input...) \ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .align 8\n" \ + " .quad 661b\n" /* label */ \ + " .quad 663f\n" /* new instruction */ \ + " .byte %c0\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" :: "i" (feature), ##input) + +/* Like alternative_input, but with a single output argument */ +#define alternative_io(oldinstr, newinstr, feature, output, input...) \ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .align 8\n" \ + " .quad 661b\n" /* label */ \ + " .quad 663f\n" /* new instruction */ \ + " .byte %c[feat]\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" : output : [feat] "i" (feature), ##input) + +/* + * use this macro(s) if you need more than one output parameter + * in alternative_io + */ +#define ASM_OUTPUT2(a, b) a, b + +struct paravirt_patch; +#ifdef CONFIG_PARAVIRT +void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end); +#else +static inline void +apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end) +{} +#define __parainstructions NULL +#define __parainstructions_end NULL +#endif + +extern void text_poke(void *addr, unsigned char *opcode, int len); + +#endif /* _X86_64_ALTERNATIVE_H */ diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h new file mode 100644 index 000000000000..9fbcc0bd2ac4 --- /dev/null +++ b/include/asm-x86/apic.h @@ -0,0 +1,5 @@ +#ifdef CONFIG_X86_32 +# include "apic_32.h" +#else +# include "apic_64.h" +#endif diff --git a/include/asm-x86/apic_32.h b/include/asm-x86/apic_32.h new file mode 100644 index 000000000000..4091b33dcb10 --- /dev/null +++ b/include/asm-x86/apic_32.h @@ -0,0 +1,126 @@ +#ifndef __ASM_APIC_H +#define __ASM_APIC_H + +#include +#include +#include +#include +#include +#include + +#define Dprintk(x...) + +/* + * Debugging macros + */ +#define APIC_QUIET 0 +#define APIC_VERBOSE 1 +#define APIC_DEBUG 2 + +extern int apic_verbosity; + +/* + * Define the default level of output to be very little + * This can be turned up by using apic=verbose for more + * information and apic=debug for _lots_ of information. + * apic_verbosity is defined in apic.c + */ +#define apic_printk(v, s, a...) 
do { \ + if ((v) <= apic_verbosity) \ + printk(s, ##a); \ + } while (0) + + +extern void generic_apic_probe(void); + +#ifdef CONFIG_X86_LOCAL_APIC + +/* + * Basic functions accessing APICs. + */ +#ifdef CONFIG_PARAVIRT +#include +#else +#define apic_write native_apic_write +#define apic_write_atomic native_apic_write_atomic +#define apic_read native_apic_read +#define setup_boot_clock setup_boot_APIC_clock +#define setup_secondary_clock setup_secondary_APIC_clock +#endif + +static __inline fastcall void native_apic_write(unsigned long reg, + unsigned long v) +{ + *((volatile unsigned long *)(APIC_BASE+reg)) = v; +} + +static __inline fastcall void native_apic_write_atomic(unsigned long reg, + unsigned long v) +{ + xchg((volatile unsigned long *)(APIC_BASE+reg), v); +} + +static __inline fastcall unsigned long native_apic_read(unsigned long reg) +{ + return *((volatile unsigned long *)(APIC_BASE+reg)); +} + +void apic_wait_icr_idle(void); +unsigned long safe_apic_wait_icr_idle(void); +int get_physical_broadcast(void); + +#ifdef CONFIG_X86_GOOD_APIC +# define FORCE_READ_AROUND_WRITE 0 +# define apic_read_around(x) +# define apic_write_around(x,y) apic_write((x),(y)) +#else +# define FORCE_READ_AROUND_WRITE 1 +# define apic_read_around(x) apic_read(x) +# define apic_write_around(x,y) apic_write_atomic((x),(y)) +#endif + +static inline void ack_APIC_irq(void) +{ + /* + * ack_APIC_irq() actually gets compiled as a single instruction: + * - a single rmw on Pentium/82489DX + * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC) + * ... yummie. + */ + + /* Docs say use 0 for future compatibility */ + apic_write_around(APIC_EOI, 0); +} + +extern int lapic_get_maxlvt(void); +extern void clear_local_APIC(void); +extern void connect_bsp_APIC (void); +extern void disconnect_bsp_APIC (int virt_wire_setup); +extern void disable_local_APIC (void); +extern void lapic_shutdown (void); +extern int verify_local_APIC (void); +extern void cache_APIC_registers (void); +extern void sync_Arb_IDs (void); +extern void init_bsp_APIC (void); +extern void setup_local_APIC (void); +extern void init_apic_mappings (void); +extern void smp_local_timer_interrupt (void); +extern void setup_boot_APIC_clock (void); +extern void setup_secondary_APIC_clock (void); +extern int APIC_init_uniprocessor (void); + +extern void enable_NMI_through_LVT0 (void * dummy); + +#define ARCH_APICTIMER_STOPS_ON_C3 1 + +extern int timer_over_8254; +extern int local_apic_timer_c2_ok; + +extern int local_apic_timer_disabled; + +#else /* !CONFIG_X86_LOCAL_APIC */ +static inline void lapic_shutdown(void) { } + +#endif /* !CONFIG_X86_LOCAL_APIC */ + +#endif /* __ASM_APIC_H */ diff --git a/include/asm-x86/apic_64.h b/include/asm-x86/apic_64.h new file mode 100644 index 000000000000..85125ef3c414 --- /dev/null +++ b/include/asm-x86/apic_64.h @@ -0,0 +1,107 @@ +#ifndef __ASM_APIC_H +#define __ASM_APIC_H + +#include +#include +#include +#include +#include + +#define Dprintk(x...) + +/* + * Debugging macros + */ +#define APIC_QUIET 0 +#define APIC_VERBOSE 1 +#define APIC_DEBUG 2 + +extern int apic_verbosity; +extern int apic_runs_main_timer; +extern int ioapic_force; +extern int apic_mapped; + +/* + * Define the default level of output to be very little + * This can be turned up by using apic=verbose for more + * information and apic=debug for _lots_ of information. + * apic_verbosity is defined in apic.c + */ +#define apic_printk(v, s, a...) 
do { \ + if ((v) <= apic_verbosity) \ + printk(s, ##a); \ + } while (0) + +struct pt_regs; + +/* + * Basic functions accessing APICs. + */ + +static __inline void apic_write(unsigned long reg, unsigned int v) +{ + *((volatile unsigned int *)(APIC_BASE+reg)) = v; +} + +static __inline unsigned int apic_read(unsigned long reg) +{ + return *((volatile unsigned int *)(APIC_BASE+reg)); +} + +extern void apic_wait_icr_idle(void); +extern unsigned int safe_apic_wait_icr_idle(void); + +static inline void ack_APIC_irq(void) +{ + /* + * ack_APIC_irq() actually gets compiled as a single instruction: + * - a single rmw on Pentium/82489DX + * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC) + * ... yummie. + */ + + /* Docs say use 0 for future compatibility */ + apic_write(APIC_EOI, 0); +} + +extern int get_maxlvt (void); +extern void clear_local_APIC (void); +extern void connect_bsp_APIC (void); +extern void disconnect_bsp_APIC (int virt_wire_setup); +extern void disable_local_APIC (void); +extern int verify_local_APIC (void); +extern void cache_APIC_registers (void); +extern void sync_Arb_IDs (void); +extern void init_bsp_APIC (void); +extern void setup_local_APIC (void); +extern void init_apic_mappings (void); +extern void smp_local_timer_interrupt (void); +extern void setup_boot_APIC_clock (void); +extern void setup_secondary_APIC_clock (void); +extern int APIC_init_uniprocessor (void); +extern void disable_APIC_timer(void); +extern void enable_APIC_timer(void); +extern void setup_apic_routing(void); + +extern void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector, + unsigned char msg_type, unsigned char mask); + +extern int apic_is_clustered_box(void); + +#define K8_APIC_EXT_LVT_BASE 0x500 +#define K8_APIC_EXT_INT_MSG_FIX 0x0 +#define K8_APIC_EXT_INT_MSG_SMI 0x2 +#define K8_APIC_EXT_INT_MSG_NMI 0x4 +#define K8_APIC_EXT_INT_MSG_EXT 0x7 +#define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0 + +void smp_send_timer_broadcast_ipi(void); +void switch_APIC_timer_to_ipi(void *cpumask); +void switch_ipi_to_APIC_timer(void *cpumask); + +#define ARCH_APICTIMER_STOPS_ON_C3 1 + +extern unsigned boot_cpu_id; +extern int local_apic_timer_c2_ok; + +#endif /* __ASM_APIC_H */ diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h new file mode 100644 index 000000000000..4542c220bf4d --- /dev/null +++ b/include/asm-x86/apicdef.h @@ -0,0 +1,5 @@ +#ifdef CONFIG_X86_32 +# include "apicdef_32.h" +#else +# include "apicdef_64.h" +#endif diff --git a/include/asm-x86/apicdef_32.h b/include/asm-x86/apicdef_32.h new file mode 100644 index 000000000000..9f6995341fdc --- /dev/null +++ b/include/asm-x86/apicdef_32.h @@ -0,0 +1,375 @@ +#ifndef __ASM_APICDEF_H +#define __ASM_APICDEF_H + +/* + * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) + * + * Alan Cox , 1995. 
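The apic_read()/apic_write() accessors above are plain 32-bit volatile loads and stores at a fixed base plus a register offset. The runnable sketch below mimics that access pattern against a static buffer standing in for the memory-mapped APIC page; in the kernel, APIC_BASE is a fixmap virtual address, and the demo_* names here are made up for the example.

#include <stdint.h>
#include <stdio.h>

/* Page-aligned stand-in for the mapped APIC register page. */
static uint8_t fake_apic_page[4096] __attribute__((aligned(4096)));
#define FAKE_APIC_BASE ((unsigned long)fake_apic_page)

#define DEMO_APIC_EOI 0xB0      /* same register offset as APIC_EOI */

static inline void demo_apic_write(unsigned long reg, uint32_t v)
{
        *(volatile uint32_t *)(FAKE_APIC_BASE + reg) = v;
}

static inline uint32_t demo_apic_read(unsigned long reg)
{
        return *(volatile uint32_t *)(FAKE_APIC_BASE + reg);
}

int main(void)
{
        demo_apic_write(DEMO_APIC_EOI, 0);      /* cf. ack_APIC_irq() writing 0 to EOI */
        printf("EOI reg now %u\n", (unsigned)demo_apic_read(DEMO_APIC_EOI));
        return 0;
}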
+ * Ingo Molnar , 1999, 2000 + */ + +#define APIC_DEFAULT_PHYS_BASE 0xfee00000 + +#define APIC_ID 0x20 +#define APIC_LVR 0x30 +#define APIC_LVR_MASK 0xFF00FF +#define GET_APIC_VERSION(x) ((x)&0xFF) +#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFF) +#define APIC_INTEGRATED(x) ((x)&0xF0) +#define APIC_XAPIC(x) ((x) >= 0x14) +#define APIC_TASKPRI 0x80 +#define APIC_TPRI_MASK 0xFF +#define APIC_ARBPRI 0x90 +#define APIC_ARBPRI_MASK 0xFF +#define APIC_PROCPRI 0xA0 +#define APIC_EOI 0xB0 +#define APIC_EIO_ACK 0x0 /* Write this to the EOI register */ +#define APIC_RRR 0xC0 +#define APIC_LDR 0xD0 +#define APIC_LDR_MASK (0xFF<<24) +#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFF) +#define SET_APIC_LOGICAL_ID(x) (((x)<<24)) +#define APIC_ALL_CPUS 0xFF +#define APIC_DFR 0xE0 +#define APIC_DFR_CLUSTER 0x0FFFFFFFul +#define APIC_DFR_FLAT 0xFFFFFFFFul +#define APIC_SPIV 0xF0 +#define APIC_SPIV_FOCUS_DISABLED (1<<9) +#define APIC_SPIV_APIC_ENABLED (1<<8) +#define APIC_ISR 0x100 +#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ +#define APIC_TMR 0x180 +#define APIC_IRR 0x200 +#define APIC_ESR 0x280 +#define APIC_ESR_SEND_CS 0x00001 +#define APIC_ESR_RECV_CS 0x00002 +#define APIC_ESR_SEND_ACC 0x00004 +#define APIC_ESR_RECV_ACC 0x00008 +#define APIC_ESR_SENDILL 0x00020 +#define APIC_ESR_RECVILL 0x00040 +#define APIC_ESR_ILLREGA 0x00080 +#define APIC_ICR 0x300 +#define APIC_DEST_SELF 0x40000 +#define APIC_DEST_ALLINC 0x80000 +#define APIC_DEST_ALLBUT 0xC0000 +#define APIC_ICR_RR_MASK 0x30000 +#define APIC_ICR_RR_INVALID 0x00000 +#define APIC_ICR_RR_INPROG 0x10000 +#define APIC_ICR_RR_VALID 0x20000 +#define APIC_INT_LEVELTRIG 0x08000 +#define APIC_INT_ASSERT 0x04000 +#define APIC_ICR_BUSY 0x01000 +#define APIC_DEST_LOGICAL 0x00800 +#define APIC_DM_FIXED 0x00000 +#define APIC_DM_LOWEST 0x00100 +#define APIC_DM_SMI 0x00200 +#define APIC_DM_REMRD 0x00300 +#define APIC_DM_NMI 0x00400 +#define APIC_DM_INIT 0x00500 +#define APIC_DM_STARTUP 0x00600 +#define APIC_DM_EXTINT 0x00700 +#define APIC_VECTOR_MASK 0x000FF +#define APIC_ICR2 0x310 +#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) +#define SET_APIC_DEST_FIELD(x) ((x)<<24) +#define APIC_LVTT 0x320 +#define APIC_LVTTHMR 0x330 +#define APIC_LVTPC 0x340 +#define APIC_LVT0 0x350 +#define APIC_LVT_TIMER_BASE_MASK (0x3<<18) +#define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) +#define SET_APIC_TIMER_BASE(x) (((x)<<18)) +#define APIC_TIMER_BASE_CLKIN 0x0 +#define APIC_TIMER_BASE_TMBASE 0x1 +#define APIC_TIMER_BASE_DIV 0x2 +#define APIC_LVT_TIMER_PERIODIC (1<<17) +#define APIC_LVT_MASKED (1<<16) +#define APIC_LVT_LEVEL_TRIGGER (1<<15) +#define APIC_LVT_REMOTE_IRR (1<<14) +#define APIC_INPUT_POLARITY (1<<13) +#define APIC_SEND_PENDING (1<<12) +#define APIC_MODE_MASK 0x700 +#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) +#define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) +#define APIC_MODE_FIXED 0x0 +#define APIC_MODE_NMI 0x4 +#define APIC_MODE_EXTINT 0x7 +#define APIC_LVT1 0x360 +#define APIC_LVTERR 0x370 +#define APIC_TMICT 0x380 +#define APIC_TMCCT 0x390 +#define APIC_TDCR 0x3E0 +#define APIC_TDR_DIV_TMBASE (1<<2) +#define APIC_TDR_DIV_1 0xB +#define APIC_TDR_DIV_2 0x0 +#define APIC_TDR_DIV_4 0x1 +#define APIC_TDR_DIV_8 0x2 +#define APIC_TDR_DIV_16 0x3 +#define APIC_TDR_DIV_32 0x8 +#define APIC_TDR_DIV_64 0x9 +#define APIC_TDR_DIV_128 0xA + +#define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) + +#define MAX_IO_APICS 64 + +/* + * the local APIC register structure, memory mapped. 
Not terribly well + * tested, but we might eventually use this one in the future - the + * problem why we cannot use it right now is the P5 APIC, it has an + * errata which cannot take 8-bit reads and writes, only 32-bit ones ... + */ +#define u32 unsigned int + + +struct local_apic { + +/*000*/ struct { u32 __reserved[4]; } __reserved_01; + +/*010*/ struct { u32 __reserved[4]; } __reserved_02; + +/*020*/ struct { /* APIC ID Register */ + u32 __reserved_1 : 24, + phys_apic_id : 4, + __reserved_2 : 4; + u32 __reserved[3]; + } id; + +/*030*/ const + struct { /* APIC Version Register */ + u32 version : 8, + __reserved_1 : 8, + max_lvt : 8, + __reserved_2 : 8; + u32 __reserved[3]; + } version; + +/*040*/ struct { u32 __reserved[4]; } __reserved_03; + +/*050*/ struct { u32 __reserved[4]; } __reserved_04; + +/*060*/ struct { u32 __reserved[4]; } __reserved_05; + +/*070*/ struct { u32 __reserved[4]; } __reserved_06; + +/*080*/ struct { /* Task Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } tpr; + +/*090*/ const + struct { /* Arbitration Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } apr; + +/*0A0*/ const + struct { /* Processor Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } ppr; + +/*0B0*/ struct { /* End Of Interrupt Register */ + u32 eoi; + u32 __reserved[3]; + } eoi; + +/*0C0*/ struct { u32 __reserved[4]; } __reserved_07; + +/*0D0*/ struct { /* Logical Destination Register */ + u32 __reserved_1 : 24, + logical_dest : 8; + u32 __reserved_2[3]; + } ldr; + +/*0E0*/ struct { /* Destination Format Register */ + u32 __reserved_1 : 28, + model : 4; + u32 __reserved_2[3]; + } dfr; + +/*0F0*/ struct { /* Spurious Interrupt Vector Register */ + u32 spurious_vector : 8, + apic_enabled : 1, + focus_cpu : 1, + __reserved_2 : 22; + u32 __reserved_3[3]; + } svr; + +/*100*/ struct { /* In Service Register */ +/*170*/ u32 bitfield; + u32 __reserved[3]; + } isr [8]; + +/*180*/ struct { /* Trigger Mode Register */ +/*1F0*/ u32 bitfield; + u32 __reserved[3]; + } tmr [8]; + +/*200*/ struct { /* Interrupt Request Register */ +/*270*/ u32 bitfield; + u32 __reserved[3]; + } irr [8]; + +/*280*/ union { /* Error Status Register */ + struct { + u32 send_cs_error : 1, + receive_cs_error : 1, + send_accept_error : 1, + receive_accept_error : 1, + __reserved_1 : 1, + send_illegal_vector : 1, + receive_illegal_vector : 1, + illegal_register_address : 1, + __reserved_2 : 24; + u32 __reserved_3[3]; + } error_bits; + struct { + u32 errors; + u32 __reserved_3[3]; + } all_errors; + } esr; + +/*290*/ struct { u32 __reserved[4]; } __reserved_08; + +/*2A0*/ struct { u32 __reserved[4]; } __reserved_09; + +/*2B0*/ struct { u32 __reserved[4]; } __reserved_10; + +/*2C0*/ struct { u32 __reserved[4]; } __reserved_11; + +/*2D0*/ struct { u32 __reserved[4]; } __reserved_12; + +/*2E0*/ struct { u32 __reserved[4]; } __reserved_13; + +/*2F0*/ struct { u32 __reserved[4]; } __reserved_14; + +/*300*/ struct { /* Interrupt Command Register 1 */ + u32 vector : 8, + delivery_mode : 3, + destination_mode : 1, + delivery_status : 1, + __reserved_1 : 1, + level : 1, + trigger : 1, + __reserved_2 : 2, + shorthand : 2, + __reserved_3 : 12; + u32 __reserved_4[3]; + } icr1; + +/*310*/ struct { /* Interrupt Command Register 2 */ + union { + u32 __reserved_1 : 24, + phys_dest : 4, + __reserved_2 : 4; + u32 __reserved_3 : 24, + logical_dest : 8; + } dest; + u32 __reserved_4[3]; + } icr2; + +/*320*/ struct { /* LVT - Timer */ + u32 
vector : 8, + __reserved_1 : 4, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + timer_mode : 1, + __reserved_3 : 14; + u32 __reserved_4[3]; + } lvt_timer; + +/*330*/ struct { /* LVT - Thermal Sensor */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_thermal; + +/*340*/ struct { /* LVT - Performance Counter */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_pc; + +/*350*/ struct { /* LVT - LINT0 */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + u32 __reserved_3[3]; + } lvt_lint0; + +/*360*/ struct { /* LVT - LINT1 */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + u32 __reserved_3[3]; + } lvt_lint1; + +/*370*/ struct { /* LVT - Error */ + u32 vector : 8, + __reserved_1 : 4, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_error; + +/*380*/ struct { /* Timer Initial Count Register */ + u32 initial_count; + u32 __reserved_2[3]; + } timer_icr; + +/*390*/ const + struct { /* Timer Current Count Register */ + u32 curr_count; + u32 __reserved_2[3]; + } timer_ccr; + +/*3A0*/ struct { u32 __reserved[4]; } __reserved_16; + +/*3B0*/ struct { u32 __reserved[4]; } __reserved_17; + +/*3C0*/ struct { u32 __reserved[4]; } __reserved_18; + +/*3D0*/ struct { u32 __reserved[4]; } __reserved_19; + +/*3E0*/ struct { /* Timer Divide Configuration Register */ + u32 divisor : 4, + __reserved_1 : 28; + u32 __reserved_2[3]; + } timer_dcr; + +/*3F0*/ struct { u32 __reserved[4]; } __reserved_20; + +} __attribute__ ((packed)); + +#undef u32 + +#endif diff --git a/include/asm-x86/apicdef_64.h b/include/asm-x86/apicdef_64.h new file mode 100644 index 000000000000..1dd40067c67c --- /dev/null +++ b/include/asm-x86/apicdef_64.h @@ -0,0 +1,392 @@ +#ifndef __ASM_APICDEF_H +#define __ASM_APICDEF_H + +/* + * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) + * + * Alan Cox , 1995. + * Ingo Molnar , 1999, 2000 + */ + +#define APIC_DEFAULT_PHYS_BASE 0xfee00000 + +#define APIC_ID 0x20 +#define APIC_ID_MASK (0xFFu<<24) +#define GET_APIC_ID(x) (((x)>>24)&0xFFu) +#define SET_APIC_ID(x) (((x)<<24)) +#define APIC_LVR 0x30 +#define APIC_LVR_MASK 0xFF00FF +#define GET_APIC_VERSION(x) ((x)&0xFFu) +#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu) +#define APIC_INTEGRATED(x) ((x)&0xF0u) +#define APIC_TASKPRI 0x80 +#define APIC_TPRI_MASK 0xFFu +#define APIC_ARBPRI 0x90 +#define APIC_ARBPRI_MASK 0xFFu +#define APIC_PROCPRI 0xA0 +#define APIC_EOI 0xB0 +#define APIC_EIO_ACK 0x0 /* Write this to the EOI register */ +#define APIC_RRR 0xC0 +#define APIC_LDR 0xD0 +#define APIC_LDR_MASK (0xFFu<<24) +#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFFu) +#define SET_APIC_LOGICAL_ID(x) (((x)<<24)) +#define APIC_ALL_CPUS 0xFFu +#define APIC_DFR 0xE0 +#define APIC_DFR_CLUSTER 0x0FFFFFFFul +#define APIC_DFR_FLAT 0xFFFFFFFFul +#define APIC_SPIV 0xF0 +#define APIC_SPIV_FOCUS_DISABLED (1<<9) +#define APIC_SPIV_APIC_ENABLED (1<<8) +#define APIC_ISR 0x100 +#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. 
*/ +#define APIC_TMR 0x180 +#define APIC_IRR 0x200 +#define APIC_ESR 0x280 +#define APIC_ESR_SEND_CS 0x00001 +#define APIC_ESR_RECV_CS 0x00002 +#define APIC_ESR_SEND_ACC 0x00004 +#define APIC_ESR_RECV_ACC 0x00008 +#define APIC_ESR_SENDILL 0x00020 +#define APIC_ESR_RECVILL 0x00040 +#define APIC_ESR_ILLREGA 0x00080 +#define APIC_ICR 0x300 +#define APIC_DEST_SELF 0x40000 +#define APIC_DEST_ALLINC 0x80000 +#define APIC_DEST_ALLBUT 0xC0000 +#define APIC_ICR_RR_MASK 0x30000 +#define APIC_ICR_RR_INVALID 0x00000 +#define APIC_ICR_RR_INPROG 0x10000 +#define APIC_ICR_RR_VALID 0x20000 +#define APIC_INT_LEVELTRIG 0x08000 +#define APIC_INT_ASSERT 0x04000 +#define APIC_ICR_BUSY 0x01000 +#define APIC_DEST_LOGICAL 0x00800 +#define APIC_DEST_PHYSICAL 0x00000 +#define APIC_DM_FIXED 0x00000 +#define APIC_DM_LOWEST 0x00100 +#define APIC_DM_SMI 0x00200 +#define APIC_DM_REMRD 0x00300 +#define APIC_DM_NMI 0x00400 +#define APIC_DM_INIT 0x00500 +#define APIC_DM_STARTUP 0x00600 +#define APIC_DM_EXTINT 0x00700 +#define APIC_VECTOR_MASK 0x000FF +#define APIC_ICR2 0x310 +#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) +#define SET_APIC_DEST_FIELD(x) ((x)<<24) +#define APIC_LVTT 0x320 +#define APIC_LVTTHMR 0x330 +#define APIC_LVTPC 0x340 +#define APIC_LVT0 0x350 +#define APIC_LVT_TIMER_BASE_MASK (0x3<<18) +#define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) +#define SET_APIC_TIMER_BASE(x) (((x)<<18)) +#define APIC_TIMER_BASE_CLKIN 0x0 +#define APIC_TIMER_BASE_TMBASE 0x1 +#define APIC_TIMER_BASE_DIV 0x2 +#define APIC_LVT_TIMER_PERIODIC (1<<17) +#define APIC_LVT_MASKED (1<<16) +#define APIC_LVT_LEVEL_TRIGGER (1<<15) +#define APIC_LVT_REMOTE_IRR (1<<14) +#define APIC_INPUT_POLARITY (1<<13) +#define APIC_SEND_PENDING (1<<12) +#define APIC_MODE_MASK 0x700 +#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) +#define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) +#define APIC_MODE_FIXED 0x0 +#define APIC_MODE_NMI 0x4 +#define APIC_MODE_EXTINT 0x7 +#define APIC_LVT1 0x360 +#define APIC_LVTERR 0x370 +#define APIC_TMICT 0x380 +#define APIC_TMCCT 0x390 +#define APIC_TDCR 0x3E0 +#define APIC_TDR_DIV_TMBASE (1<<2) +#define APIC_TDR_DIV_1 0xB +#define APIC_TDR_DIV_2 0x0 +#define APIC_TDR_DIV_4 0x1 +#define APIC_TDR_DIV_8 0x2 +#define APIC_TDR_DIV_16 0x3 +#define APIC_TDR_DIV_32 0x8 +#define APIC_TDR_DIV_64 0x9 +#define APIC_TDR_DIV_128 0xA + +#define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) + +#define MAX_IO_APICS 128 +#define MAX_LOCAL_APIC 256 + +/* + * All x86-64 systems are xAPIC compatible. + * In the following, "apicid" is a physical APIC ID. + */ +#define XAPIC_DEST_CPUS_SHIFT 4 +#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) +#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) +#define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) +#define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT) +#define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK) +#define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT) + +/* + * the local APIC register structure, memory mapped. Not terribly well + * tested, but we might eventually use this one in the future - the + * problem why we cannot use it right now is the P5 APIC, it has an + * errata which cannot take 8-bit reads and writes, only 32-bit ones ... 
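As a worked example of the xAPIC helpers defined above: a physical APIC ID splits into a cluster number in the upper bits and a CPU number within the cluster in the low XAPIC_DEST_CPUS_SHIFT bits. The demo below copies the macros verbatim so it stays self-contained and runnable in userspace.

#include <stdio.h>

#define XAPIC_DEST_CPUS_SHIFT   4
#define XAPIC_DEST_CPUS_MASK    ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
#define APIC_CLUSTER(apicid)    ((apicid) & XAPIC_DEST_CLUSTER_MASK)
#define APIC_CLUSTERID(apicid)  (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT)
#define APIC_CPUID(apicid)      ((apicid) & XAPIC_DEST_CPUS_MASK)

int main(void)
{
        unsigned int apicid = 0x35;     /* cluster 3, CPU 5 within that cluster */

        printf("apicid 0x%x -> cluster %u, cpu %u\n",
               apicid, APIC_CLUSTERID(apicid), APIC_CPUID(apicid));
        return 0;
}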
+ */ +#define u32 unsigned int + +struct local_apic { + +/*000*/ struct { u32 __reserved[4]; } __reserved_01; + +/*010*/ struct { u32 __reserved[4]; } __reserved_02; + +/*020*/ struct { /* APIC ID Register */ + u32 __reserved_1 : 24, + phys_apic_id : 4, + __reserved_2 : 4; + u32 __reserved[3]; + } id; + +/*030*/ const + struct { /* APIC Version Register */ + u32 version : 8, + __reserved_1 : 8, + max_lvt : 8, + __reserved_2 : 8; + u32 __reserved[3]; + } version; + +/*040*/ struct { u32 __reserved[4]; } __reserved_03; + +/*050*/ struct { u32 __reserved[4]; } __reserved_04; + +/*060*/ struct { u32 __reserved[4]; } __reserved_05; + +/*070*/ struct { u32 __reserved[4]; } __reserved_06; + +/*080*/ struct { /* Task Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } tpr; + +/*090*/ const + struct { /* Arbitration Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } apr; + +/*0A0*/ const + struct { /* Processor Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } ppr; + +/*0B0*/ struct { /* End Of Interrupt Register */ + u32 eoi; + u32 __reserved[3]; + } eoi; + +/*0C0*/ struct { u32 __reserved[4]; } __reserved_07; + +/*0D0*/ struct { /* Logical Destination Register */ + u32 __reserved_1 : 24, + logical_dest : 8; + u32 __reserved_2[3]; + } ldr; + +/*0E0*/ struct { /* Destination Format Register */ + u32 __reserved_1 : 28, + model : 4; + u32 __reserved_2[3]; + } dfr; + +/*0F0*/ struct { /* Spurious Interrupt Vector Register */ + u32 spurious_vector : 8, + apic_enabled : 1, + focus_cpu : 1, + __reserved_2 : 22; + u32 __reserved_3[3]; + } svr; + +/*100*/ struct { /* In Service Register */ +/*170*/ u32 bitfield; + u32 __reserved[3]; + } isr [8]; + +/*180*/ struct { /* Trigger Mode Register */ +/*1F0*/ u32 bitfield; + u32 __reserved[3]; + } tmr [8]; + +/*200*/ struct { /* Interrupt Request Register */ +/*270*/ u32 bitfield; + u32 __reserved[3]; + } irr [8]; + +/*280*/ union { /* Error Status Register */ + struct { + u32 send_cs_error : 1, + receive_cs_error : 1, + send_accept_error : 1, + receive_accept_error : 1, + __reserved_1 : 1, + send_illegal_vector : 1, + receive_illegal_vector : 1, + illegal_register_address : 1, + __reserved_2 : 24; + u32 __reserved_3[3]; + } error_bits; + struct { + u32 errors; + u32 __reserved_3[3]; + } all_errors; + } esr; + +/*290*/ struct { u32 __reserved[4]; } __reserved_08; + +/*2A0*/ struct { u32 __reserved[4]; } __reserved_09; + +/*2B0*/ struct { u32 __reserved[4]; } __reserved_10; + +/*2C0*/ struct { u32 __reserved[4]; } __reserved_11; + +/*2D0*/ struct { u32 __reserved[4]; } __reserved_12; + +/*2E0*/ struct { u32 __reserved[4]; } __reserved_13; + +/*2F0*/ struct { u32 __reserved[4]; } __reserved_14; + +/*300*/ struct { /* Interrupt Command Register 1 */ + u32 vector : 8, + delivery_mode : 3, + destination_mode : 1, + delivery_status : 1, + __reserved_1 : 1, + level : 1, + trigger : 1, + __reserved_2 : 2, + shorthand : 2, + __reserved_3 : 12; + u32 __reserved_4[3]; + } icr1; + +/*310*/ struct { /* Interrupt Command Register 2 */ + union { + u32 __reserved_1 : 24, + phys_dest : 4, + __reserved_2 : 4; + u32 __reserved_3 : 24, + logical_dest : 8; + } dest; + u32 __reserved_4[3]; + } icr2; + +/*320*/ struct { /* LVT - Timer */ + u32 vector : 8, + __reserved_1 : 4, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + timer_mode : 1, + __reserved_3 : 14; + u32 __reserved_4[3]; + } lvt_timer; + +/*330*/ struct { /* LVT - Thermal Sensor */ + u32 vector : 8, + 
delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_thermal; + +/*340*/ struct { /* LVT - Performance Counter */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_pc; + +/*350*/ struct { /* LVT - LINT0 */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + u32 __reserved_3[3]; + } lvt_lint0; + +/*360*/ struct { /* LVT - LINT1 */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + u32 __reserved_3[3]; + } lvt_lint1; + +/*370*/ struct { /* LVT - Error */ + u32 vector : 8, + __reserved_1 : 4, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_error; + +/*380*/ struct { /* Timer Initial Count Register */ + u32 initial_count; + u32 __reserved_2[3]; + } timer_icr; + +/*390*/ const + struct { /* Timer Current Count Register */ + u32 curr_count; + u32 __reserved_2[3]; + } timer_ccr; + +/*3A0*/ struct { u32 __reserved[4]; } __reserved_16; + +/*3B0*/ struct { u32 __reserved[4]; } __reserved_17; + +/*3C0*/ struct { u32 __reserved[4]; } __reserved_18; + +/*3D0*/ struct { u32 __reserved[4]; } __reserved_19; + +/*3E0*/ struct { /* Timer Divide Configuration Register */ + u32 divisor : 4, + __reserved_1 : 28; + u32 __reserved_2[3]; + } timer_dcr; + +/*3F0*/ struct { u32 __reserved[4]; } __reserved_20; + +} __attribute__ ((packed)); + +#undef u32 + +#define BAD_APICID 0xFFu + +#endif diff --git a/include/asm-x86/arch_hooks.h b/include/asm-x86/arch_hooks.h new file mode 100644 index 000000000000..a8c1fca9726d --- /dev/null +++ b/include/asm-x86/arch_hooks.h @@ -0,0 +1,30 @@ +#ifndef _ASM_ARCH_HOOKS_H +#define _ASM_ARCH_HOOKS_H + +#include + +/* + * linux/include/asm/arch_hooks.h + * + * define the architecture specific hooks + */ + +/* these aren't arch hooks, they are generic routines + * that can be used by the hooks */ +extern void init_ISA_irqs(void); +extern void apic_intr_init(void); +extern void smp_intr_init(void); +extern irqreturn_t timer_interrupt(int irq, void *dev_id); + +/* these are the defined hooks */ +extern void intr_init_hook(void); +extern void pre_intr_init_hook(void); +extern void pre_setup_arch_hook(void); +extern void trap_init_hook(void); +extern void time_init_hook(void); +extern void mca_nmi_hook(void); + +extern int setup_early_printk(char *); +extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2))); + +#endif diff --git a/include/asm-x86/atomic.h b/include/asm-x86/atomic.h new file mode 100644 index 000000000000..4e1b8873c474 --- /dev/null +++ b/include/asm-x86/atomic.h @@ -0,0 +1,5 @@ +#ifdef CONFIG_X86_32 +# include "atomic_32.h" +#else +# include "atomic_64.h" +#endif diff --git a/include/asm-x86/atomic_32.h b/include/asm-x86/atomic_32.h new file mode 100644 index 000000000000..437aac801711 --- /dev/null +++ b/include/asm-x86/atomic_32.h @@ -0,0 +1,266 @@ +#ifndef __ARCH_I386_ATOMIC__ +#define __ARCH_I386_ATOMIC__ + +#include +#include +#include + +/* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc.. + */ + +/* + * Make sure gcc doesn't try to be clever and move things around + * on us. 
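A note on the struct local_apic layout used in both apicdef headers: every architectural register is modelled as one 32-bit word followed by three reserved words, so successive entries sit 0x10 bytes apart, matching the offsets (020, 030, ...) annotated in the margin comments. The reduced struct below is only a stand-in used to show that convention, not the real definition.

#include <stddef.h>
#include <stdio.h>

struct demo_apic_regs {
/*000*/ struct { unsigned int __reserved[4]; } slot0;
/*010*/ struct { unsigned int __reserved[4]; } slot1;
/*020*/ struct { unsigned int id; unsigned int __reserved[3]; } id;
};

int main(void)
{
        /* Prints 0x20, the documented offset of the APIC ID register. */
        printf("id register offset = %#zx\n", offsetof(struct demo_apic_regs, id));
        return 0;
}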
We need to use _exactly_ the address the user gave us, + * not some alias that contains the same information. + */ +typedef struct { int counter; } atomic_t; + +#define ATOMIC_INIT(i) { (i) } + +/** + * atomic_read - read atomic variable + * @v: pointer of type atomic_t + * + * Atomically reads the value of @v. + */ +#define atomic_read(v) ((v)->counter) + +/** + * atomic_set - set atomic variable + * @v: pointer of type atomic_t + * @i: required value + * + * Atomically sets the value of @v to @i. + */ +#define atomic_set(v,i) (((v)->counter) = (i)) + +/** + * atomic_add - add integer to atomic variable + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v. + */ +static __inline__ void atomic_add(int i, atomic_t *v) +{ + __asm__ __volatile__( + LOCK_PREFIX "addl %1,%0" + :"+m" (v->counter) + :"ir" (i)); +} + +/** + * atomic_sub - subtract integer from atomic variable + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v. + */ +static __inline__ void atomic_sub(int i, atomic_t *v) +{ + __asm__ __volatile__( + LOCK_PREFIX "subl %1,%0" + :"+m" (v->counter) + :"ir" (i)); +} + +/** + * atomic_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static __inline__ int atomic_sub_and_test(int i, atomic_t *v) +{ + unsigned char c; + + __asm__ __volatile__( + LOCK_PREFIX "subl %2,%0; sete %1" + :"+m" (v->counter), "=qm" (c) + :"ir" (i) : "memory"); + return c; +} + +/** + * atomic_inc - increment atomic variable + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1. + */ +static __inline__ void atomic_inc(atomic_t *v) +{ + __asm__ __volatile__( + LOCK_PREFIX "incl %0" + :"+m" (v->counter)); +} + +/** + * atomic_dec - decrement atomic variable + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1. + */ +static __inline__ void atomic_dec(atomic_t *v) +{ + __asm__ __volatile__( + LOCK_PREFIX "decl %0" + :"+m" (v->counter)); +} + +/** + * atomic_dec_and_test - decrement and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static __inline__ int atomic_dec_and_test(atomic_t *v) +{ + unsigned char c; + + __asm__ __volatile__( + LOCK_PREFIX "decl %0; sete %1" + :"+m" (v->counter), "=qm" (c) + : : "memory"); + return c != 0; +} + +/** + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static __inline__ int atomic_inc_and_test(atomic_t *v) +{ + unsigned char c; + + __asm__ __volatile__( + LOCK_PREFIX "incl %0; sete %1" + :"+m" (v->counter), "=qm" (c) + : : "memory"); + return c != 0; +} + +/** + * atomic_add_negative - add and test if negative + * @v: pointer of type atomic_t + * @i: integer value to add + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. 
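atomic_inc() and atomic_dec_and_test() above are the building blocks of the kernel's reference-counting idiom: the caller that drops the count to zero is the one that frees the object. The userspace sketch below expresses that idiom with C11 atomics so it can actually run; the object type and function names are invented for the example.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_obj {
        atomic_int refcount;
        int payload;
};

static void demo_get(struct demo_obj *o)
{
        atomic_fetch_add(&o->refcount, 1);              /* cf. atomic_inc() */
}

static void demo_put(struct demo_obj *o)
{
        /* cf. atomic_dec_and_test(): only the final reference sees 1 -> 0 */
        if (atomic_fetch_sub(&o->refcount, 1) == 1) {
                printf("freeing payload %d\n", o->payload);
                free(o);
        }
}

int main(void)
{
        struct demo_obj *o = malloc(sizeof(*o));

        if (!o)
                return 1;
        atomic_init(&o->refcount, 1);
        o->payload = 42;
        demo_get(o);    /* second user takes a reference */
        demo_put(o);    /* count 2 -> 1, object stays */
        demo_put(o);    /* count 1 -> 0, object freed */
        return 0;
}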
+ */ +static __inline__ int atomic_add_negative(int i, atomic_t *v) +{ + unsigned char c; + + __asm__ __volatile__( + LOCK_PREFIX "addl %2,%0; sets %1" + :"+m" (v->counter), "=qm" (c) + :"ir" (i) : "memory"); + return c; +} + +/** + * atomic_add_return - add integer and return + * @v: pointer of type atomic_t + * @i: integer value to add + * + * Atomically adds @i to @v and returns @i + @v + */ +static __inline__ int atomic_add_return(int i, atomic_t *v) +{ + int __i; +#ifdef CONFIG_M386 + unsigned long flags; + if(unlikely(boot_cpu_data.x86 <= 3)) + goto no_xadd; +#endif + /* Modern 486+ processor */ + __i = i; + __asm__ __volatile__( + LOCK_PREFIX "xaddl %0, %1" + :"+r" (i), "+m" (v->counter) + : : "memory"); + return i + __i; + +#ifdef CONFIG_M386 +no_xadd: /* Legacy 386 processor */ + local_irq_save(flags); + __i = atomic_read(v); + atomic_set(v, i + __i); + local_irq_restore(flags); + return i + __i; +#endif +} + +/** + * atomic_sub_return - subtract integer and return + * @v: pointer of type atomic_t + * @i: integer value to subtract + * + * Atomically subtracts @i from @v and returns @v - @i + */ +static __inline__ int atomic_sub_return(int i, atomic_t *v) +{ + return atomic_add_return(-i,v); +} + +#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) +#define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) + +/** + * atomic_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns non-zero if @v was not @u, and zero otherwise. + */ +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + +#define atomic_inc_return(v) (atomic_add_return(1,v)) +#define atomic_dec_return(v) (atomic_sub_return(1,v)) + +/* These are x86-specific, used by some header files */ +#define atomic_clear_mask(mask, addr) \ +__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ +: : "r" (~(mask)),"m" (*addr) : "memory") + +#define atomic_set_mask(mask, addr) \ +__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ +: : "r" (mask),"m" (*(addr)) : "memory") + +/* Atomic operations are already serializing on x86 */ +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + +#include +#endif diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h new file mode 100644 index 000000000000..f2e64634fa48 --- /dev/null +++ b/include/asm-x86/atomic_64.h @@ -0,0 +1,466 @@ +#ifndef __ARCH_X86_64_ATOMIC__ +#define __ARCH_X86_64_ATOMIC__ + +#include +#include + +/* atomic_t should be 32 bit signed type */ + +/* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc.. + */ + +#ifdef CONFIG_SMP +#define LOCK "lock ; " +#else +#define LOCK "" +#endif + +/* + * Make sure gcc doesn't try to be clever and move things around + * on us. We need to use _exactly_ the address the user gave us, + * not some alias that contains the same information. 
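atomic_add_unless() above is a classic compare-exchange retry loop: read the counter, bail out if it already holds the forbidden value @u, otherwise try to install c + a and retry if another CPU changed the counter underneath. The sketch below restates that loop with C11 atomics so it runs in userspace; the demo_ name is invented for the example.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool demo_add_unless(atomic_int *v, int a, int u)
{
        int c = atomic_load(v);

        for (;;) {
                if (c == u)
                        return false;   /* counter already at the excluded value */
                /* On failure, compare_exchange reloads c with the current value
                 * and we go around again, just like the cmpxchg loop above. */
                if (atomic_compare_exchange_weak(v, &c, c + a))
                        return true;
        }
}

int main(void)
{
        atomic_int v = 1;

        printf("%d\n", demo_add_unless(&v, 1, 0));      /* 1: v was not 0, now 2 */
        printf("%d\n", demo_add_unless(&v, 1, 2));      /* 0: v == 2, left unchanged */
        printf("v = %d\n", atomic_load(&v));
        return 0;
}

The first call is exactly the atomic_inc_not_zero() case: increment only if the counter has not already dropped to zero.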
+ */ +typedef struct { int counter; } atomic_t; + +#define ATOMIC_INIT(i) { (i) } + +/** + * atomic_read - read atomic variable + * @v: pointer of type atomic_t + * + * Atomically reads the value of @v. + */ +#define atomic_read(v) ((v)->counter) + +/** + * atomic_set - set atomic variable + * @v: pointer of type atomic_t + * @i: required value + * + * Atomically sets the value of @v to @i. + */ +#define atomic_set(v,i) (((v)->counter) = (i)) + +/** + * atomic_add - add integer to atomic variable + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v. + */ +static __inline__ void atomic_add(int i, atomic_t *v) +{ + __asm__ __volatile__( + LOCK_PREFIX "addl %1,%0" + :"=m" (v->counter) + :"ir" (i), "m" (v->counter)); +} + +/** + * atomic_sub - subtract the atomic variable + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v. + */ +static __inline__ void atomic_sub(int i, atomic_t *v) +{ + __asm__ __volatile__( + LOCK_PREFIX "subl %1,%0" + :"=m" (v->counter) + :"ir" (i), "m" (v->counter)); +} + +/** + * atomic_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static __inline__ int atomic_sub_and_test(int i, atomic_t *v) +{ + unsigned char c; + + __asm__ __volatile__( + LOCK_PREFIX "subl %2,%0; sete %1" + :"=m" (v->counter), "=qm" (c) + :"ir" (i), "m" (v->counter) : "memory"); + return c; +} + +/** + * atomic_inc - increment atomic variable + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1. + */ +static __inline__ void atomic_inc(atomic_t *v) +{ + __asm__ __volatile__( + LOCK_PREFIX "incl %0" + :"=m" (v->counter) + :"m" (v->counter)); +} + +/** + * atomic_dec - decrement atomic variable + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1. + */ +static __inline__ void atomic_dec(atomic_t *v) +{ + __asm__ __volatile__( + LOCK_PREFIX "decl %0" + :"=m" (v->counter) + :"m" (v->counter)); +} + +/** + * atomic_dec_and_test - decrement and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static __inline__ int atomic_dec_and_test(atomic_t *v) +{ + unsigned char c; + + __asm__ __volatile__( + LOCK_PREFIX "decl %0; sete %1" + :"=m" (v->counter), "=qm" (c) + :"m" (v->counter) : "memory"); + return c != 0; +} + +/** + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static __inline__ int atomic_inc_and_test(atomic_t *v) +{ + unsigned char c; + + __asm__ __volatile__( + LOCK_PREFIX "incl %0; sete %1" + :"=m" (v->counter), "=qm" (c) + :"m" (v->counter) : "memory"); + return c != 0; +} + +/** + * atomic_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. 
+ */ +static __inline__ int atomic_add_negative(int i, atomic_t *v) +{ + unsigned char c; + + __asm__ __volatile__( + LOCK_PREFIX "addl %2,%0; sets %1" + :"=m" (v->counter), "=qm" (c) + :"ir" (i), "m" (v->counter) : "memory"); + return c; +} + +/** + * atomic_add_return - add and return + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v and returns @i + @v + */ +static __inline__ int atomic_add_return(int i, atomic_t *v) +{ + int __i = i; + __asm__ __volatile__( + LOCK_PREFIX "xaddl %0, %1" + :"+r" (i), "+m" (v->counter) + : : "memory"); + return i + __i; +} + +static __inline__ int atomic_sub_return(int i, atomic_t *v) +{ + return atomic_add_return(-i,v); +} + +#define atomic_inc_return(v) (atomic_add_return(1,v)) +#define atomic_dec_return(v) (atomic_sub_return(1,v)) + +/* An 64bit atomic type */ + +typedef struct { volatile long counter; } atomic64_t; + +#define ATOMIC64_INIT(i) { (i) } + +/** + * atomic64_read - read atomic64 variable + * @v: pointer of type atomic64_t + * + * Atomically reads the value of @v. + * Doesn't imply a read memory barrier. + */ +#define atomic64_read(v) ((v)->counter) + +/** + * atomic64_set - set atomic64 variable + * @v: pointer to type atomic64_t + * @i: required value + * + * Atomically sets the value of @v to @i. + */ +#define atomic64_set(v,i) (((v)->counter) = (i)) + +/** + * atomic64_add - add integer to atomic64 variable + * @i: integer value to add + * @v: pointer to type atomic64_t + * + * Atomically adds @i to @v. + */ +static __inline__ void atomic64_add(long i, atomic64_t *v) +{ + __asm__ __volatile__( + LOCK_PREFIX "addq %1,%0" + :"=m" (v->counter) + :"ir" (i), "m" (v->counter)); +} + +/** + * atomic64_sub - subtract the atomic64 variable + * @i: integer value to subtract + * @v: pointer to type atomic64_t + * + * Atomically subtracts @i from @v. + */ +static __inline__ void atomic64_sub(long i, atomic64_t *v) +{ + __asm__ __volatile__( + LOCK_PREFIX "subq %1,%0" + :"=m" (v->counter) + :"ir" (i), "m" (v->counter)); +} + +/** + * atomic64_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer to type atomic64_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v) +{ + unsigned char c; + + __asm__ __volatile__( + LOCK_PREFIX "subq %2,%0; sete %1" + :"=m" (v->counter), "=qm" (c) + :"ir" (i), "m" (v->counter) : "memory"); + return c; +} + +/** + * atomic64_inc - increment atomic64 variable + * @v: pointer to type atomic64_t + * + * Atomically increments @v by 1. + */ +static __inline__ void atomic64_inc(atomic64_t *v) +{ + __asm__ __volatile__( + LOCK_PREFIX "incq %0" + :"=m" (v->counter) + :"m" (v->counter)); +} + +/** + * atomic64_dec - decrement atomic64 variable + * @v: pointer to type atomic64_t + * + * Atomically decrements @v by 1. + */ +static __inline__ void atomic64_dec(atomic64_t *v) +{ + __asm__ __volatile__( + LOCK_PREFIX "decq %0" + :"=m" (v->counter) + :"m" (v->counter)); +} + +/** + * atomic64_dec_and_test - decrement and test + * @v: pointer to type atomic64_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. 
+ */ +static __inline__ int atomic64_dec_and_test(atomic64_t *v) +{ + unsigned char c; + + __asm__ __volatile__( + LOCK_PREFIX "decq %0; sete %1" + :"=m" (v->counter), "=qm" (c) + :"m" (v->counter) : "memory"); + return c != 0; +} + +/** + * atomic64_inc_and_test - increment and test + * @v: pointer to type atomic64_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static __inline__ int atomic64_inc_and_test(atomic64_t *v) +{ + unsigned char c; + + __asm__ __volatile__( + LOCK_PREFIX "incq %0; sete %1" + :"=m" (v->counter), "=qm" (c) + :"m" (v->counter) : "memory"); + return c != 0; +} + +/** + * atomic64_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer to type atomic64_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static __inline__ int atomic64_add_negative(long i, atomic64_t *v) +{ + unsigned char c; + + __asm__ __volatile__( + LOCK_PREFIX "addq %2,%0; sets %1" + :"=m" (v->counter), "=qm" (c) + :"ir" (i), "m" (v->counter) : "memory"); + return c; +} + +/** + * atomic64_add_return - add and return + * @i: integer value to add + * @v: pointer to type atomic64_t + * + * Atomically adds @i to @v and returns @i + @v + */ +static __inline__ long atomic64_add_return(long i, atomic64_t *v) +{ + long __i = i; + __asm__ __volatile__( + LOCK_PREFIX "xaddq %0, %1;" + :"+r" (i), "+m" (v->counter) + : : "memory"); + return i + __i; +} + +static __inline__ long atomic64_sub_return(long i, atomic64_t *v) +{ + return atomic64_add_return(-i,v); +} + +#define atomic64_inc_return(v) (atomic64_add_return(1,v)) +#define atomic64_dec_return(v) (atomic64_sub_return(1,v)) + +#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) +#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) + +#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) +#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) + +/** + * atomic_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns non-zero if @v was not @u, and zero otherwise. + */ +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + +/** + * atomic64_add_unless - add unless the number is a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns non-zero if @v was not @u, and zero otherwise. 
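atomic_add_unless() is the standard compare-and-swap retry loop: re-read the counter, give up if it already holds the forbidden value @u, otherwise try to install c + a and retry if another CPU changed the counter first; atomic_inc_not_zero() uses it to take a reference only while an object is still live. A userspace sketch of the same algorithm, with __atomic_compare_exchange_n standing in for atomic_cmpxchg():

/* Hedged sketch: the atomic_add_unless() retry loop, modelled in userspace.
 * Same algorithm, not the kernel implementation. */
#include <stdio.h>

static int my_add_unless(int *v, int a, int u)
{
        int c = __atomic_load_n(v, __ATOMIC_SEQ_CST);

        for (;;) {
                if (c == u)                       /* forbidden value: do nothing */
                        return 0;
                /* on failure, c is refreshed with the value another thread wrote */
                if (__atomic_compare_exchange_n(v, &c, c + a, 0,
                                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                        return 1;                 /* added; v was not u */
        }
}

int main(void)
{
        int refs = 0;

        /* inc_not_zero: only take a reference while the object is still live */
        printf("dead object: %d\n", my_add_unless(&refs, 1, 0));   /* 0 */
        refs = 2;
        printf("live object: %d\n", my_add_unless(&refs, 1, 0));   /* 1, refs now 3 */
        return 0;
}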
+ */ +static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) +{ + long c, old; + c = atomic64_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic64_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) + +/* These are x86-specific, used by some header files */ +#define atomic_clear_mask(mask, addr) \ +__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ +: : "r" (~(mask)),"m" (*addr) : "memory") + +#define atomic_set_mask(mask, addr) \ +__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ +: : "r" ((unsigned)mask),"m" (*(addr)) : "memory") + +/* Atomic operations are already serializing on x86 */ +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + +#include +#endif diff --git a/include/asm-x86/auxvec.h b/include/asm-x86/auxvec.h new file mode 100644 index 000000000000..7ff866f829ca --- /dev/null +++ b/include/asm-x86/auxvec.h @@ -0,0 +1,13 @@ +#ifdef __KERNEL__ +# ifdef CONFIG_X86_32 +# include "auxvec_32.h" +# else +# include "auxvec_64.h" +# endif +#else +# ifdef __i386__ +# include "auxvec_32.h" +# else +# include "auxvec_64.h" +# endif +#endif diff --git a/include/asm-x86/auxvec_32.h b/include/asm-x86/auxvec_32.h new file mode 100644 index 000000000000..395e13016bfb --- /dev/null +++ b/include/asm-x86/auxvec_32.h @@ -0,0 +1,11 @@ +#ifndef __ASMi386_AUXVEC_H +#define __ASMi386_AUXVEC_H + +/* + * Architecture-neutral AT_ values in 0-17, leave some room + * for more of them, start the x86-specific ones at 32. + */ +#define AT_SYSINFO 32 +#define AT_SYSINFO_EHDR 33 + +#endif diff --git a/include/asm-x86/auxvec_64.h b/include/asm-x86/auxvec_64.h new file mode 100644 index 000000000000..1d5ab0d03950 --- /dev/null +++ b/include/asm-x86/auxvec_64.h @@ -0,0 +1,6 @@ +#ifndef __ASM_X86_64_AUXVEC_H +#define __ASM_X86_64_AUXVEC_H + +#define AT_SYSINFO_EHDR 33 + +#endif diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h new file mode 100644 index 000000000000..07e3f6d4fe47 --- /dev/null +++ b/include/asm-x86/bitops.h @@ -0,0 +1,5 @@ +#ifdef CONFIG_X86_32 +# include "bitops_32.h" +#else +# include "bitops_64.h" +#endif diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h new file mode 100644 index 000000000000..a20fe9822f60 --- /dev/null +++ b/include/asm-x86/bitops_32.h @@ -0,0 +1,423 @@ +#ifndef _I386_BITOPS_H +#define _I386_BITOPS_H + +/* + * Copyright 1992, Linus Torvalds. + */ + +#include +#include + +/* + * These have to be done with inline assembly: that way the bit-setting + * is guaranteed to be atomic. All bit operations return 0 if the bit + * was cleared before the operation and != 0 if it was not. + * + * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). + */ + +#define ADDR (*(volatile long *) addr) + +/** + * set_bit - Atomically set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * This function is atomic and may not be reordered. See __set_bit() + * if you do not require the atomic guarantees. + * + * Note: there are no guarantees that this function will not be reordered + * on non x86 architectures, so if you are writing portable code, + * make sure not to rely on its reordering guarantees. + * + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. 
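AT_SYSINFO and AT_SYSINFO_EHDR are aux-vector entries through which the kernel tells a new process where the vsyscall entry and the vDSO ended up. Purely as an illustration: a userspace program can read AT_SYSINFO_EHDR via glibc's getauxval() (a glibc 2.16+ facility, not something these headers provide) or by walking the auxv that follows envp[].

/* Hedged sketch: reading the vDSO address from the aux vector in userspace. */
#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
        unsigned long vdso = getauxval(AT_SYSINFO_EHDR); /* vDSO ELF header */

        if (vdso)
                printf("vDSO mapped at 0x%lx\n", vdso);
        else
                printf("no AT_SYSINFO_EHDR entry\n");
        return 0;
}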
+ */ +static inline void set_bit(int nr, volatile unsigned long * addr) +{ + __asm__ __volatile__( LOCK_PREFIX + "btsl %1,%0" + :"+m" (ADDR) + :"Ir" (nr)); +} + +/** + * __set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike set_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static inline void __set_bit(int nr, volatile unsigned long * addr) +{ + __asm__( + "btsl %1,%0" + :"+m" (ADDR) + :"Ir" (nr)); +} + +/** + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * clear_bit() is atomic and may not be reordered. However, it does + * not contain a memory barrier, so if it is used for locking purposes, + * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * in order to ensure changes are visible on other processors. + */ +static inline void clear_bit(int nr, volatile unsigned long * addr) +{ + __asm__ __volatile__( LOCK_PREFIX + "btrl %1,%0" + :"+m" (ADDR) + :"Ir" (nr)); +} + +static inline void __clear_bit(int nr, volatile unsigned long * addr) +{ + __asm__ __volatile__( + "btrl %1,%0" + :"+m" (ADDR) + :"Ir" (nr)); +} +#define smp_mb__before_clear_bit() barrier() +#define smp_mb__after_clear_bit() barrier() + +/** + * __change_bit - Toggle a bit in memory + * @nr: the bit to change + * @addr: the address to start counting from + * + * Unlike change_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static inline void __change_bit(int nr, volatile unsigned long * addr) +{ + __asm__ __volatile__( + "btcl %1,%0" + :"+m" (ADDR) + :"Ir" (nr)); +} + +/** + * change_bit - Toggle a bit in memory + * @nr: Bit to change + * @addr: Address to start counting from + * + * change_bit() is atomic and may not be reordered. It may be + * reordered on other architectures than x86. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void change_bit(int nr, volatile unsigned long * addr) +{ + __asm__ __volatile__( LOCK_PREFIX + "btcl %1,%0" + :"+m" (ADDR) + :"Ir" (nr)); +} + +/** + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It may be reordered on other architectures than x86. + * It also implies a memory barrier. + */ +static inline int test_and_set_bit(int nr, volatile unsigned long * addr) +{ + int oldbit; + + __asm__ __volatile__( LOCK_PREFIX + "btsl %2,%1\n\tsbbl %0,%0" + :"=r" (oldbit),"+m" (ADDR) + :"Ir" (nr) : "memory"); + return oldbit; +} + +/** + * __test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. 
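All of these bit operations share one addressing rule, spelled out above: bit nr lives in word nr / BITS_PER_LONG of the array, at bit position nr % BITS_PER_LONG, so a bitmap may span any number of words. A plain-C, non-atomic sketch of that rule (my_set_bit/my_test_bit are illustrative stand-ins, not the routines above):

/* Hedged sketch: the "bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1)"
 * addressing convention, modelled without atomicity in userspace. */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define MY_BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static void my_set_bit(unsigned nr, unsigned long *addr)
{
        addr[nr / MY_BITS_PER_LONG] |= 1UL << (nr % MY_BITS_PER_LONG);
}

static int my_test_bit(unsigned nr, const unsigned long *addr)
{
        return (addr[nr / MY_BITS_PER_LONG] >> (nr % MY_BITS_PER_LONG)) & 1;
}

int main(void)
{
        unsigned long map[4] = { 0 };             /* a small multi-word bitmap */

        my_set_bit(0, map);                       /* LSB of map[0] */
        my_set_bit(MY_BITS_PER_LONG, map);        /* LSB of map[1] */
        assert(my_test_bit(0, map) && my_test_bit(MY_BITS_PER_LONG, map));
        assert(!my_test_bit(1, map));
        printf("map[0]=%#lx map[1]=%#lx\n", map[0], map[1]);
        return 0;
}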
+ */ +static inline int __test_and_set_bit(int nr, volatile unsigned long * addr) +{ + int oldbit; + + __asm__( + "btsl %2,%1\n\tsbbl %0,%0" + :"=r" (oldbit),"+m" (ADDR) + :"Ir" (nr)); + return oldbit; +} + +/** + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It can be reordered on architectures other than x86. + * It also implies a memory barrier. + */ +static inline int test_and_clear_bit(int nr, volatile unsigned long * addr) +{ + int oldbit; + + __asm__ __volatile__( LOCK_PREFIX + "btrl %2,%1\n\tsbbl %0,%0" + :"=r" (oldbit),"+m" (ADDR) + :"Ir" (nr) : "memory"); + return oldbit; +} + +/** + * __test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + int oldbit; + + __asm__( + "btrl %2,%1\n\tsbbl %0,%0" + :"=r" (oldbit),"+m" (ADDR) + :"Ir" (nr)); + return oldbit; +} + +/* WARNING: non atomic and it can be reordered! */ +static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) +{ + int oldbit; + + __asm__ __volatile__( + "btcl %2,%1\n\tsbbl %0,%0" + :"=r" (oldbit),"+m" (ADDR) + :"Ir" (nr) : "memory"); + return oldbit; +} + +/** + * test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static inline int test_and_change_bit(int nr, volatile unsigned long* addr) +{ + int oldbit; + + __asm__ __volatile__( LOCK_PREFIX + "btcl %2,%1\n\tsbbl %0,%0" + :"=r" (oldbit),"+m" (ADDR) + :"Ir" (nr) : "memory"); + return oldbit; +} + +#if 0 /* Fool kernel-doc since it doesn't do macros yet */ +/** + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static int test_bit(int nr, const volatile void * addr); +#endif + +static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr) +{ + return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0; +} + +static inline int variable_test_bit(int nr, const volatile unsigned long * addr) +{ + int oldbit; + + __asm__ __volatile__( + "btl %2,%1\n\tsbbl %0,%0" + :"=r" (oldbit) + :"m" (ADDR),"Ir" (nr)); + return oldbit; +} + +#define test_bit(nr,addr) \ +(__builtin_constant_p(nr) ? \ + constant_test_bit((nr),(addr)) : \ + variable_test_bit((nr),(addr))) + +#undef ADDR + +/** + * find_first_zero_bit - find the first zero bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit-number of the first zero bit, not the number of the byte + * containing a bit. + */ +static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) +{ + int d0, d1, d2; + int res; + + if (!size) + return 0; + /* This looks at memory.
Mark it volatile to tell gcc not to move it around */ + __asm__ __volatile__( + "movl $-1,%%eax\n\t" + "xorl %%edx,%%edx\n\t" + "repe; scasl\n\t" + "je 1f\n\t" + "xorl -4(%%edi),%%eax\n\t" + "subl $4,%%edi\n\t" + "bsfl %%eax,%%edx\n" + "1:\tsubl %%ebx,%%edi\n\t" + "shll $3,%%edi\n\t" + "addl %%edi,%%edx" + :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) + :"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory"); + return res; +} + +/** + * find_next_zero_bit - find the first zero bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +int find_next_zero_bit(const unsigned long *addr, int size, int offset); + +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + */ +static inline unsigned long __ffs(unsigned long word) +{ + __asm__("bsfl %1,%0" + :"=r" (word) + :"rm" (word)); + return word; +} + +/** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit-number of the first set bit, not the number of the byte + * containing a bit. + */ +static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) +{ + unsigned x = 0; + + while (x < size) { + unsigned long val = *addr++; + if (val) + return __ffs(val) + x; + x += (sizeof(*addr)<<3); + } + return x; +} + +/** + * find_next_bit - find the first set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +int find_next_bit(const unsigned long *addr, int size, int offset); + +/** + * ffz - find first zero in word. + * @word: The word to search + * + * Undefined if no zero exists, so code should check against ~0UL first. + */ +static inline unsigned long ffz(unsigned long word) +{ + __asm__("bsfl %1,%0" + :"=r" (word) + :"r" (~word)); + return word; +} + +#ifdef __KERNEL__ + +#include + +/** + * ffs - find first bit set + * @x: the word to search + * + * This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above ffz() (man ffs). + */ +static inline int ffs(int x) +{ + int r; + + __asm__("bsfl %1,%0\n\t" + "jnz 1f\n\t" + "movl $-1,%0\n" + "1:" : "=r" (r) : "rm" (x)); + return r+1; +} + +/** + * fls - find last bit set + * @x: the word to search + * + * This is defined the same way as ffs(). + */ +static inline int fls(int x) +{ + int r; + + __asm__("bsrl %1,%0\n\t" + "jnz 1f\n\t" + "movl $-1,%0\n" + "1:" : "=r" (r) : "rm" (x)); + return r+1; +} + +#include + +#endif /* __KERNEL__ */ + +#include + +#ifdef __KERNEL__ + +#include + +#define ext2_set_bit_atomic(lock,nr,addr) \ + test_and_set_bit((nr),(unsigned long*)addr) +#define ext2_clear_bit_atomic(lock,nr, addr) \ + test_and_clear_bit((nr),(unsigned long*)addr) + +#include + +#endif /* __KERNEL__ */ + +#endif /* _I386_BITOPS_H */ diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h new file mode 100644 index 000000000000..d4dbbe5f7bd9 --- /dev/null +++ b/include/asm-x86/bitops_64.h @@ -0,0 +1,427 @@ +#ifndef _X86_64_BITOPS_H +#define _X86_64_BITOPS_H + +/* + * Copyright 1992, Linus Torvalds. + */ + +#include + +#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) +/* Technically wrong, but this avoids compilation errors on some gcc + versions. 
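The 32-bit scan routines use two numbering conventions: ffs()/fls() are 1-based and return 0 for a zero argument, while __ffs()/ffz() are 0-based and leave the all-zero (or all-one) case to the caller. A small userspace check of those conventions against the equivalent compiler builtins (an illustration, not the header's implementation):

/* Hedged sketch: verifying the bit-numbering conventions described above. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
        assert(__builtin_ffs(0) == 0);            /* ffs(0) == 0                   */
        assert(__builtin_ffs(0x08) == 4);         /* ffs: 1-based index of LSB set */
        assert(32 - __builtin_clz(0x08) == 4);    /* fls(0x08) == 4, 1-based MSB   */
        assert(__builtin_ctzl(0x08UL) == 3);      /* __ffs: 0-based, caller checks 0 */
        assert(__builtin_ctzl(~0xffUL) == 8);     /* ffz(0xff) == 8: first zero bit */
        printf("bit-numbering conventions hold\n");
        return 0;
}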
*/ +#define ADDR "=m" (*(volatile long *) addr) +#else +#define ADDR "+m" (*(volatile long *) addr) +#endif + +/** + * set_bit - Atomically set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * This function is atomic and may not be reordered. See __set_bit() + * if you do not require the atomic guarantees. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static __inline__ void set_bit(int nr, volatile void * addr) +{ + __asm__ __volatile__( LOCK_PREFIX + "btsl %1,%0" + :ADDR + :"dIr" (nr) : "memory"); +} + +/** + * __set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike set_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static __inline__ void __set_bit(int nr, volatile void * addr) +{ + __asm__ volatile( + "btsl %1,%0" + :ADDR + :"dIr" (nr) : "memory"); +} + +/** + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * clear_bit() is atomic and may not be reordered. However, it does + * not contain a memory barrier, so if it is used for locking purposes, + * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * in order to ensure changes are visible on other processors. + */ +static __inline__ void clear_bit(int nr, volatile void * addr) +{ + __asm__ __volatile__( LOCK_PREFIX + "btrl %1,%0" + :ADDR + :"dIr" (nr)); +} + +static __inline__ void __clear_bit(int nr, volatile void * addr) +{ + __asm__ __volatile__( + "btrl %1,%0" + :ADDR + :"dIr" (nr)); +} + +#define smp_mb__before_clear_bit() barrier() +#define smp_mb__after_clear_bit() barrier() + +/** + * __change_bit - Toggle a bit in memory + * @nr: the bit to change + * @addr: the address to start counting from + * + * Unlike change_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static __inline__ void __change_bit(int nr, volatile void * addr) +{ + __asm__ __volatile__( + "btcl %1,%0" + :ADDR + :"dIr" (nr)); +} + +/** + * change_bit - Toggle a bit in memory + * @nr: Bit to change + * @addr: Address to start counting from + * + * change_bit() is atomic and may not be reordered. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static __inline__ void change_bit(int nr, volatile void * addr) +{ + __asm__ __volatile__( LOCK_PREFIX + "btcl %1,%0" + :ADDR + :"dIr" (nr)); +} + +/** + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static __inline__ int test_and_set_bit(int nr, volatile void * addr) +{ + int oldbit; + + __asm__ __volatile__( LOCK_PREFIX + "btsl %2,%1\n\tsbbl %0,%0" + :"=r" (oldbit),ADDR + :"dIr" (nr) : "memory"); + return oldbit; +} + +/** + * __test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. 
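test_and_set_bit() returning the old bit value is what makes it usable as a one-shot claim or try-lock on a flag word. A userspace sketch of that idiom, with __atomic_fetch_or standing in for the locked bts instruction and a hypothetical claim_slot() helper:

/* Hedged sketch: the test_and_set_bit() "try to claim a flag" idiom. */
#include <stdio.h>

static unsigned long busy;                        /* one flag bit per resource */

/* Returns 1 if we won the race for bit nr, 0 if it was already set. */
static int claim_slot(unsigned nr)
{
        unsigned long old = __atomic_fetch_or(&busy, 1UL << nr, __ATOMIC_SEQ_CST);

        return !(old & (1UL << nr));
}

int main(void)
{
        printf("first claim:  %d\n", claim_slot(3));   /* 1: old bit was clear */
        printf("second claim: %d\n", claim_slot(3));   /* 0: already taken     */
        return 0;
}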
+ */ +static __inline__ int __test_and_set_bit(int nr, volatile void * addr) +{ + int oldbit; + + __asm__( + "btsl %2,%1\n\tsbbl %0,%0" + :"=r" (oldbit),ADDR + :"dIr" (nr)); + return oldbit; +} + +/** + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static __inline__ int test_and_clear_bit(int nr, volatile void * addr) +{ + int oldbit; + + __asm__ __volatile__( LOCK_PREFIX + "btrl %2,%1\n\tsbbl %0,%0" + :"=r" (oldbit),ADDR + :"dIr" (nr) : "memory"); + return oldbit; +} + +/** + * __test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) +{ + int oldbit; + + __asm__( + "btrl %2,%1\n\tsbbl %0,%0" + :"=r" (oldbit),ADDR + :"dIr" (nr)); + return oldbit; +} + +/* WARNING: non atomic and it can be reordered! */ +static __inline__ int __test_and_change_bit(int nr, volatile void * addr) +{ + int oldbit; + + __asm__ __volatile__( + "btcl %2,%1\n\tsbbl %0,%0" + :"=r" (oldbit),ADDR + :"dIr" (nr) : "memory"); + return oldbit; +} + +/** + * test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static __inline__ int test_and_change_bit(int nr, volatile void * addr) +{ + int oldbit; + + __asm__ __volatile__( LOCK_PREFIX + "btcl %2,%1\n\tsbbl %0,%0" + :"=r" (oldbit),ADDR + :"dIr" (nr) : "memory"); + return oldbit; +} + +#if 0 /* Fool kernel-doc since it doesn't do macros yet */ +/** + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static int test_bit(int nr, const volatile void * addr); +#endif + +static __inline__ int constant_test_bit(int nr, const volatile void * addr) +{ + return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; +} + +static __inline__ int variable_test_bit(int nr, volatile const void * addr) +{ + int oldbit; + + __asm__ __volatile__( + "btl %2,%1\n\tsbbl %0,%0" + :"=r" (oldbit) + :"m" (*(volatile long *)addr),"dIr" (nr)); + return oldbit; +} + +#define test_bit(nr,addr) \ +(__builtin_constant_p(nr) ? \ + constant_test_bit((nr),(addr)) : \ + variable_test_bit((nr),(addr))) + +#undef ADDR + +extern long find_first_zero_bit(const unsigned long * addr, unsigned long size); +extern long find_next_zero_bit (const unsigned long * addr, long size, long offset); +extern long find_first_bit(const unsigned long * addr, unsigned long size); +extern long find_next_bit(const unsigned long * addr, long size, long offset); + +/* return index of first bit set in val or max when no bit is set */ +static inline unsigned long __scanbit(unsigned long val, unsigned long max) +{ + asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max)); + return val; +} + +#define find_first_bit(addr,size) \ +((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ + (__scanbit(*(unsigned long *)addr,(size))) : \ + find_first_bit(addr,size))) + +#define find_next_bit(addr,size,off) \ +((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ?
\ + ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \ + find_next_bit(addr,size,off))) + +#define find_first_zero_bit(addr,size) \ +((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ + (__scanbit(~*(unsigned long *)addr,(size))) : \ + find_first_zero_bit(addr,size))) + +#define find_next_zero_bit(addr,size,off) \ +((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ + ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \ + find_next_zero_bit(addr,size,off))) + +/* + * Find string of zero bits in a bitmap. -1 when not found. + */ +extern unsigned long +find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len); + +static inline void set_bit_string(unsigned long *bitmap, unsigned long i, + int len) +{ + unsigned long end = i + len; + while (i < end) { + __set_bit(i, bitmap); + i++; + } +} + +static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i, + int len) +{ + unsigned long end = i + len; + while (i < end) { + __clear_bit(i, bitmap); + i++; + } +} + +/** + * ffz - find first zero in word. + * @word: The word to search + * + * Undefined if no zero exists, so code should check against ~0UL first. + */ +static __inline__ unsigned long ffz(unsigned long word) +{ + __asm__("bsfq %1,%0" + :"=r" (word) + :"r" (~word)); + return word; +} + +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + */ +static __inline__ unsigned long __ffs(unsigned long word) +{ + __asm__("bsfq %1,%0" + :"=r" (word) + :"rm" (word)); + return word; +} + +/* + * __fls: find last bit set. + * @word: The word to search + * + * Undefined if no zero exists, so code should check against ~0UL first. + */ +static __inline__ unsigned long __fls(unsigned long word) +{ + __asm__("bsrq %1,%0" + :"=r" (word) + :"rm" (word)); + return word; +} + +#ifdef __KERNEL__ + +#include + +/** + * ffs - find first bit set + * @x: the word to search + * + * This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above ffz (man ffs). + */ +static __inline__ int ffs(int x) +{ + int r; + + __asm__("bsfl %1,%0\n\t" + "cmovzl %2,%0" + : "=r" (r) : "rm" (x), "r" (-1)); + return r+1; +} + +/** + * fls64 - find last bit set in 64 bit word + * @x: the word to search + * + * This is defined the same way as fls. + */ +static __inline__ int fls64(__u64 x) +{ + if (x == 0) + return 0; + return __fls(x) + 1; +} + +/** + * fls - find last bit set + * @x: the word to search + * + * This is defined the same way as ffs. + */ +static __inline__ int fls(int x) +{ + int r; + + __asm__("bsrl %1,%0\n\t" + "cmovzl %2,%0" + : "=&r" (r) : "rm" (x), "rm" (-1)); + return r+1; +} + +#define ARCH_HAS_FAST_MULTIPLIER 1 + +#include + +#endif /* __KERNEL__ */ + +#ifdef __KERNEL__ + +#include + +#define ext2_set_bit_atomic(lock,nr,addr) \ + test_and_set_bit((nr),(unsigned long*)addr) +#define ext2_clear_bit_atomic(lock,nr,addr) \ + test_and_clear_bit((nr),(unsigned long*)addr) + +#include + +#endif /* __KERNEL__ */ + +#endif /* _X86_64_BITOPS_H */ diff --git a/include/asm-x86/boot.h b/include/asm-x86/boot.h new file mode 100644 index 000000000000..ed8affbf96cb --- /dev/null +++ b/include/asm-x86/boot.h @@ -0,0 +1,20 @@ +#ifndef _ASM_BOOT_H +#define _ASM_BOOT_H + +/* Don't touch these, unless you really know what you're doing. 
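set_bit_string()/__clear_bit_string() simply walk a contiguous run of bits, the shape of allocation that find_next_zero_string() above searches for (the IOMMU code is the typical user). A plain-C, non-atomic sketch of marking such a run across word boundaries (assuming 64-bit longs; an illustration, not the kernel routine itself):

/* Hedged sketch: marking a contiguous run of bits, as set_bit_string() does. */
#include <assert.h>
#include <stdio.h>

static void my_set_bit_string(unsigned long *map, unsigned long i, int len)
{
        for (unsigned long end = i + len; i < end; i++)
                map[i / 64] |= 1UL << (i % 64);   /* assumes 64-bit longs */
}

int main(void)
{
        unsigned long map[2] = { 0 };

        my_set_bit_string(map, 60, 8);            /* run crosses a word boundary */
        assert(map[0] == 0xf000000000000000UL && map[1] == 0xfUL);
        printf("bits 60..67 set across two words\n");
        return 0;
}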
*/ +#define DEF_INITSEG 0x9000 +#define DEF_SYSSEG 0x1000 +#define DEF_SETUPSEG 0x9020 +#define DEF_SYSSIZE 0x7F00 + +/* Internal svga startup constants */ +#define NORMAL_VGA 0xffff /* 80x25 mode */ +#define EXTENDED_VGA 0xfffe /* 80x50 mode */ +#define ASK_VGA 0xfffd /* ask for it at bootup */ + +/* Physical address where kernel should be loaded. */ +#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ + + (CONFIG_PHYSICAL_ALIGN - 1)) \ + & ~(CONFIG_PHYSICAL_ALIGN - 1)) + +#endif /* _ASM_BOOT_H */ diff --git a/include/asm-x86/bootparam.h b/include/asm-x86/bootparam.h new file mode 100644 index 000000000000..b91b01783e4b --- /dev/null +++ b/include/asm-x86/bootparam.h @@ -0,0 +1,86 @@ +#ifndef _ASM_BOOTPARAM_H +#define _ASM_BOOTPARAM_H + +#include +#include +#include +#include +#include +#include +#include
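LOAD_PHYSICAL_ADDR in boot.h above rounds CONFIG_PHYSICAL_START up to the next CONFIG_PHYSICAL_ALIGN boundary with the usual (start + align - 1) & ~(align - 1) trick, which requires the alignment to be a power of two. A userspace check of that arithmetic with illustrative numbers (the real values come from the kernel configuration):

/* Hedged sketch: the round-up-to-alignment arithmetic behind LOAD_PHYSICAL_ADDR. */
#include <assert.h>
#include <stdio.h>

static unsigned long align_up(unsigned long start, unsigned long align)
{
        return (start + align - 1) & ~(align - 1);   /* align must be a power of 2 */
}

int main(void)
{
        assert(align_up(0x100000, 0x200000) == 0x200000); /* 1 MB start, 2 MB align */
        assert(align_up(0x200000, 0x200000) == 0x200000); /* already aligned        */
        assert(align_up(0x123456, 0x1000)   == 0x124000); /* page-align a value     */
        printf("start is rounded up to the configured alignment\n");
        return 0;
}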