author     Simon Arlott <simon@fire.lp0.eu>   2007-10-20 01:25:36 +0200
committer  Adrian Bunk <bunk@kernel.org>      2007-10-20 01:25:36 +0200
commit     676b1855de0a18100b3c340084eb8ef72bde4fb1 (patch)
tree       cbcbe6dec24a23f97f93ec7753ab74d34a92473a /arch/x86
parent     spelling fixes: arch/v850/ (diff)
spelling fixes: arch/x86_64/
Spelling fixes in arch/x86_64/.

Signed-off-by: Simon Arlott <simon@fire.lp0.eu>
Signed-off-by: Adrian Bunk <bunk@kernel.org>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/boot/compressed/misc_64.c   4
-rw-r--r--  arch/x86/kernel/io_apic_64.c         4
-rw-r--r--  arch/x86/kernel/mce_64.c             4
-rw-r--r--  arch/x86/kernel/signal_64.c          2
-rw-r--r--  arch/x86/kernel/smpboot_64.c         2
-rw-r--r--  arch/x86/kernel/traps_64.c           2
-rw-r--r--  arch/x86/kernel/vsyscall_64.c        2
-rw-r--r--  arch/x86/mm/fault_64.c               4
-rw-r--r--  arch/x86/mm/srat_64.c                2
9 files changed, 13 insertions, 13 deletions
diff --git a/arch/x86/boot/compressed/misc_64.c b/arch/x86/boot/compressed/misc_64.c
index f932b0e89096..6ea015aa65e4 100644
--- a/arch/x86/boot/compressed/misc_64.c
+++ b/arch/x86/boot/compressed/misc_64.c
@@ -25,7 +25,7 @@
/*
* Getting to provable safe in place decompression is hard.
- * Worst case behaviours need to be analized.
+ * Worst case behaviours need to be analyzed.
* Background information:
*
* The file layout is:
@@ -94,7 +94,7 @@
* Adding 32768 instead of 32767 just makes for round numbers.
* Adding the decompressor_size is necessary as it musht live after all
* of the data as well. Last I measured the decompressor is about 14K.
- * 10K of actuall data and 4K of bss.
+ * 10K of actual data and 4K of bss.
*
*/
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 1c2c7bf6a9d3..b3c2d268d708 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -1770,7 +1770,7 @@ __setup("no_timer_check", notimercheck);
/*
*
- * IRQ's that are handled by the PIC in the MPS IOAPIC case.
+ * IRQs that are handled by the PIC in the MPS IOAPIC case.
* - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ.
* Linux doesn't really care, as it's not actually used
* for any interrupt handling anyway.
@@ -1921,7 +1921,7 @@ void destroy_irq(unsigned int irq)
}
/*
- * MSI mesage composition
+ * MSI message composition
*/
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
diff --git a/arch/x86/kernel/mce_64.c b/arch/x86/kernel/mce_64.c
index 66e6b797b2cb..82c85bdff3a1 100644
--- a/arch/x86/kernel/mce_64.c
+++ b/arch/x86/kernel/mce_64.c
@@ -320,7 +320,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
#ifdef CONFIG_X86_MCE_INTEL
/***
* mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
- * @cpu: The CPU on which the event occured.
+ * @cpu: The CPU on which the event occurred.
* @status: Event status information
*
* This function should be called by the thermal interrupt after the
@@ -688,7 +688,7 @@ static int __init mcheck_disable(char *str)
return 1;
}
-/* mce=off disables machine check. Note you can reenable it later
+/* mce=off disables machine check. Note you can re-enable it later
using sysfs.
mce=TOLERANCELEVEL (number, see above)
mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index 683802bec419..ab086b0357fc 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -410,7 +410,7 @@ static void do_signal(struct pt_regs *regs)
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
- /* Reenable any watchpoints before delivering the
+ /* Re-enable any watchpoints before delivering the
* signal to user space. The processor register will
* have been cleared if the watchpoint triggered
* inside the kernel.
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index e351ac4ab5b1..d4c33aba3ff2 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -350,7 +350,7 @@ void __cpuinit start_secondary(void)
/*
* We need to hold call_lock, so there is no inconsistency
* between the time smp_call_function() determines number of
- * IPI receipients, and the time when the determination is made
+ * IPI recipients, and the time when the determination is made
* for which cpus receive the IPI in genapic_flat.c. Holding this
* lock helps us to not include this cpu in a currently in progress
* smp_call_function().
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index b4a9b3db1994..b4c887341a26 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -201,7 +201,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
#define MSG(txt) ops->warning(data, txt)
/*
- * x86-64 can have upto three kernel stacks:
+ * x86-64 can have up to three kernel stacks:
* process stack
* interrupt stack
* severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 585541ca1a7e..e14cb3f53862 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -53,7 +53,7 @@
/*
* vsyscall_gtod_data contains data that is :
* - readonly from vsyscalls
- * - writen by timer interrupt or systcl (/proc/sys/kernel/vsyscall64)
+ * - written by timer interrupt or systcl (/proc/sys/kernel/vsyscall64)
* Try to keep this structure as small as possible to avoid cache line ping pongs
*/
int __vgetcpu_mode __section_vgetcpu_mode;
diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c
index 5149ac136a5d..7c560843ded1 100644
--- a/arch/x86/mm/fault_64.c
+++ b/arch/x86/mm/fault_64.c
@@ -378,7 +378,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
again:
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
- * kernel and should generate an OOPS. Unfortunatly, in the case of an
+ * kernel and should generate an OOPS. Unfortunately, in the case of an
* erroneous fault occurring in a code path which already holds mmap_sem
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
@@ -386,7 +386,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
* exceptions table.
*
* As the vast majority of faults will be valid we will only perform
- * the source reference check when there is a possibilty of a deadlock.
+ * the source reference check when there is a possibility of a deadlock.
* Attempt to lock the address space, if we cannot we then validate the
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 56089ccc3949..ea85172fc0cc 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -218,7 +218,7 @@ static inline int save_add_info(void) {return 0;}
/*
* Update nodes_add and decide if to include add are in the zone.
* Both SPARSE and RESERVE need nodes_add infomation.
- * This code supports one contigious hot add area per node.
+ * This code supports one contiguous hot add area per node.
*/
static int reserve_hotadd(int node, unsigned long start, unsigned long end)
{