From 2f72d4f6a29cf84c40fc05c76020b347b4774393 Mon Sep 17 00:00:00 2001
From: Chris Zankel
Date: Tue, 23 Oct 2012 20:17:05 -0700
Subject: xtensa: allow multi-inclusion for uapi/unistd.h

Xtensa implements a method that allows generating arbitrary output for
each system call by defining __SYSCALL(number, function, num_args). This
usually requires including uapi/unistd.h twice. Instead of removing the
guard against multiple inclusion entirely, allow unistd.h to be included
again only if __SYSCALL is defined. Note that __SYSCALL is always
#undef'd at the end of the file.

Signed-off-by: Chris Zankel
---
 arch/xtensa/include/asm/unistd.h      | 14 ++++----------
 arch/xtensa/include/uapi/asm/unistd.h | 14 +++-----------
 arch/xtensa/kernel/syscall.c          |  7 +++----
 3 files changed, 10 insertions(+), 25 deletions(-)

(limited to 'arch')

diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
index 9ef1c31d2c83..d9fa52dbfba0 100644
--- a/arch/xtensa/include/asm/unistd.h
+++ b/arch/xtensa/include/asm/unistd.h
@@ -1,16 +1,8 @@
-/*
- * include/asm-xtensa/unistd.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
+#ifndef _XTENSA_UNISTD_H
+#define _XTENSA_UNISTD_H
 
 #include
 
-
 /*
  * "Conditional" syscalls
  *
@@ -37,3 +29,5 @@
 #define __IGNORE_mmap		/* use mmap2 */
 #define __IGNORE_vfork		/* use clone */
 #define __IGNORE_fadvise64	/* use fadvise64_64 */
+
+#endif /* _XTENSA_UNISTD_H */
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index 479abaea5aae..1fd157f31211 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -1,14 +1,4 @@
-/*
- * include/asm-xtensa/unistd.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2012 Tensilica Inc.
- */
-
-#ifndef _UAPI_XTENSA_UNISTD_H
+#if !defined(_UAPI_XTENSA_UNISTD_H) || defined(__SYSCALL)
 #define _UAPI_XTENSA_UNISTD_H
 
 #ifndef __SYSCALL
@@ -759,4 +749,6 @@ __SYSCALL(331, sys_kcmp, 5)
 
 #define SYS_XTENSA_COUNT 5 /* count */
 
+#undef __SYSCALL
+
 #endif /* _UAPI_XTENSA_UNISTD_H */
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index a5c01e74d5d5..5702065f472a 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -32,10 +32,8 @@ typedef void (*syscall_t)(void);
 
 syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
 	[0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall,
-#undef __SYSCALL
 #define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol,
-#undef __KERNEL_SYSCALLS__
-#include
+#include
 };
 
 asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
@@ -49,7 +47,8 @@ asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
 	return (long)ret;
 }
 
-asmlinkage long xtensa_fadvise64_64(int fd, int advice, unsigned long long offset, unsigned long long len)
+asmlinkage long xtensa_fadvise64_64(int fd, int advice,
+		unsigned long long offset, unsigned long long len)
 {
 	return sys_fadvise64_64(fd, offset, len, advice);
 }
-- 
cgit v1.2.3-59-g8ed1b


From cddfcbcd05851a3b720055d40fc61c63ea1c2bd1 Mon Sep 17 00:00:00 2001
From: Chris Zankel
Date: Tue, 23 Oct 2012 20:25:37 -0700
Subject: xtensa: use physical addresses for bus addresses

Define virt_to_bus and bus_to_virt as virt_to_phys and phys_to_virt,
respectively.

Signed-off-by: Chris Zankel
---
 arch/xtensa/include/asm/io.h | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'arch')

diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index e6be5b9091c2..700c2e6f2d25 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -62,6 +62,10 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 static inline void iounmap(volatile void __iomem *addr)
 {
 }
+
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
 #endif /* CONFIG_MMU */
 
 /*
-- 
cgit v1.2.3-59-g8ed1b


From 6ebe7da25b7879194fdb5c43ea67b383dd9014d6 Mon Sep 17 00:00:00 2001
From: Chris Zankel
Date: Wed, 24 Oct 2012 13:15:21 -0700
Subject: xtensa: reset windowbase/windowstart when cloning the VM

When we copy a user thread with CLONE_VM, we also have to reset
windowbase and windowstart to start a pristine stack frame. Otherwise,
overflows can happen that use address 0 as the stack pointer.

Also add a special case for vfork, which continues on the parent's
stack until it calls execve. Because this could be a call8, we need to
spill the stack pointer of the previous frame (if it is still 'live' in
the register file).

Signed-off-by: Chris Zankel
---
 arch/xtensa/kernel/process.c | 26 +++++++++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 1908f6642d31..341c151af919 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -220,8 +220,32 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	if (user_mode(regs)) {
 		childregs->areg[1] = usp;
+
+		/* When sharing memory with the parent thread, the child
+		   usually starts on a pristine stack, so we have to reset
+		   windowbase, windowstart and wmask.
+		   (Note that such a new thread is required to always create
+		   an initial call4 frame)
+		   The exception is vfork, where the new thread continues to
+		   run on the parent's stack until it calls execve. This could
+		   be a call8 or call12, which requires a legal stack frame
+		   of the previous caller for the overflow handlers to work.
+		   (Note that it's always legal to overflow live registers).
+		   In this case, ensure to spill at least the stack pointer
+		   of that frame.
*/ + if (clone_flags & CLONE_VM) { - childregs->wmask = 1; /* can't share live windows */ + /* check that caller window is live and same stack */ + int len = childregs->wmask & ~0xf; + if (regs->areg[1] == usp && len != 0) { + int callinc = (regs->areg[0] >> 30) & 3; + int caller_ars = XCHAL_NUM_AREGS - callinc * 4; + put_user(regs->areg[caller_ars+1], + (unsigned __user*)(usp - 12)); + } + childregs->wmask = 1; + childregs->windowstart = 1; + childregs->windowbase = 0; } else { int len = childregs->wmask & ~0xf; memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4], -- cgit v1.2.3-59-g8ed1b From 3306a72669a2af23b324540b08e68e82ac5b7dab Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Thu, 25 Oct 2012 11:10:50 +0400 Subject: xtensa: switch to generic kernel_thread() Signed-off-by: Max Filippov Signed-off-by: Chris Zankel --- arch/xtensa/Kconfig | 1 + arch/xtensa/include/asm/processor.h | 4 +- arch/xtensa/kernel/entry.S | 41 +++++++------------- arch/xtensa/kernel/process.c | 77 ++++++++++++++++++++++--------------- arch/xtensa/kernel/xtensa_ksyms.c | 1 - 5 files changed, 61 insertions(+), 63 deletions(-) (limited to 'arch') diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index cdcb48adee4c..9525b70a13a1 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -13,6 +13,7 @@ config XTENSA select GENERIC_CPU_DEVICES select MODULES_USE_ELF_RELA select GENERIC_PCI_IOMAP + select GENERIC_KERNEL_THREAD select ARCH_WANT_OPTIONAL_GPIOLIB help Xtensa processors are 32-bit RISC machines designed by Tensilica diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index 5c371d8d4528..2d630e7399ca 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -152,6 +152,7 @@ struct thread_struct { /* Clearing a0 terminates the backtrace. */ #define start_thread(regs, new_pc, new_sp) \ + memset(regs, 0, sizeof(*regs)); \ regs->pc = new_pc; \ regs->ps = USER_PS_VALUE; \ regs->areg[1] = new_sp; \ @@ -168,9 +169,6 @@ struct mm_struct; /* Free all resources held by a thread. */ #define release_thread(thread) do { } while(0) -/* Create a kernel thread without removing it from tasklists */ -extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); - /* Copy and release all segment info associated with a VM */ #define copy_segments(p, mm) do { } while(0) #define release_segments(mm) do { } while(0) diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 18453067c258..d4ec1381aebc 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1832,34 +1832,6 @@ ENTRY(system_call) retw -/* - * Create a kernel thread - * - * int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) - * a2 a2 a3 a4 - */ - -ENTRY(kernel_thread) - entry a1, 16 - - mov a5, a2 # preserve fn over syscall - mov a7, a3 # preserve args over syscall - - movi a3, _CLONE_VM | _CLONE_UNTRACED - movi a2, __NR_clone - or a6, a4, a3 # arg0: flags - mov a3, a1 # arg1: sp - syscall - - beq a3, a1, 1f # branch if parent - mov a6, a7 # args - callx4 a5 # fn(args) - - movi a2, __NR_exit - syscall # return value of fn(args) still in a6 - -1: retw - /* * Do a system call from kernel instead of calling sys_execve, so we end up * with proper pt_regs. 
@@ -1958,3 +1930,16 @@ ENTRY(ret_from_fork) j common_exception_return +/* + * Kernel thread creation helper + * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg + * left from _switch_to: a6 = prev + */ +ENTRY(ret_from_kernel_thread) + + call4 schedule_tail + mov a6, a3 + callx4 a2 + call4 do_exit + +ENDPROC(ret_from_kernel_thread) diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 341c151af919..be52fe437c1b 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -45,6 +45,7 @@ #include extern void ret_from_fork(void); +extern void ret_from_kernel_thread(void); struct task_struct *current_set[NR_CPUS] = {&init_task, }; @@ -158,18 +159,30 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) /* * Copy thread. * + * There are two modes in which this function is called: + * 1) Userspace thread creation, + * regs != NULL, usp_thread_fn is userspace stack pointer. + * It is expected to copy parent regs (in case CLONE_VM is not set + * in the clone_flags) and set up passed usp in the childregs. + * 2) Kernel thread creation, + * regs == NULL, usp_thread_fn is the function to run in the new thread + * and thread_fn_arg is its parameter. + * childregs are not used for the kernel threads. + * * The stack layout for the new thread looks like this: * - * +------------------------+ <- sp in childregs (= tos) + * +------------------------+ * | childregs | * +------------------------+ <- thread.sp = sp in dummy-frame * | dummy-frame | (saved in dummy-frame spill-area) * +------------------------+ * - * We create a dummy frame to return to ret_from_fork: - * a0 points to ret_from_fork (simulating a call4) + * We create a dummy frame to return to either ret_from_fork or + * ret_from_kernel_thread: + * a0 points to ret_from_fork/ret_from_kernel_thread (simulating a call4) * sp points to itself (thread.sp) - * a2, a3 are unused. + * a2, a3 are unused for userspace threads, + * a2 points to thread_fn, a3 holds thread_fn arg for kernel threads. * * Note: This is a pristine frame, so we don't need any spill region on top of * childregs. @@ -185,41 +198,37 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) * involved. Much simpler to just not copy those live frames across. */ -int copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long unused, - struct task_struct * p, struct pt_regs * regs) +int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, + unsigned long thread_fn_arg, + struct task_struct *p, struct pt_regs *unused) { - struct pt_regs *childregs; - unsigned long tos; - int user_mode = user_mode(regs); + struct pt_regs *childregs = task_pt_regs(p); #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS) struct thread_info *ti; #endif - /* Set up new TSS. */ - tos = (unsigned long)task_stack_page(p) + THREAD_SIZE; - if (user_mode) - childregs = (struct pt_regs*)(tos - PT_USER_SIZE); - else - childregs = (struct pt_regs*)tos - 1; - - /* This does not copy all the regs. In a bout of brilliance or madness, - ARs beyond a0-a15 exist past the end of the struct. */ - *childregs = *regs; - /* Create a call4 dummy-frame: a0 = 0, a1 = childregs. 
*/ *((int*)childregs - 3) = (unsigned long)childregs; *((int*)childregs - 4) = 0; - childregs->areg[2] = 0; - p->set_child_tid = p->clear_child_tid = NULL; - p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1); p->thread.sp = (unsigned long)childregs; - if (user_mode(regs)) { + if (!(p->flags & PF_KTHREAD)) { + struct pt_regs *regs = current_pt_regs(); + unsigned long usp = usp_thread_fn ? + usp_thread_fn : regs->areg[1]; + + p->thread.ra = MAKE_RA_FOR_CALL( + (unsigned long)ret_from_fork, 0x1); + /* This does not copy all the regs. + * In a bout of brilliance or madness, + * ARs beyond a0-a15 exist past the end of the struct. + */ + *childregs = *regs; childregs->areg[1] = usp; + childregs->areg[2] = 0; /* When sharing memory with the parent thread, the child usually starts on a pristine stack, so we have to reset @@ -254,11 +263,19 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, // FIXME: we need to set THREADPTR in thread_info... if (clone_flags & CLONE_SETTLS) childregs->areg[2] = childregs->areg[6]; - } else { - /* In kernel space, we start a new thread with a new stack. */ - childregs->wmask = 1; - childregs->areg[1] = tos; + p->thread.ra = MAKE_RA_FOR_CALL( + (unsigned long)ret_from_kernel_thread, 1); + + /* pass parameters to ret_from_kernel_thread: + * a2 = thread_fn, a3 = thread_fn arg + */ + *((int *)childregs - 1) = thread_fn_arg; + *((int *)childregs - 2) = usp_thread_fn; + + /* Childregs are only used when we're going to userspace + * in which case start_thread will set them up. + */ } #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS) @@ -354,8 +371,6 @@ long xtensa_clone(unsigned long clone_flags, unsigned long newsp, void __user *child_tid, long a5, struct pt_regs *regs) { - if (!newsp) - newsp = regs->areg[1]; return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); } diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index a8b9f1fd1e17..afe058b24e6e 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -43,7 +43,6 @@ EXPORT_SYMBOL(__strncpy_user); EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(copy_page); -EXPORT_SYMBOL(kernel_thread); EXPORT_SYMBOL(empty_zero_page); /* -- cgit v1.2.3-59-g8ed1b From f0a1bf0859ffa059f9009d6a17e8d46241fb2161 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Thu, 25 Oct 2012 11:10:51 +0400 Subject: xtensa: switch to generic kernel_execve() Signed-off-by: Max Filippov Signed-off-by: Chris Zankel --- arch/xtensa/Kconfig | 1 + arch/xtensa/kernel/entry.S | 18 +----------------- 2 files changed, 2 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 9525b70a13a1..0d1f36a22c98 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -14,6 +14,7 @@ config XTENSA select MODULES_USE_ELF_RELA select GENERIC_PCI_IOMAP select GENERIC_KERNEL_THREAD + select GENERIC_KERNEL_EXECVE select ARCH_WANT_OPTIONAL_GPIOLIB help Xtensa processors are 32-bit RISC machines designed by Tensilica diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index d4ec1381aebc..90bfc1dbc13d 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1832,22 +1832,6 @@ ENTRY(system_call) retw -/* - * Do a system call from kernel instead of calling sys_execve, so we end up - * with proper pt_regs. 
- * - * int kernel_execve(const char *fname, char *const argv[], charg *const envp[]) - * a2 a2 a3 a4 - */ - -ENTRY(kernel_execve) - entry a1, 16 - mov a6, a2 # arg0 is in a6 - movi a2, __NR_execve - syscall - - retw - /* * Task switch. * @@ -1940,6 +1924,6 @@ ENTRY(ret_from_kernel_thread) call4 schedule_tail mov a6, a3 callx4 a2 - call4 do_exit + j common_exception_return ENDPROC(ret_from_kernel_thread) -- cgit v1.2.3-59-g8ed1b From dc241f2c1761bfdec85915f4bbf7e750663f3442 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Thu, 25 Oct 2012 11:10:52 +0400 Subject: xtensa: switch to generic sys_execve() Signed-off-by: Max Filippov Signed-off-by: Chris Zankel --- arch/xtensa/include/asm/syscall.h | 2 +- arch/xtensa/include/asm/unistd.h | 1 + arch/xtensa/include/uapi/asm/unistd.h | 2 +- arch/xtensa/kernel/process.c | 25 ------------------------- 4 files changed, 3 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h index c1dacca312f3..124aeee0d381 100644 --- a/arch/xtensa/include/asm/syscall.h +++ b/arch/xtensa/include/asm/syscall.h @@ -10,7 +10,7 @@ struct pt_regs; struct sigaction; -asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*); +asmlinkage long sys_execve(char*, char**, char**, struct pt_regs*); asmlinkage long xtensa_clone(unsigned long, unsigned long, struct pt_regs*); asmlinkage long xtensa_ptrace(long, long, long, long); asmlinkage long xtensa_sigreturn(struct pt_regs*); diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h index d9fa52dbfba0..f4e6eaa40d1c 100644 --- a/arch/xtensa/include/asm/unistd.h +++ b/arch/xtensa/include/asm/unistd.h @@ -1,6 +1,7 @@ #ifndef _XTENSA_UNISTD_H #define _XTENSA_UNISTD_H +#define __ARCH_WANT_SYS_EXECVE #include /* diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h index 1fd157f31211..9f36d0e3e0ac 100644 --- a/arch/xtensa/include/uapi/asm/unistd.h +++ b/arch/xtensa/include/uapi/asm/unistd.h @@ -262,7 +262,7 @@ __SYSCALL(115, sys_sendmmsg, 4) #define __NR_clone 116 __SYSCALL(116, xtensa_clone, 5) #define __NR_execve 117 -__SYSCALL(117, xtensa_execve, 3) +__SYSCALL(117, sys_execve, 3) #define __NR_exit 118 __SYSCALL(118, sys_exit, 1) #define __NR_exit_group 119 diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index be52fe437c1b..09ae7bfab9a7 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -373,28 +373,3 @@ long xtensa_clone(unsigned long clone_flags, unsigned long newsp, { return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); } - -/* - * xtensa_execve() executes a new program. 
- */ - -asmlinkage -long xtensa_execve(const char __user *name, - const char __user *const __user *argv, - const char __user *const __user *envp, - long a3, long a4, long a5, - struct pt_regs *regs) -{ - long error; - struct filename *filename; - - filename = getname(name); - error = PTR_ERR(filename); - if (IS_ERR(filename)) - goto out; - error = do_execve(filename->name, argv, envp, regs); - putname(filename); -out: - return error; -} - -- cgit v1.2.3-59-g8ed1b From b6514633bdc6a511f7c44b3ecb86d6071374239d Mon Sep 17 00:00:00 2001 From: Olaf Hering Date: Thu, 25 Oct 2012 11:00:24 +0200 Subject: x86: remove obsolete comment from asm/xen/hypervisor.h Signed-off-by: Olaf Hering Signed-off-by: Konrad Rzeszutek Wilk --- arch/x86/include/asm/xen/hypervisor.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h index 66d0fff1ee84..125f344f06a9 100644 --- a/arch/x86/include/asm/xen/hypervisor.h +++ b/arch/x86/include/asm/xen/hypervisor.h @@ -33,7 +33,6 @@ #ifndef _ASM_X86_XEN_HYPERVISOR_H #define _ASM_X86_XEN_HYPERVISOR_H -/* arch/i386/kernel/setup.c */ extern struct shared_info *HYPERVISOR_shared_info; extern struct start_info *xen_start_info; -- cgit v1.2.3-59-g8ed1b From c8d258a7b2b6633b79cad4464e8dfb3dc328d897 Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Wed, 24 Oct 2012 16:26:08 +0100 Subject: xen/arm: use the __HVC macro Use the new __HVC macro in hypercall.S. Signed-off-by: Stefano Stabellini Signed-off-by: Konrad Rzeszutek Wilk --- arch/arm/xen/hypercall.S | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S index 074f5ed101b9..71f723984cbd 100644 --- a/arch/arm/xen/hypercall.S +++ b/arch/arm/xen/hypercall.S @@ -48,20 +48,16 @@ #include #include +#include #include -/* HVC 0xEA1 */ -#ifdef CONFIG_THUMB2_KERNEL -#define xen_hvc .word 0xf7e08ea1 -#else -#define xen_hvc .word 0xe140ea71 -#endif +#define XEN_IMM 0xEA1 #define HYPERCALL_SIMPLE(hypercall) \ ENTRY(HYPERVISOR_##hypercall) \ mov r12, #__HYPERVISOR_##hypercall; \ - xen_hvc; \ + __HVC(XEN_IMM); \ mov pc, lr; \ ENDPROC(HYPERVISOR_##hypercall) @@ -76,7 +72,7 @@ ENTRY(HYPERVISOR_##hypercall) \ stmdb sp!, {r4} \ ldr r4, [sp, #4] \ mov r12, #__HYPERVISOR_##hypercall; \ - xen_hvc \ + __HVC(XEN_IMM); \ ldm sp!, {r4} \ mov pc, lr \ ENDPROC(HYPERVISOR_##hypercall) @@ -100,7 +96,7 @@ ENTRY(privcmd_call) mov r2, r3 ldr r3, [sp, #8] ldr r4, [sp, #4] - xen_hvc + __HVC(XEN_IMM) ldm sp!, {r4} mov pc, lr ENDPROC(privcmd_call); -- cgit v1.2.3-59-g8ed1b From 95a7d76897c1e7243d4137037c66d15cbf2cce76 Mon Sep 17 00:00:00 2001 From: Konrad Rzeszutek Wilk Date: Wed, 31 Oct 2012 12:38:31 -0400 Subject: xen/mmu: Use Xen specific TLB flush instead of the generic one. As Mukesh explained it, the MMUEXT_TLB_FLUSH_ALL allows the hypervisor to do a TLB flush on all active vCPUs. If instead we were using the generic one (which ends up being xen_flush_tlb) we end up making the MMUEXT_TLB_FLUSH_LOCAL hypercall. But before we make that hypercall the kernel will IPI all of the vCPUs (even those that were asleep from the hypervisor perspective). The end result is that we needlessly wake them up and do a TLB flush when we can just let the hypervisor do it correctly. This patch gives around 50% speed improvement when migrating idle guest's from one host to another. 
Oracle-bug: 14630170
CC: stable@vger.kernel.org
Tested-by: Jingjie Jiang
Suggested-by: Mukesh Rathor
Signed-off-by: Konrad Rzeszutek Wilk
---
 arch/x86/xen/mmu.c         | 21 ++++++++++++++++++++-
 include/trace/events/xen.h |  8 ++++++++
 2 files changed, 28 insertions(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 6226c99729b9..dcf5f2dd91ec 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1288,6 +1288,25 @@ unsigned long xen_read_cr2_direct(void)
 	return this_cpu_read(xen_vcpu_info.arch.cr2);
 }
 
+void xen_flush_tlb_all(void)
+{
+	struct mmuext_op *op;
+	struct multicall_space mcs;
+
+	trace_xen_mmu_flush_tlb_all(0);
+
+	preempt_disable();
+
+	mcs = xen_mc_entry(sizeof(*op));
+
+	op = mcs.args;
+	op->cmd = MMUEXT_TLB_FLUSH_ALL;
+	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+	preempt_enable();
+}
 static void xen_flush_tlb(void)
 {
 	struct mmuext_op *op;
@@ -2518,7 +2537,7 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
 	err = 0;
 out:
 
-	flush_tlb_all();
+	xen_flush_tlb_all();
 
 	return err;
 }
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index 15ba03bdd7c6..d06b6da5c1e3 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -377,6 +377,14 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd,
 DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
 DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
 
+TRACE_EVENT(xen_mmu_flush_tlb_all,
+	    TP_PROTO(int x),
+	    TP_ARGS(x),
+	    TP_STRUCT__entry(__array(char, x, 0)),
+	    TP_fast_assign((void)x),
+	    TP_printk("%s", "")
+	);
+
 TRACE_EVENT(xen_mmu_flush_tlb,
 	    TP_PROTO(int x),
 	    TP_ARGS(x),
-- 
cgit v1.2.3-59-g8ed1b


From 87da7e66a40532b743cd50972fcf85a1f15b14ea Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Wed, 24 Oct 2012 14:07:59 +0800
Subject: KVM: x86: fix vcpu->mmio_fragments overflow

After commit b3356bf0dbb349 (KVM: emulator: optimize "rep ins" handling),
the pieces of I/O data can be collected and written to guest memory or
MMIO together.

Unfortunately, KVM splits the MMIO access into 8-byte pieces and stores
them in vcpu->mmio_fragments. If the guest uses "rep ins" to move a
large amount of data, vcpu->mmio_fragments overflows.

The bug can be exposed by isapc (-M isapc):

[23154.818733] general protection fault: 0000 [#1] SMP DEBUG_PAGEALLOC
[ ......]
[23154.858083] Call Trace:
[23154.859874] [] kvm_get_cr8+0x1d/0x28 [kvm]
[23154.861677] [] kvm_arch_vcpu_ioctl_run+0xcda/0xe45 [kvm]
[23154.863604] [] ? kvm_arch_vcpu_load+0x17b/0x180 [kvm]

Instead, we can use one mmio_fragment to store a large MMIO access and
then split it when we pass the MMIO exit info to userspace.
After that, we only need two entries to store mmio info for the cross-mmio pages access Signed-off-by: Xiao Guangrong Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/x86.c | 60 +++++++++++++++++++++++++++--------------------- include/linux/kvm_host.h | 15 ++---------- 2 files changed, 36 insertions(+), 39 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1eefebe5d727..224a7e78cb6c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3779,7 +3779,7 @@ static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, { struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; - memcpy(vcpu->run->mmio.data, frag->data, frag->len); + memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); return X86EMUL_CONTINUE; } @@ -3832,18 +3832,11 @@ mmio: bytes -= handled; val += handled; - while (bytes) { - unsigned now = min(bytes, 8U); - - frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; - frag->gpa = gpa; - frag->data = val; - frag->len = now; - - gpa += now; - val += now; - bytes -= now; - } + WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); + frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; + frag->gpa = gpa; + frag->data = val; + frag->len = bytes; return X86EMUL_CONTINUE; } @@ -3890,7 +3883,7 @@ int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr, vcpu->mmio_needed = 1; vcpu->mmio_cur_fragment = 0; - vcpu->run->mmio.len = vcpu->mmio_fragments[0].len; + vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; vcpu->run->exit_reason = KVM_EXIT_MMIO; vcpu->run->mmio.phys_addr = gpa; @@ -5522,28 +5515,44 @@ static int complete_emulated_pio(struct kvm_vcpu *vcpu) * * read: * for each fragment - * write gpa, len - * exit - * copy data + * for each mmio piece in the fragment + * write gpa, len + * exit + * copy data * execute insn * * write: * for each fragment - * write gpa, len - * copy data - * exit + * for each mmio piece in the fragment + * write gpa, len + * copy data + * exit */ static int complete_emulated_mmio(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; struct kvm_mmio_fragment *frag; + unsigned len; BUG_ON(!vcpu->mmio_needed); /* Complete previous fragment */ - frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++]; + frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; + len = min(8u, frag->len); if (!vcpu->mmio_is_write) - memcpy(frag->data, run->mmio.data, frag->len); + memcpy(frag->data, run->mmio.data, len); + + if (frag->len <= 8) { + /* Switch to the next fragment. */ + frag++; + vcpu->mmio_cur_fragment++; + } else { + /* Go forward to the next mmio piece. 
*/ + frag->data += len; + frag->gpa += len; + frag->len -= len; + } + if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) { vcpu->mmio_needed = 0; if (vcpu->mmio_is_write) @@ -5551,13 +5560,12 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu) vcpu->mmio_read_completed = 1; return complete_emulated_io(vcpu); } - /* Initiate next fragment */ - ++frag; + run->exit_reason = KVM_EXIT_MMIO; run->mmio.phys_addr = frag->gpa; if (vcpu->mmio_is_write) - memcpy(run->mmio.data, frag->data, frag->len); - run->mmio.len = frag->len; + memcpy(run->mmio.data, frag->data, min(8u, frag->len)); + run->mmio.len = min(8u, frag->len); run->mmio.is_write = vcpu->mmio_is_write; vcpu->arch.complete_userspace_io = complete_emulated_mmio; return 0; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 93bfc9f9815c..ecc554374e44 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -42,19 +42,8 @@ */ #define KVM_MEMSLOT_INVALID (1UL << 16) -/* - * If we support unaligned MMIO, at most one fragment will be split into two: - */ -#ifdef KVM_UNALIGNED_MMIO -# define KVM_EXTRA_MMIO_FRAGMENTS 1 -#else -# define KVM_EXTRA_MMIO_FRAGMENTS 0 -#endif - -#define KVM_USER_MMIO_SIZE 8 - -#define KVM_MAX_MMIO_FRAGMENTS \ - (KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS) +/* Two fragments for cross MMIO pages. */ +#define KVM_MAX_MMIO_FRAGMENTS 2 /* * For the normal pfn, the highest 12 bits should be zero, -- cgit v1.2.3-59-g8ed1b From a5788caa269e446201018bb8879a1dd90f41d32b Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 2 Nov 2012 13:20:42 +0000 Subject: FRV: Add missing linux/export.h #inclusions Add missing linux/export.h #inclusions to the FRV arch. Signed-off-by: David Howells --- arch/frv/mb93090-mb00/pci-dma-nommu.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c index e47857f889b6..b99c2a7cc7a4 100644 --- a/arch/frv/mb93090-mb00/pci-dma-nommu.c +++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include -- cgit v1.2.3-59-g8ed1b From 5f0231d97b2d361292b090b81479a68123010376 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 2 Nov 2012 13:20:42 +0000 Subject: FRV: Don't objcopy the GNU build_id note Don't let objcopy transfer the GNU build_id note into the loadable image as it is located at address 0 and the image ends up >3G in size. 
Signed-off-by: David Howells --- arch/frv/boot/Makefile | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/frv/boot/Makefile b/arch/frv/boot/Makefile index 6ae3254da019..636d5bbcd53f 100644 --- a/arch/frv/boot/Makefile +++ b/arch/frv/boot/Makefile @@ -17,6 +17,8 @@ PARAMS_PHYS = 0x0207c000 INITRD_PHYS = 0x02180000 INITRD_VIRT = 0x02180000 +OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment + # # If you don't define ZRELADDR above, # then it defaults to ZTEXTADDR @@ -32,18 +34,18 @@ Image: $(obj)/Image targets: $(obj)/Image $(obj)/Image: vmlinux FORCE - $(OBJCOPY) -O binary -R .note -R .comment -S vmlinux $@ + $(OBJCOPY) $(OBJCOPYFLAGS) -S vmlinux $@ #$(obj)/Image: $(CONFIGURE) $(SYSTEM) -# $(OBJCOPY) -O binary -R .note -R .comment -g -S $(SYSTEM) $@ +# $(OBJCOPY) $(OBJCOPYFLAGS) -g -S $(SYSTEM) $@ bzImage: zImage zImage: $(CONFIGURE) compressed/$(LINUX) - $(OBJCOPY) -O binary -R .note -R .comment -S compressed/$(LINUX) $@ + $(OBJCOPY) $(OBJCOPYFLAGS) -S compressed/$(LINUX) $@ bootpImage: bootp/bootp - $(OBJCOPY) -O binary -R .note -R .comment -S bootp/bootp $@ + $(OBJCOPY) $(OBJCOPYFLAGS) -S bootp/bootp $@ compressed/$(LINUX): $(LINUX) dep @$(MAKE) -C compressed $(LINUX) -- cgit v1.2.3-59-g8ed1b From 1ee6f5669a7eaba0e2f4e0dd0599b56eb8a9a090 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 2 Nov 2012 13:20:42 +0000 Subject: FRV: Fix the preemption handling Fix the preemption handling in FRV code where the PREEMPT_ACTIVE value is incorrectly loaded into the threadinfo flags rather than the threadinfo preemption count. Unfortunately, the code cannot be simply converted to use preempt_schedule_irq() as is because FRV uses virtual interrupt disablement to cut down on the cost of actually disabling interrupts and thus local_irq_enable() doesn't actually enable interrupts. Reported-by: Al Viro Signed-off-by: David Howells cc: Al Viro --- arch/frv/kernel/entry.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S index ee0beb354e4d..d64c52627961 100644 --- a/arch/frv/kernel/entry.S +++ b/arch/frv/kernel/entry.S @@ -1086,7 +1086,7 @@ __entry_preempt_need_resched: beq icc0,#1,__entry_return_direct setlos #PREEMPT_ACTIVE,gr5 - sti gr5,@(gr15,#TI_FLAGS) + sti gr5,@(gr15,#TI_PRE_COUNT) andi gr23,#~PSR_PIL,gr23 movgs gr23,psr -- cgit v1.2.3-59-g8ed1b From e7aa51b2e52274b13be3209e2787d1b2ce9624fd Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 2 Nov 2012 13:20:43 +0000 Subject: FRV: Fix the new-style kernel_thread() stuff The kernel_thread() changes for FRV don't work, and FRV fails to boot, starting with: commit 02ce496f152df87be081a64796498942c433a2fd Author: Al Viro Date: Tue Sep 18 22:18:51 2012 -0400 Subject: frv: split ret_from_fork, simplify kernel_thread() a lot The problem is that the userspace registers are completely cleared when a kernel thread is created and all subsequent user threads are then copied from that. Unfortunately, however, the TBR and PSR registers are restored from the pt_regs and the values they should be set to are clobbered by the memset. Instead, copy across the old user registers as normal, and then merely alter GR8 and GR9 in it if we're going to execute a kernel thread. 
Signed-off-by: David Howells --- arch/frv/kernel/process.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c index e1e3aa196aa4..7e33215f1d8f 100644 --- a/arch/frv/kernel/process.c +++ b/arch/frv/kernel/process.c @@ -181,6 +181,9 @@ int copy_thread(unsigned long clone_flags, childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE - FRV_FRAME0_SIZE); + /* set up the userspace frame (the only place that the USP is stored) */ + *childregs = *__kernel_frame0_ptr; + p->set_child_tid = p->clear_child_tid = NULL; p->thread.frame = childregs; @@ -191,10 +194,8 @@ int copy_thread(unsigned long clone_flags, p->thread.frame0 = childregs; if (unlikely(!regs)) { - memset(childregs, 0, sizeof(struct pt_regs)); childregs->gr9 = usp; /* function */ childregs->gr8 = arg; - childregs->psr = PSR_S; p->thread.pc = (unsigned long) ret_from_kernel_thread; save_user_regs(p->thread.user); return 0; -- cgit v1.2.3-59-g8ed1b From 7b7ade117951dcc5ec947595ea7ff622fa56895e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 15 Oct 2012 10:53:17 -0400 Subject: frv: switch to saner kernel_execve() semantics Signed-off-by: Al Viro --- arch/frv/Kconfig | 1 + arch/frv/include/asm/unistd.h | 1 - arch/frv/kernel/entry.S | 5 ----- 3 files changed, 1 insertion(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index b7412504f08a..df2eb4bd9fa2 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -13,6 +13,7 @@ config FRV select GENERIC_CPU_DEVICES select ARCH_WANT_IPC_PARSE_VERSION select GENERIC_KERNEL_THREAD + select GENERIC_KERNEL_EXECVE config ZONE_DMA bool diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h index 266a5b25a0c1..2358634cacca 100644 --- a/arch/frv/include/asm/unistd.h +++ b/arch/frv/include/asm/unistd.h @@ -30,7 +30,6 @@ #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND #define __ARCH_WANT_SYS_EXECVE -#define __ARCH_WANT_KERNEL_EXECVE /* * "Conditional" syscalls diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S index d64c52627961..b1cd7e8145bb 100644 --- a/arch/frv/kernel/entry.S +++ b/arch/frv/kernel/entry.S @@ -869,11 +869,6 @@ ret_from_kernel_thread: call schedule_tail calll.p @(gr21,gr0) or gr20,gr20,gr8 - bra sys_exit - - .globl ret_from_kernel_execve -ret_from_kernel_execve: - ori gr28,0,sp bra __syscall_exit ################################################################################################### -- cgit v1.2.3-59-g8ed1b From 1d72d9f83df057e71c7951def41138a0230bf737 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 2 Nov 2012 12:05:44 -0400 Subject: frv: fix the broken preempt Just get %icc2 into the state we would have after local_irq_disable() and physical IRQ having happened since then. Then we can simply use preempt_schedule_irq() and be done with the whole mess. 
Acked-by: David Howells Signed-off-by: Al Viro --- arch/frv/kernel/entry.S | 23 +++-------------------- 1 file changed, 3 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S index b1cd7e8145bb..dfcd263c0517 100644 --- a/arch/frv/kernel/entry.S +++ b/arch/frv/kernel/entry.S @@ -1075,27 +1075,10 @@ __entry_return_from_kernel_interrupt: subicc gr5,#0,gr0,icc0 beq icc0,#0,__entry_return_direct -__entry_preempt_need_resched: - ldi @(gr15,#TI_FLAGS),gr4 - andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0 - beq icc0,#1,__entry_return_direct - - setlos #PREEMPT_ACTIVE,gr5 - sti gr5,@(gr15,#TI_PRE_COUNT) - - andi gr23,#~PSR_PIL,gr23 - movgs gr23,psr - - call schedule - sti gr0,@(gr15,#TI_PRE_COUNT) - - movsg psr,gr23 - ori gr23,#PSR_PIL_14,gr23 - movgs gr23,psr - bra __entry_preempt_need_resched -#else - bra __entry_return_direct + subcc gr0,gr0,gr0,icc2 /* set Z and clear C */ + call preempt_schedule_irq #endif + bra __entry_return_direct ############################################################################### -- cgit v1.2.3-59-g8ed1b From e5c5f2adeb370559f4b221d57214db85858b786a Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 22 Oct 2012 11:42:54 -0600 Subject: ARM: implement debug_ll_io_init() When using DEBUG_LL, the UART's (or other HW's) registers are mapped into early page tables based on the results of assembly macro addruart. Later, when the page tables are replaced, the same virtual address must remain valid. Historically, this has been ensured by using defines from in both the implementation of addruart, and the machine's .map_io() function. However, with the move to single zImage, we wish to remove . To enable this, the macro addruart may be used when constructing the late page tables too; addruart is exposed as a C function debug_ll_addr(), and used to set up the required mapping in debug_ll_io_init(), which may called on an opt-in basis from a machine's .map_io() function. Signed-off-by: Rob Herring [swarren: Mask map.virtual with PAGE_MASK. Checked for NULL results from debug_ll_addr (e.g. when selected UART isn't valid). Fixed compile when either !CONFIG_DEBUG_LL or CONFIG_DEBUG_SEMIHOSTING.] 
Signed-off-by: Stephen Warren Signed-off-by: Olof Johansson --- arch/arm/include/asm/mach/map.h | 7 +++++++ arch/arm/kernel/debug.S | 14 ++++++++++++++ arch/arm/mm/mmu.c | 16 ++++++++++++++++ 3 files changed, 37 insertions(+) (limited to 'arch') diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h index 195ac2f9d3d3..2fe141fcc8d6 100644 --- a/arch/arm/include/asm/mach/map.h +++ b/arch/arm/include/asm/mach/map.h @@ -40,6 +40,13 @@ extern void iotable_init(struct map_desc *, int); extern void vm_reserve_area_early(unsigned long addr, unsigned long size, void *caller); +#ifdef CONFIG_DEBUG_LL +extern void debug_ll_addr(unsigned long *paddr, unsigned long *vaddr); +extern void debug_ll_io_init(void); +#else +static inline void debug_ll_io_init(void) {} +#endif + struct mem_type; extern const struct mem_type *get_mem_type(unsigned int type); /* diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index 66f711b2e0e8..6809200c31fb 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S @@ -100,6 +100,13 @@ ENTRY(printch) b 1b ENDPROC(printch) +ENTRY(debug_ll_addr) + addruart r2, r3, ip + str r2, [r0] + str r3, [r1] + mov pc, lr +ENDPROC(debug_ll_addr) + #else ENTRY(printascii) @@ -119,4 +126,11 @@ ENTRY(printch) mov pc, lr ENDPROC(printch) +ENTRY(debug_ll_addr) + mov r2, #0 + str r2, [r0] + str r2, [r1] + mov pc, lr +ENDPROC(debug_ll_addr) + #endif diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 941dfb9e9a78..39719bb93caa 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -876,6 +876,22 @@ static void __init pci_reserve_io(void) #define pci_reserve_io() do { } while (0) #endif +#ifdef CONFIG_DEBUG_LL +void __init debug_ll_io_init(void) +{ + struct map_desc map; + + debug_ll_addr(&map.pfn, &map.virtual); + if (!map.pfn || !map.virtual) + return; + map.pfn = __phys_to_pfn(map.pfn); + map.virtual &= PAGE_MASK; + map.length = PAGE_SIZE; + map.type = MT_DEVICE; + create_mapping(&map); +} +#endif + static void * __initdata vmalloc_min = (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET); -- cgit v1.2.3-59-g8ed1b
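
The debug_ll_io_init() helper added in the last patch above is meant to be
called, on an opt-in basis, from a machine's .map_io() hook. The sketch below
shows roughly what such an opt-in could look like. It is an illustrative
example only, not part of the series: the "FOO"/"Example Board" machine and
the foo_map_io() symbol are made-up placeholders.

/*
 * Illustrative sketch (not from the series above): a hypothetical ARM
 * board file keeping the DEBUG_LL UART mapped once the early page tables
 * are replaced, by opting in from its .map_io() callback.
 */
#include <linux/init.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>

static void __init foo_map_io(void)
{
	/* Re-create the DEBUG_LL mapping in the final page tables. */
	debug_ll_io_init();

	/* ... the board's usual static iotable_init() calls go here ... */
}

MACHINE_START(FOO, "Example Board")
	/* other machine_desc fields (.init_irq, .init_machine, ...) omitted */
	.map_io		= foo_map_io,
MACHINE_END

When CONFIG_DEBUG_LL is disabled, debug_ll_io_init() is the empty static
inline from the patch, so the extra call compiles away and the opt-in is
harmless.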