From 29c380f5f06d0c5a320b9bb6f8987065e7b81c91 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 27 Apr 2007 16:01:04 +0200 Subject: [S390] memory detection: stop at first memory hole. If both sclp and diag memory detection don't work stop at the first memory hole. Otherwise the code might loop forever... Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- arch/s390/kernel/early.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 5e47936573f2..50538e545618 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -253,11 +253,10 @@ static noinline __init void find_memory_chunks(unsigned long memsize) break; #endif /* - * Finish memory detection at the first hole, unless - * - we reached the hsa -> skip it. - * - we know there must be more. + * Finish memory detection at the first hole + * if storage size is unknown. */ - if (cc == -1UL && !memsize && old_addr != ADDR2G) + if (cc == -1UL && !memsize) break; if (memsize && addr >= memsize) break; -- cgit v1.2.3-59-g8ed1b From 6fc321fd7dd91f0592f37503219196835314fbb7 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 27 Apr 2007 16:01:25 +0200 Subject: [S390] cio/ipl: Clean interface between cio and ipl code. Clean interface between cio and ipl code, so Peter stops complaining. Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- arch/s390/kernel/ipl.c | 23 ++++++++++++++++++++++- drivers/s390/cio/cio.c | 38 ++++++++++---------------------------- include/asm-s390/cio.h | 7 +++++++ include/asm-s390/ipl.h | 3 +-- 4 files changed, 40 insertions(+), 31 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index f731185bf2bd..a83cf1fdd8f5 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -47,7 +47,7 @@ enum ipl_type { * Must be in data section since the bss section * is not cleared when these are accessed. */ -u16 ipl_devno __attribute__((__section__(".data"))) = 0; +static u16 ipl_devno __attribute__((__section__(".data"))) = 0; u32 ipl_flags __attribute__((__section__(".data"))) = 0; static char *ipl_type_str(enum ipl_type type) @@ -1038,6 +1038,27 @@ static int __init s390_ipl_init(void) __initcall(s390_ipl_init); +void __init ipl_save_parameters(void) +{ + struct cio_iplinfo iplinfo; + unsigned int *ipl_ptr; + void *src, *dst; + + if (cio_get_iplinfo(&iplinfo)) + return; + + ipl_devno = iplinfo.devno; + ipl_flags |= IPL_DEVNO_VALID; + if (!iplinfo.is_qdio) + return; + ipl_flags |= IPL_PARMBLOCK_VALID; + ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; + src = (void *)(unsigned long)*ipl_ptr; + dst = (void *)IPL_PARMBLOCK_ORIGIN; + memmove(dst, src, PAGE_SIZE); + *ipl_ptr = IPL_PARMBLOCK_ORIGIN; +} + static LIST_HEAD(rcall); static DEFINE_MUTEX(rcall_mutex); diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 9cb129ab5be5..21af446c1f2d 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -1048,37 +1048,19 @@ void reipl_ccw_dev(struct ccw_dev_id *devid) do_reipl_asm(*((__u32*)&schid)); } -static struct schib __initdata ipl_schib; - -/* - * ipl_save_parameters gets called very early. It is not allowed to access - * anything in the bss section at all. The bss section is not cleared yet, - * but may contain some ipl parameters written by the firmware. - * These parameters (if present) are copied to 0x2000. 
- * To avoid corruption of the ipl parameters, all variables used by this - * function must reside on the stack or in the data section. - */ -void ipl_save_parameters(void) +int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) { struct subchannel_id schid; - unsigned int *ipl_ptr; - void *src, *dst; + struct schib schib; schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; if (!schid.one) - return; - if (stsch(schid, &ipl_schib)) - return; - if (!ipl_schib.pmcw.dnv) - return; - ipl_devno = ipl_schib.pmcw.dev; - ipl_flags |= IPL_DEVNO_VALID; - if (!ipl_schib.pmcw.qf) - return; - ipl_flags |= IPL_PARMBLOCK_VALID; - ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; - src = (void *)(unsigned long)*ipl_ptr; - dst = (void *)IPL_PARMBLOCK_ORIGIN; - memmove(dst, src, PAGE_SIZE); - *ipl_ptr = IPL_PARMBLOCK_ORIGIN; + return -ENODEV; + if (stsch(schid, &schib)) + return -ENODEV; + if (!schib.pmcw.dnv) + return -ENODEV; + iplinfo->devno = schib.pmcw.dev; + iplinfo->is_qdio = schib.pmcw.qf; + return 0; } diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h index d92785030980..0db017bc7d09 100644 --- a/include/asm-s390/cio.h +++ b/include/asm-s390/cio.h @@ -292,6 +292,13 @@ extern void css_schedule_reprobe(void); extern void reipl_ccw_dev(struct ccw_dev_id *id); +struct cio_iplinfo { + u16 devno; + int is_qdio; +}; + +extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo); + #endif #endif diff --git a/include/asm-s390/ipl.h b/include/asm-s390/ipl.h index 0eb64083480a..15bb0b529551 100644 --- a/include/asm-s390/ipl.h +++ b/include/asm-s390/ipl.h @@ -74,10 +74,9 @@ struct ipl_parameter_block { } __attribute__((packed)); /* - * IPL validity flags and parameters as detected in head.S + * IPL validity flags */ extern u32 ipl_flags; -extern u16 ipl_devno; extern u32 dump_prefix_page; extern void do_reipl(void); -- cgit v1.2.3-59-g8ed1b From 03ff9a235a0602724fc54916469b6e0939c62c9b Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 27 Apr 2007 16:01:40 +0200 Subject: [S390] System call cleanup. Remove system call glue for sys_clone, sys_fork, sys_vfork, sys_execve, sys_sigreturn, sys_rt_sigreturn and sys_sigaltstack. Call do_execve from kernel_execve directly, move pt_regs to the right place and branch to sysc_return to start the user space program. This removes the last in-kernel system call. Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- arch/s390/kernel/compat_linux.c | 60 ++++++++++++----------- arch/s390/kernel/compat_signal.c | 14 +++--- arch/s390/kernel/entry.S | 87 +++++++++++++--------------------- arch/s390/kernel/entry64.S | 100 +++++++++++---------------------------- arch/s390/kernel/process.c | 82 +++++++++++++++++++------------- arch/s390/kernel/signal.c | 10 ++-- arch/s390/kernel/sys_s390.c | 20 -------- arch/s390/kernel/syscalls.S | 14 +++--- 8 files changed, 162 insertions(+), 225 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 664c669b1856..5236fdb17fcb 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -495,29 +495,34 @@ sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo) * sys32_execve() executes a new program after the asm stub has set * things up for us. This should basically do what I want it to. 
*/ -asmlinkage long -sys32_execve(struct pt_regs regs) +asmlinkage long sys32_execve(void) { - int error; - char * filename; + struct pt_regs *regs = task_pt_regs(current); + char *filename; + unsigned long result; + int rc; - filename = getname(compat_ptr(regs.orig_gpr2)); - error = PTR_ERR(filename); - if (IS_ERR(filename)) + filename = getname(compat_ptr(regs->orig_gpr2)); + if (IS_ERR(filename)) { + result = PTR_ERR(filename); goto out; - error = compat_do_execve(filename, compat_ptr(regs.gprs[3]), - compat_ptr(regs.gprs[4]), ®s); - if (error == 0) - { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); - current->thread.fp_regs.fpc=0; - asm volatile("sfpc %0,0" : : "d" (0)); } + rc = compat_do_execve(filename, compat_ptr(regs->gprs[3]), + compat_ptr(regs->gprs[4]), regs); + if (rc) { + result = rc; + goto out_putname; + } + task_lock(current); + current->ptrace &= ~PT_DTRACE; + task_unlock(current); + current->thread.fp_regs.fpc=0; + asm volatile("sfpc %0,0" : : "d" (0)); + result = regs->gprs[2]; +out_putname: putname(filename); out: - return error; + return result; } @@ -918,19 +923,20 @@ asmlinkage long sys32_write(unsigned int fd, char __user * buf, size_t count) return sys_write(fd, buf, count); } -asmlinkage long sys32_clone(struct pt_regs regs) +asmlinkage long sys32_clone(void) { - unsigned long clone_flags; - unsigned long newsp; + struct pt_regs *regs = task_pt_regs(current); + unsigned long clone_flags; + unsigned long newsp; int __user *parent_tidptr, *child_tidptr; - clone_flags = regs.gprs[3] & 0xffffffffUL; - newsp = regs.orig_gpr2 & 0x7fffffffUL; - parent_tidptr = compat_ptr(regs.gprs[4]); - child_tidptr = compat_ptr(regs.gprs[5]); - if (!newsp) - newsp = regs.gprs[15]; - return do_fork(clone_flags, newsp, ®s, 0, + clone_flags = regs->gprs[3] & 0xffffffffUL; + newsp = regs->orig_gpr2 & 0x7fffffffUL; + parent_tidptr = compat_ptr(regs->gprs[4]); + child_tidptr = compat_ptr(regs->gprs[5]); + if (!newsp) + newsp = regs->gprs[15]; + return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr); } diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 887a9881d0d0..80a54a0149ab 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -255,9 +255,9 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, } asmlinkage long -sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss, - struct pt_regs *regs) +sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss) { + struct pt_regs *regs = task_pt_regs(current); stack_t kss, koss; unsigned long ss_sp; int ret, err = 0; @@ -344,8 +344,9 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) return 0; } -asmlinkage long sys32_sigreturn(struct pt_regs *regs) +asmlinkage long sys32_sigreturn(void) { + struct pt_regs *regs = task_pt_regs(current); sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15]; sigset_t set; @@ -370,8 +371,9 @@ badframe: return 0; } -asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) +asmlinkage long sys32_rt_sigreturn(void) { + struct pt_regs *regs = task_pt_regs(current); rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15]; sigset_t set; stack_t st; @@ -407,8 +409,8 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) return regs->gprs[2]; badframe: - force_sig(SIGSEGV, current); - return 0; + force_sig(SIGSEGV, current); + return 0; } /* diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 
dddc3de30401..c8a2212014e0 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -249,8 +249,6 @@ sysc_do_restart: bnz BASED(sysc_tracesys) basr %r14,%r8 # call sys_xxxx st %r2,SP_R2(%r15) # store return value (change R2 on stack) - # ATTENTION: check sys_execve_glue before - # changing anything here !! sysc_return: tm SP_PSW+1(%r15),0x01 # returning to user ? @@ -381,50 +379,37 @@ ret_from_fork: b BASED(sysc_return) # -# clone, fork, vfork, exec and sigreturn need glue, -# because they all expect pt_regs as parameter, -# but are called with different parameter. -# return-address is set up above +# kernel_execve function needs to deal with pt_regs that is not +# at the usual place # -sys_clone_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - l %r1,BASED(.Lclone) - br %r1 # branch to sys_clone - -sys_fork_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - l %r1,BASED(.Lfork) - br %r1 # branch to sys_fork - -sys_vfork_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - l %r1,BASED(.Lvfork) - br %r1 # branch to sys_vfork - -sys_execve_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - l %r1,BASED(.Lexecve) - lr %r12,%r14 # save return address - basr %r14,%r1 # call sys_execve - ltr %r2,%r2 # check if execve failed - bnz 0(%r12) # it did fail -> store result in gpr2 - b 4(%r12) # SKIP ST 2,SP_R2(15) after BASR 14,8 - # in system_call/sysc_tracesys - -sys_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - l %r1,BASED(.Lsigreturn) - br %r1 # branch to sys_sigreturn - -sys_rt_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - l %r1,BASED(.Lrt_sigreturn) - br %r1 # branch to sys_sigreturn - -sys_sigaltstack_glue: - la %r4,SP_PTREGS(%r15) # load pt_regs as parameter - l %r1,BASED(.Lsigaltstack) - br %r1 # branch to sys_sigreturn + .globl kernel_execve +kernel_execve: + stm %r12,%r15,48(%r15) + lr %r14,%r15 + l %r13,__LC_SVC_NEW_PSW+4 + s %r15,BASED(.Lc_spsize) + st %r14,__SF_BACKCHAIN(%r15) + la %r12,SP_PTREGS(%r15) + xc 0(__PT_SIZE,%r12),0(%r12) + l %r1,BASED(.Ldo_execve) + lr %r5,%r12 + basr %r14,%r1 + ltr %r2,%r2 + be BASED(0f) + a %r15,BASED(.Lc_spsize) + lm %r12,%r15,48(%r15) + br %r14 + # execve succeeded. 
+0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts + l %r15,__LC_KERNEL_STACK # load ksp + s %r15,BASED(.Lc_spsize) # make room for registers & psw + l %r9,__LC_THREAD_INFO + mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs + xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) + stosm __SF_EMPTY(%r15),0x03 # reenable interrupts + l %r1,BASED(.Lexecve_tail) + basr %r14,%r1 + b BASED(sysc_return) /* * Program check handler routine @@ -1031,19 +1016,11 @@ cleanup_io_leave_insn: .Ldo_extint: .long do_extint .Ldo_signal: .long do_signal .Lhandle_per: .long do_single_step +.Ldo_execve: .long do_execve +.Lexecve_tail: .long execve_tail .Ljump_table: .long pgm_check_table .Lschedule: .long schedule -.Lclone: .long sys_clone -.Lexecve: .long sys_execve -.Lfork: .long sys_fork -.Lrt_sigreturn: .long sys_rt_sigreturn -.Lrt_sigsuspend: - .long sys_rt_sigsuspend -.Lsigreturn: .long sys_sigreturn -.Lsigsuspend: .long sys_sigsuspend -.Lsigaltstack: .long sys_sigaltstack .Ltrace: .long syscall_trace -.Lvfork: .long sys_vfork .Lschedtail: .long schedule_tail .Lsysc_table: .long sys_call_table #ifdef CONFIG_TRACE_IRQFLAGS diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 0f758c329a5d..93745fd8f555 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -244,8 +244,6 @@ sysc_noemu: jnz sysc_tracesys basr %r14,%r8 # call sys_xxxx stg %r2,SP_R2(%r15) # store return value (change R2 on stack) - # ATTENTION: check sys_execve_glue before - # changing anything here !! sysc_return: tm SP_PSW+1(%r15),0x01 # returning to user ? @@ -371,77 +369,35 @@ ret_from_fork: j sysc_return # -# clone, fork, vfork, exec and sigreturn need glue, -# because they all expect pt_regs as parameter, -# but are called with different parameter. -# return-address is set up above +# kernel_execve function needs to deal with pt_regs that is not +# at the usual place # -sys_clone_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - jg sys_clone # branch to sys_clone - -#ifdef CONFIG_COMPAT -sys32_clone_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - jg sys32_clone # branch to sys32_clone -#endif - -sys_fork_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - jg sys_fork # branch to sys_fork - -sys_vfork_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - jg sys_vfork # branch to sys_vfork - -sys_execve_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - lgr %r12,%r14 # save return address - brasl %r14,sys_execve # call sys_execve - ltgr %r2,%r2 # check if execve failed - bnz 0(%r12) # it did fail -> store result in gpr2 - b 6(%r12) # SKIP STG 2,SP_R2(15) in - # system_call/sysc_tracesys -#ifdef CONFIG_COMPAT -sys32_execve_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - lgr %r12,%r14 # save return address - brasl %r14,sys32_execve # call sys32_execve - ltgr %r2,%r2 # check if execve failed - bnz 0(%r12) # it did fail -> store result in gpr2 - b 6(%r12) # SKIP STG 2,SP_R2(15) in - # system_call/sysc_tracesys -#endif - -sys_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys_sigreturn # branch to sys_sigreturn - -#ifdef CONFIG_COMPAT -sys32_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys32_sigreturn # branch to sys32_sigreturn -#endif - -sys_rt_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys_rt_sigreturn # branch to sys_sigreturn - -#ifdef CONFIG_COMPAT -sys32_rt_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys32_rt_sigreturn # branch to sys32_sigreturn -#endif - -sys_sigaltstack_glue: - la 
%r4,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys_sigaltstack # branch to sys_sigreturn - -#ifdef CONFIG_COMPAT -sys32_sigaltstack_glue: - la %r4,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys32_sigaltstack_wrapper # branch to sys_sigreturn -#endif + .globl kernel_execve +kernel_execve: + stmg %r12,%r15,96(%r15) + lgr %r14,%r15 + aghi %r15,-SP_SIZE + stg %r14,__SF_BACKCHAIN(%r15) + la %r12,SP_PTREGS(%r15) + xc 0(__PT_SIZE,%r12),0(%r12) + lgr %r5,%r12 + brasl %r14,do_execve + ltgfr %r2,%r2 + je 0f + aghi %r15,SP_SIZE + lmg %r12,%r15,96(%r15) + br %r14 + # execve succeeded. +0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts + lg %r15,__LC_KERNEL_STACK # load ksp + aghi %r15,-SP_SIZE # make room for registers & psw + lg %r13,__LC_SVC_NEW_PSW+8 + lg %r9,__LC_THREAD_INFO + mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + stosm __SF_EMPTY(%r15),0x03 # reenable interrupts + brasl %r14,execve_tail + j sysc_return /* * Program check handler routine diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 5acfac654f9d..11d9b0197626 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -280,24 +280,26 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, return 0; } -asmlinkage long sys_fork(struct pt_regs regs) +asmlinkage long sys_fork(void) { - return do_fork(SIGCHLD, regs.gprs[15], ®s, 0, NULL, NULL); + struct pt_regs *regs = task_pt_regs(current); + return do_fork(SIGCHLD, regs->gprs[15], regs, 0, NULL, NULL); } -asmlinkage long sys_clone(struct pt_regs regs) +asmlinkage long sys_clone(void) { - unsigned long clone_flags; - unsigned long newsp; + struct pt_regs *regs = task_pt_regs(current); + unsigned long clone_flags; + unsigned long newsp; int __user *parent_tidptr, *child_tidptr; - clone_flags = regs.gprs[3]; - newsp = regs.orig_gpr2; - parent_tidptr = (int __user *) regs.gprs[4]; - child_tidptr = (int __user *) regs.gprs[5]; - if (!newsp) - newsp = regs.gprs[15]; - return do_fork(clone_flags, newsp, ®s, 0, + clone_flags = regs->gprs[3]; + newsp = regs->orig_gpr2; + parent_tidptr = (int __user *) regs->gprs[4]; + child_tidptr = (int __user *) regs->gprs[5]; + if (!newsp) + newsp = regs->gprs[15]; + return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr); } @@ -311,40 +313,52 @@ asmlinkage long sys_clone(struct pt_regs regs) * do not have enough call-clobbered registers to hold all * the information you need. */ -asmlinkage long sys_vfork(struct pt_regs regs) +asmlinkage long sys_vfork(void) { + struct pt_regs *regs = task_pt_regs(current); return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, - regs.gprs[15], ®s, 0, NULL, NULL); + regs->gprs[15], regs, 0, NULL, NULL); +} + +asmlinkage void execve_tail(void) +{ + task_lock(current); + current->ptrace &= ~PT_DTRACE; + task_unlock(current); + current->thread.fp_regs.fpc = 0; + if (MACHINE_HAS_IEEE) + asm volatile("sfpc %0,%0" : : "d" (0)); } /* * sys_execve() executes a new program. 
*/ -asmlinkage long sys_execve(struct pt_regs regs) +asmlinkage long sys_execve(void) { - int error; - char * filename; - - filename = getname((char __user *) regs.orig_gpr2); - error = PTR_ERR(filename); - if (IS_ERR(filename)) - goto out; - error = do_execve(filename, (char __user * __user *) regs.gprs[3], - (char __user * __user *) regs.gprs[4], ®s); - if (error == 0) { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); - current->thread.fp_regs.fpc = 0; - if (MACHINE_HAS_IEEE) - asm volatile("sfpc %0,%0" : : "d" (0)); + struct pt_regs *regs = task_pt_regs(current); + char *filename; + unsigned long result; + int rc; + + filename = getname((char __user *) regs->orig_gpr2); + if (IS_ERR(filename)) { + result = PTR_ERR(filename); + goto out; } - putname(filename); + rc = do_execve(filename, (char __user * __user *) regs->gprs[3], + (char __user * __user *) regs->gprs[4], regs); + if (rc) { + result = rc; + goto out_putname; + } + execve_tail(); + result = regs->gprs[2]; +out_putname: + putname(filename); out: - return error; + return result; } - /* * fill in the FPU structure for a core dump. */ diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 554f9cf7499c..3c41907799a1 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -102,9 +102,9 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, } asmlinkage long -sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, - struct pt_regs *regs) +sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) { + struct pt_regs *regs = task_pt_regs(current); return do_sigaltstack(uss, uoss, regs->gprs[15]); } @@ -163,8 +163,9 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) return 0; } -asmlinkage long sys_sigreturn(struct pt_regs *regs) +asmlinkage long sys_sigreturn(void) { + struct pt_regs *regs = task_pt_regs(current); sigframe __user *frame = (sigframe __user *)regs->gprs[15]; sigset_t set; @@ -189,8 +190,9 @@ badframe: return 0; } -asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) +asmlinkage long sys_rt_sigreturn(void) { + struct pt_regs *regs = task_pt_regs(current); rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15]; sigset_t set; diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index 584ed95f3380..3a77c22cda78 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c @@ -266,23 +266,3 @@ s390_fadvise64_64(struct fadvise64_64_args __user *args) return -EFAULT; return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice); } - -/* - * Do a system call from kernel instead of calling sys_execve so we - * end up with proper pt_regs. 
- */ -int kernel_execve(const char *filename, char *const argv[], char *const envp[]) -{ - register const char *__arg1 asm("2") = filename; - register char *const*__arg2 asm("3") = argv; - register char *const*__arg3 asm("4") = envp; - register long __svcres asm("2"); - asm volatile( - "svc %b1" - : "=d" (__svcres) - : "i" (__NR_execve), - "0" (__arg1), - "d" (__arg2), - "d" (__arg3) : "memory"); - return __svcres; -} diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index c774f1069e10..cd8d321cd0c2 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -10,7 +10,7 @@ NI_SYSCALL /* 0 */ SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper) -SYSCALL(sys_fork_glue,sys_fork_glue,sys_fork_glue) +SYSCALL(sys_fork,sys_fork,sys_fork) SYSCALL(sys_read,sys_read,sys32_read_wrapper) SYSCALL(sys_write,sys_write,sys32_write_wrapper) SYSCALL(sys_open,sys_open,sys32_open_wrapper) /* 5 */ @@ -19,7 +19,7 @@ SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall) SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper) SYSCALL(sys_link,sys_link,sys32_link_wrapper) SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper) /* 10 */ -SYSCALL(sys_execve_glue,sys_execve_glue,sys32_execve_glue) +SYSCALL(sys_execve,sys_execve,sys32_execve) SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper) SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper) /* old time syscall */ SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper) @@ -127,8 +127,8 @@ SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */ SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper) SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper) SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper) -SYSCALL(sys_sigreturn_glue,sys_sigreturn_glue,sys32_sigreturn_glue) -SYSCALL(sys_clone_glue,sys_clone_glue,sys32_clone_glue) /* 120 */ +SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn) +SYSCALL(sys_clone,sys_clone,sys32_clone) /* 120 */ SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper) SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper) NI_SYSCALL /* modify_ldt for i386 */ @@ -181,7 +181,7 @@ SYSCALL(sys_nfsservctl,sys_nfsservctl,compat_sys_nfsservctl_wrapper) SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old setresgid16 syscall */ SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */ SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper) -SYSCALL(sys_rt_sigreturn_glue,sys_rt_sigreturn_glue,sys32_rt_sigreturn_glue) +SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,sys32_rt_sigreturn) SYSCALL(sys_rt_sigaction,sys_rt_sigaction,sys32_rt_sigaction_wrapper) SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper) /* 175 */ SYSCALL(sys_rt_sigpending,sys_rt_sigpending,sys32_rt_sigpending_wrapper) @@ -194,11 +194,11 @@ SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper) /* old chown16 syscall SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper) SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper) SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */ -SYSCALL(sys_sigaltstack_glue,sys_sigaltstack_glue,sys32_sigaltstack_glue) +SYSCALL(sys_sigaltstack,sys_sigaltstack,sys32_sigaltstack) SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper) NI_SYSCALL /* streams1 */ NI_SYSCALL /* streams2 */ -SYSCALL(sys_vfork_glue,sys_vfork_glue,sys_vfork_glue) /* 190 */ +SYSCALL(sys_vfork,sys_vfork,sys_vfork) /* 190 */ SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit_wrapper) SYSCALL(sys_mmap2,sys_mmap2,sys32_mmap2_wrapper) 
SYSCALL(sys_truncate64,sys_ni_syscall,sys32_truncate64_wrapper) -- cgit v1.2.3-59-g8ed1b From bb11e3bdbac08f773a89f3ca287024a956ee8a12 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 27 Apr 2007 16:01:41 +0200 Subject: [S390] Improved oops output. This patch adds two improvements to the oops output. First it adds an additional line after the PSW which decodes the different fields of it. Second a disassembler is added that decodes the instructions surrounding the faulting PSW. The output of a test oops now looks like this: kernel BUG at init/main.c:419 illegal operation: 0001 [#1] CPU: 0 Not tainted Process swapper (pid: 0, task: 0000000000464968, ksp: 00000000004be000) Krnl PSW : 0700000180000000 00000000000120b6 (rest_init+0x36/0x38) R:0 T:1 IO:1 EX:1 Key:0 M:0 W:0 P:0 AS:0 CC:0 PM:0 EA:3 Krnl GPRS: 0000000000000003 00000000004ba017 0000000000000022 0000000000000001 000000000003a5f6 0000000000000000 00000000004be6a8 0000000000000000 0000000000000000 00000000004b8200 0000000000003a50 0000000000008000 0000000000516368 000000000033d008 00000000000120b2 00000000004bdee0 Krnl Code: 00000000000120a6: e3e0f0980024 stg %r14,152(%r15) 00000000000120ac: c0e500014296 brasl %r14,3a5d8 00000000000120b2: a7f40001 brc 15,120b4 >00000000000120b6: 0707 bcr 0,%r7 00000000000120b8: eb7ff0500024 stmg %r7,%r15,80(%r15) 00000000000120be: c0d000195825 larl %r13,33d108 00000000000120c4: a7f13f00 tmll %r15,16128 00000000000120c8: a7840001 brc 8,120ca Call Trace: ([<00000000000120b2>] rest_init+0x32/0x38) [<00000000004be614>] start_kernel+0x37c/0x410 [<0000000000012020>] _ehead+0x20/0x80 Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- arch/s390/kernel/Makefile | 2 +- arch/s390/kernel/dis.c | 1278 ++++++++++++++++++++++++++++++++++++++++++ arch/s390/kernel/traps.c | 55 +- include/asm-s390/processor.h | 1 + 4 files changed, 1297 insertions(+), 39 deletions(-) create mode 100644 arch/s390/kernel/dis.c (limited to 'arch') diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 5492d25d7d69..3195d375bd51 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -6,7 +6,7 @@ EXTRA_AFLAGS := -traditional obj-y := bitmap.o traps.o time.o process.o base.o early.o \ setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ - semaphore.o s390_ext.o debug.o irq.o ipl.o + semaphore.o s390_ext.o debug.o irq.o ipl.o dis.o obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c new file mode 100644 index 000000000000..dabaf98943d0 --- /dev/null +++ b/arch/s390/kernel/dis.c @@ -0,0 +1,1278 @@ +/* + * arch/s390/kernel/dis.c + * + * Disassemble s390 instructions. + * + * Copyright IBM Corp. 
2007 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef CONFIG_64BIT +#define ONELONG "%08lx: " +#else /* CONFIG_64BIT */ +#define ONELONG "%016lx: " +#endif /* CONFIG_64BIT */ + +#define OPERAND_GPR 0x1 /* Operand printed as %rx */ +#define OPERAND_FPR 0x2 /* Operand printed as %fx */ +#define OPERAND_AR 0x4 /* Operand printed as %ax */ +#define OPERAND_CR 0x8 /* Operand printed as %cx */ +#define OPERAND_DISP 0x10 /* Operand printed as displacement */ +#define OPERAND_BASE 0x20 /* Operand printed as base register */ +#define OPERAND_INDEX 0x40 /* Operand printed as index register */ +#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */ +#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */ +#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */ + +enum { + UNUSED, /* Indicates the end of the operand list */ + R_8, /* GPR starting at position 8 */ + R_12, /* GPR starting at position 12 */ + R_16, /* GPR starting at position 16 */ + R_20, /* GPR starting at position 20 */ + R_24, /* GPR starting at position 24 */ + R_28, /* GPR starting at position 28 */ + R_32, /* GPR starting at position 32 */ + F_8, /* FPR starting at position 8 */ + F_12, /* FPR starting at position 12 */ + F_16, /* FPR starting at position 16 */ + F_20, /* FPR starting at position 16 */ + F_24, /* FPR starting at position 24 */ + F_28, /* FPR starting at position 28 */ + F_32, /* FPR starting at position 32 */ + A_8, /* Access reg. starting at position 8 */ + A_12, /* Access reg. starting at position 12 */ + A_24, /* Access reg. starting at position 24 */ + A_28, /* Access reg. starting at position 28 */ + C_8, /* Control reg. starting at position 8 */ + C_12, /* Control reg. starting at position 12 */ + B_16, /* Base register starting at position 16 */ + B_32, /* Base register starting at position 32 */ + X_12, /* Index register starting at position 12 */ + D_20, /* Displacement starting at position 20 */ + D_36, /* Displacement starting at position 36 */ + D20_20, /* 20 bit displacement starting at 20 */ + L4_8, /* 4 bit length starting at position 8 */ + L4_12, /* 4 bit length starting at position 12 */ + L8_8, /* 8 bit length starting at position 8 */ + U4_8, /* 4 bit unsigned value starting at 8 */ + U4_12, /* 4 bit unsigned value starting at 12 */ + U4_16, /* 4 bit unsigned value starting at 16 */ + U4_20, /* 4 bit unsigned value starting at 20 */ + U8_8, /* 8 bit unsigned value starting at 8 */ + U8_16, /* 8 bit unsigned value starting at 16 */ + I16_16, /* 16 bit signed value starting at 16 */ + U16_16, /* 16 bit unsigned value starting at 16 */ + J16_16, /* PC relative jump offset at 16 */ + J32_16, /* PC relative long offset at 16 */ + I32_16, /* 32 bit signed value starting at 16 */ + U32_16, /* 32 bit unsigned value starting at 16 */ + M_16, /* 4 bit optional mask starting at 16 */ + RO_28, /* optional GPR starting at position 28 */ +}; + +/* + * Enumeration of the different instruction formats. + * For details consult the principles of operation. 
+ */ +enum { + INSTR_INVALID, + INSTR_E, INSTR_RIE_RRP, INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, + INSTR_RIL_UP, INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP, + INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0, + INSTR_RRE_FF, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, INSTR_RRE_RR, + INSTR_RRE_RR_OPT, INSTR_RRF_F0FF, INSTR_RRF_FUFF, INSTR_RRF_M0RR, + INSTR_RRF_R0RR, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, + INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR, + INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, INSTR_RSI_RRP, + INSTR_RSL_R0RD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, + INSTR_RSY_RURD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, + INSTR_RS_RRRD, INSTR_RS_RURD, INSTR_RXE_FRRD, INSTR_RXE_RRRD, + INSTR_RXF_FRRDF, INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RX_FRRD, + INSTR_RX_RRRD, INSTR_RX_URRD, INSTR_SIY_URD, INSTR_SI_URD, + INSTR_SSE_RDRD, INSTR_SSF_RRDRD, INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, + INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, + INSTR_S_00, INSTR_S_RD, +}; + +struct operand { + int bits; /* The number of bits in the operand. */ + int shift; /* The number of bits to shift. */ + int flags; /* One bit syntax flags. */ +}; + +struct insn { + const char name[5]; + unsigned char opfrag; + unsigned char format; +}; + +static const struct operand operands[] = +{ + [UNUSED] = { 0, 0, 0 }, + [R_8] = { 4, 8, OPERAND_GPR }, + [R_12] = { 4, 12, OPERAND_GPR }, + [R_16] = { 4, 16, OPERAND_GPR }, + [R_20] = { 4, 20, OPERAND_GPR }, + [R_24] = { 4, 24, OPERAND_GPR }, + [R_28] = { 4, 28, OPERAND_GPR }, + [R_32] = { 4, 32, OPERAND_GPR }, + [F_8] = { 4, 8, OPERAND_FPR }, + [F_12] = { 4, 12, OPERAND_FPR }, + [F_16] = { 4, 16, OPERAND_FPR }, + [F_20] = { 4, 16, OPERAND_FPR }, + [F_24] = { 4, 24, OPERAND_FPR }, + [F_28] = { 4, 28, OPERAND_FPR }, + [F_32] = { 4, 32, OPERAND_FPR }, + [A_8] = { 4, 8, OPERAND_AR }, + [A_12] = { 4, 12, OPERAND_AR }, + [A_24] = { 4, 24, OPERAND_AR }, + [A_28] = { 4, 28, OPERAND_AR }, + [C_8] = { 4, 8, OPERAND_CR }, + [C_12] = { 4, 12, OPERAND_CR }, + [B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR }, + [B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR }, + [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR }, + [D_20] = { 12, 20, OPERAND_DISP }, + [D_36] = { 12, 36, OPERAND_DISP }, + [D20_20] = { 20, 20, OPERAND_DISP | OPERAND_SIGNED }, + [L4_8] = { 4, 8, OPERAND_LENGTH }, + [L4_12] = { 4, 12, OPERAND_LENGTH }, + [L8_8] = { 8, 8, OPERAND_LENGTH }, + [U4_8] = { 4, 8, 0 }, + [U4_12] = { 4, 12, 0 }, + [U4_16] = { 4, 16, 0 }, + [U4_20] = { 4, 20, 0 }, + [U8_8] = { 8, 8, 0 }, + [U8_16] = { 8, 16, 0 }, + [I16_16] = { 16, 16, OPERAND_SIGNED }, + [U16_16] = { 16, 16, 0 }, + [J16_16] = { 16, 16, OPERAND_PCREL }, + [J32_16] = { 32, 16, OPERAND_PCREL }, + [I32_16] = { 32, 16, OPERAND_SIGNED }, + [U32_16] = { 32, 16, 0 }, + [M_16] = { 4, 16, 0 }, + [RO_28] = { 4, 28, OPERAND_GPR } +}; + +static const unsigned char formats[][7] = { + [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, /* e.g. pr */ + [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxhg */ + [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, /* e.g. brasl */ + [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 }, /* e.g. brcl */ + [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, /* e.g. afi */ + [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, /* e.g. alfi */ + [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 }, /* e.g. ahi */ + [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 }, /* e.g. brct */ + [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 }, /* e.g. 
tml */ + [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 }, /* e.g. brc */ + [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. palb */ + [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 }, /* e.g. tb */ + [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 }, /* e.g. cpya */ + [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 }, /* e.g. sar */ + [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 }, /* e.g. sqer */ + [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 }, /* e.g. debr */ + [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 }, /* e.g. ipm */ + [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 }, /* e.g. ear */ + [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 }, /* e.g. cefbr */ + [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 }, /* e.g. lura */ + [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 }, /* efpc, sfpc */ + [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, /* e.g. madbr */ + [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */ + [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */ + [INSTR_RRF_R0RR] = { 0xff, R_24,R_28,R_16,0,0,0 }, /* e.g. idte */ + [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, /* e.g. fixr */ + [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, /* e.g. cfebr */ + [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, /* e.g. sske */ + [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, /* e.g. adr */ + [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 }, /* e.g. spm */ + [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 }, /* e.g. lr */ + [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 }, /* e.g. svc */ + [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 }, /* e.g. bcr */ + [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. lmh */ + [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lmh */ + [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icmh */ + [INSTR_RSL_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. tp */ + [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxh */ + [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },/* e.g. stmy */ + [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, + /* e.g. icmh */ + [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },/* e.g. lamy */ + [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },/* e.g. lamy */ + [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, /* e.g. lam */ + [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lctl */ + [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. sll */ + [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. cs */ + [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icm */ + [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. axbr */ + [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. lg */ + [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 }, + /* e.g. madb */ + [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },/* e.g. ly */ + [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },/* e.g. ley */ + [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. ae */ + [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. l */ + [INSTR_RX_URRD] = { 0x00, U4_8,D_20,X_12,B_16,0,0 }, /* e.g. bc */ + [INSTR_SI_URD] = { 0x00, D_20,B_16,U8_8,0,0,0 }, /* e.g. cli */ + [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, /* e.g. tmy */ + [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, /* e.g. mvsdk */ + [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 }, + /* e.g. mvc */ + [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 }, + /* e.g. srp */ + [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 }, + /* e.g. 
pack */ + [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 }, + /* e.g. mvck */ + [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 }, + /* e.g. plo */ + [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 }, + /* e.g. lmd */ + [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. hsch */ + [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, /* e.g. lpsw */ + [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 }, + /* e.g. mvcos */ +}; + +static struct insn opcode[] = { +#ifdef CONFIG_64BIT + { "lmd", 0xef, INSTR_SS_RRRDRD3 }, +#endif + { "spm", 0x04, INSTR_RR_R0 }, + { "balr", 0x05, INSTR_RR_RR }, + { "bctr", 0x06, INSTR_RR_RR }, + { "bcr", 0x07, INSTR_RR_UR }, + { "svc", 0x0a, INSTR_RR_U0 }, + { "bsm", 0x0b, INSTR_RR_RR }, + { "bassm", 0x0c, INSTR_RR_RR }, + { "basr", 0x0d, INSTR_RR_RR }, + { "mvcl", 0x0e, INSTR_RR_RR }, + { "clcl", 0x0f, INSTR_RR_RR }, + { "lpr", 0x10, INSTR_RR_RR }, + { "lnr", 0x11, INSTR_RR_RR }, + { "ltr", 0x12, INSTR_RR_RR }, + { "lcr", 0x13, INSTR_RR_RR }, + { "nr", 0x14, INSTR_RR_RR }, + { "clr", 0x15, INSTR_RR_RR }, + { "or", 0x16, INSTR_RR_RR }, + { "xr", 0x17, INSTR_RR_RR }, + { "lr", 0x18, INSTR_RR_RR }, + { "cr", 0x19, INSTR_RR_RR }, + { "ar", 0x1a, INSTR_RR_RR }, + { "sr", 0x1b, INSTR_RR_RR }, + { "mr", 0x1c, INSTR_RR_RR }, + { "dr", 0x1d, INSTR_RR_RR }, + { "alr", 0x1e, INSTR_RR_RR }, + { "slr", 0x1f, INSTR_RR_RR }, + { "lpdr", 0x20, INSTR_RR_FF }, + { "lndr", 0x21, INSTR_RR_FF }, + { "ltdr", 0x22, INSTR_RR_FF }, + { "lcdr", 0x23, INSTR_RR_FF }, + { "hdr", 0x24, INSTR_RR_FF }, + { "ldxr", 0x25, INSTR_RR_FF }, + { "lrdr", 0x25, INSTR_RR_FF }, + { "mxr", 0x26, INSTR_RR_FF }, + { "mxdr", 0x27, INSTR_RR_FF }, + { "ldr", 0x28, INSTR_RR_FF }, + { "cdr", 0x29, INSTR_RR_FF }, + { "adr", 0x2a, INSTR_RR_FF }, + { "sdr", 0x2b, INSTR_RR_FF }, + { "mdr", 0x2c, INSTR_RR_FF }, + { "ddr", 0x2d, INSTR_RR_FF }, + { "awr", 0x2e, INSTR_RR_FF }, + { "swr", 0x2f, INSTR_RR_FF }, + { "lper", 0x30, INSTR_RR_FF }, + { "lner", 0x31, INSTR_RR_FF }, + { "lter", 0x32, INSTR_RR_FF }, + { "lcer", 0x33, INSTR_RR_FF }, + { "her", 0x34, INSTR_RR_FF }, + { "ledr", 0x35, INSTR_RR_FF }, + { "lrer", 0x35, INSTR_RR_FF }, + { "axr", 0x36, INSTR_RR_FF }, + { "sxr", 0x37, INSTR_RR_FF }, + { "ler", 0x38, INSTR_RR_FF }, + { "cer", 0x39, INSTR_RR_FF }, + { "aer", 0x3a, INSTR_RR_FF }, + { "ser", 0x3b, INSTR_RR_FF }, + { "mder", 0x3c, INSTR_RR_FF }, + { "mer", 0x3c, INSTR_RR_FF }, + { "der", 0x3d, INSTR_RR_FF }, + { "aur", 0x3e, INSTR_RR_FF }, + { "sur", 0x3f, INSTR_RR_FF }, + { "sth", 0x40, INSTR_RX_RRRD }, + { "la", 0x41, INSTR_RX_RRRD }, + { "stc", 0x42, INSTR_RX_RRRD }, + { "ic", 0x43, INSTR_RX_RRRD }, + { "ex", 0x44, INSTR_RX_RRRD }, + { "bal", 0x45, INSTR_RX_RRRD }, + { "bct", 0x46, INSTR_RX_RRRD }, + { "bc", 0x47, INSTR_RX_URRD }, + { "lh", 0x48, INSTR_RX_RRRD }, + { "ch", 0x49, INSTR_RX_RRRD }, + { "ah", 0x4a, INSTR_RX_RRRD }, + { "sh", 0x4b, INSTR_RX_RRRD }, + { "mh", 0x4c, INSTR_RX_RRRD }, + { "bas", 0x4d, INSTR_RX_RRRD }, + { "cvd", 0x4e, INSTR_RX_RRRD }, + { "cvb", 0x4f, INSTR_RX_RRRD }, + { "st", 0x50, INSTR_RX_RRRD }, + { "lae", 0x51, INSTR_RX_RRRD }, + { "n", 0x54, INSTR_RX_RRRD }, + { "cl", 0x55, INSTR_RX_RRRD }, + { "o", 0x56, INSTR_RX_RRRD }, + { "x", 0x57, INSTR_RX_RRRD }, + { "l", 0x58, INSTR_RX_RRRD }, + { "c", 0x59, INSTR_RX_RRRD }, + { "a", 0x5a, INSTR_RX_RRRD }, + { "s", 0x5b, INSTR_RX_RRRD }, + { "m", 0x5c, INSTR_RX_RRRD }, + { "d", 0x5d, INSTR_RX_RRRD }, + { "al", 0x5e, INSTR_RX_RRRD }, + { "sl", 0x5f, INSTR_RX_RRRD }, + { "std", 0x60, INSTR_RX_FRRD }, + { "mxd", 0x67, 
INSTR_RX_FRRD }, + { "ld", 0x68, INSTR_RX_FRRD }, + { "cd", 0x69, INSTR_RX_FRRD }, + { "ad", 0x6a, INSTR_RX_FRRD }, + { "sd", 0x6b, INSTR_RX_FRRD }, + { "md", 0x6c, INSTR_RX_FRRD }, + { "dd", 0x6d, INSTR_RX_FRRD }, + { "aw", 0x6e, INSTR_RX_FRRD }, + { "sw", 0x6f, INSTR_RX_FRRD }, + { "ste", 0x70, INSTR_RX_FRRD }, + { "ms", 0x71, INSTR_RX_RRRD }, + { "le", 0x78, INSTR_RX_FRRD }, + { "ce", 0x79, INSTR_RX_FRRD }, + { "ae", 0x7a, INSTR_RX_FRRD }, + { "se", 0x7b, INSTR_RX_FRRD }, + { "mde", 0x7c, INSTR_RX_FRRD }, + { "me", 0x7c, INSTR_RX_FRRD }, + { "de", 0x7d, INSTR_RX_FRRD }, + { "au", 0x7e, INSTR_RX_FRRD }, + { "su", 0x7f, INSTR_RX_FRRD }, + { "ssm", 0x80, INSTR_S_RD }, + { "lpsw", 0x82, INSTR_S_RD }, + { "diag", 0x83, INSTR_RS_RRRD }, + { "brxh", 0x84, INSTR_RSI_RRP }, + { "brxle", 0x85, INSTR_RSI_RRP }, + { "bxh", 0x86, INSTR_RS_RRRD }, + { "bxle", 0x87, INSTR_RS_RRRD }, + { "srl", 0x88, INSTR_RS_R0RD }, + { "sll", 0x89, INSTR_RS_R0RD }, + { "sra", 0x8a, INSTR_RS_R0RD }, + { "sla", 0x8b, INSTR_RS_R0RD }, + { "srdl", 0x8c, INSTR_RS_R0RD }, + { "sldl", 0x8d, INSTR_RS_R0RD }, + { "srda", 0x8e, INSTR_RS_R0RD }, + { "slda", 0x8f, INSTR_RS_R0RD }, + { "stm", 0x90, INSTR_RS_RRRD }, + { "tm", 0x91, INSTR_SI_URD }, + { "mvi", 0x92, INSTR_SI_URD }, + { "ts", 0x93, INSTR_S_RD }, + { "ni", 0x94, INSTR_SI_URD }, + { "cli", 0x95, INSTR_SI_URD }, + { "oi", 0x96, INSTR_SI_URD }, + { "xi", 0x97, INSTR_SI_URD }, + { "lm", 0x98, INSTR_RS_RRRD }, + { "trace", 0x99, INSTR_RS_RRRD }, + { "lam", 0x9a, INSTR_RS_AARD }, + { "stam", 0x9b, INSTR_RS_AARD }, + { "mvcle", 0xa8, INSTR_RS_RRRD }, + { "clcle", 0xa9, INSTR_RS_RRRD }, + { "stnsm", 0xac, INSTR_SI_URD }, + { "stosm", 0xad, INSTR_SI_URD }, + { "sigp", 0xae, INSTR_RS_RRRD }, + { "mc", 0xaf, INSTR_SI_URD }, + { "lra", 0xb1, INSTR_RX_RRRD }, + { "stctl", 0xb6, INSTR_RS_CCRD }, + { "lctl", 0xb7, INSTR_RS_CCRD }, + { "cs", 0xba, INSTR_RS_RRRD }, + { "cds", 0xbb, INSTR_RS_RRRD }, + { "clm", 0xbd, INSTR_RS_RURD }, + { "stcm", 0xbe, INSTR_RS_RURD }, + { "icm", 0xbf, INSTR_RS_RURD }, + { "mvn", 0xd1, INSTR_SS_L0RDRD }, + { "mvc", 0xd2, INSTR_SS_L0RDRD }, + { "mvz", 0xd3, INSTR_SS_L0RDRD }, + { "nc", 0xd4, INSTR_SS_L0RDRD }, + { "clc", 0xd5, INSTR_SS_L0RDRD }, + { "oc", 0xd6, INSTR_SS_L0RDRD }, + { "xc", 0xd7, INSTR_SS_L0RDRD }, + { "mvck", 0xd9, INSTR_SS_RRRDRD }, + { "mvcp", 0xda, INSTR_SS_RRRDRD }, + { "mvcs", 0xdb, INSTR_SS_RRRDRD }, + { "tr", 0xdc, INSTR_SS_L0RDRD }, + { "trt", 0xdd, INSTR_SS_L0RDRD }, + { "ed", 0xde, INSTR_SS_L0RDRD }, + { "edmk", 0xdf, INSTR_SS_L0RDRD }, + { "pku", 0xe1, INSTR_SS_L0RDRD }, + { "unpku", 0xe2, INSTR_SS_L0RDRD }, + { "mvcin", 0xe8, INSTR_SS_L0RDRD }, + { "pka", 0xe9, INSTR_SS_L0RDRD }, + { "unpka", 0xea, INSTR_SS_L0RDRD }, + { "plo", 0xee, INSTR_SS_RRRDRD2 }, + { "srp", 0xf0, INSTR_SS_LIRDRD }, + { "mvo", 0xf1, INSTR_SS_LLRDRD }, + { "pack", 0xf2, INSTR_SS_LLRDRD }, + { "unpk", 0xf3, INSTR_SS_LLRDRD }, + { "zap", 0xf8, INSTR_SS_LLRDRD }, + { "cp", 0xf9, INSTR_SS_LLRDRD }, + { "ap", 0xfa, INSTR_SS_LLRDRD }, + { "sp", 0xfb, INSTR_SS_LLRDRD }, + { "mp", 0xfc, INSTR_SS_LLRDRD }, + { "dp", 0xfd, INSTR_SS_LLRDRD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_01[] = { +#ifdef CONFIG_64BIT + { "sam64", 0x0e, INSTR_E }, +#endif + { "pr", 0x01, INSTR_E }, + { "upt", 0x02, INSTR_E }, + { "sckpf", 0x07, INSTR_E }, + { "tam", 0x0b, INSTR_E }, + { "sam24", 0x0c, INSTR_E }, + { "sam31", 0x0d, INSTR_E }, + { "trap2", 0xff, INSTR_E }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_a5[] = { +#ifdef CONFIG_64BIT + { 
"iihh", 0x00, INSTR_RI_RU }, + { "iihl", 0x01, INSTR_RI_RU }, + { "iilh", 0x02, INSTR_RI_RU }, + { "iill", 0x03, INSTR_RI_RU }, + { "nihh", 0x04, INSTR_RI_RU }, + { "nihl", 0x05, INSTR_RI_RU }, + { "nilh", 0x06, INSTR_RI_RU }, + { "nill", 0x07, INSTR_RI_RU }, + { "oihh", 0x08, INSTR_RI_RU }, + { "oihl", 0x09, INSTR_RI_RU }, + { "oilh", 0x0a, INSTR_RI_RU }, + { "oill", 0x0b, INSTR_RI_RU }, + { "llihh", 0x0c, INSTR_RI_RU }, + { "llihl", 0x0d, INSTR_RI_RU }, + { "llilh", 0x0e, INSTR_RI_RU }, + { "llill", 0x0f, INSTR_RI_RU }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_a7[] = { +#ifdef CONFIG_64BIT + { "tmhh", 0x02, INSTR_RI_RU }, + { "tmhl", 0x03, INSTR_RI_RU }, + { "brctg", 0x07, INSTR_RI_RP }, + { "lghi", 0x09, INSTR_RI_RI }, + { "aghi", 0x0b, INSTR_RI_RI }, + { "mghi", 0x0d, INSTR_RI_RI }, + { "cghi", 0x0f, INSTR_RI_RI }, +#endif + { "tmlh", 0x00, INSTR_RI_RU }, + { "tmll", 0x01, INSTR_RI_RU }, + { "brc", 0x04, INSTR_RI_UP }, + { "bras", 0x05, INSTR_RI_RP }, + { "brct", 0x06, INSTR_RI_RP }, + { "lhi", 0x08, INSTR_RI_RI }, + { "ahi", 0x0a, INSTR_RI_RI }, + { "mhi", 0x0c, INSTR_RI_RI }, + { "chi", 0x0e, INSTR_RI_RI }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_b2[] = { +#ifdef CONFIG_64BIT + { "sske", 0x2b, INSTR_RRF_M0RR }, + { "stckf", 0x7c, INSTR_S_RD }, + { "cu21", 0xa6, INSTR_RRF_M0RR }, + { "cuutf", 0xa6, INSTR_RRF_M0RR }, + { "cu12", 0xa7, INSTR_RRF_M0RR }, + { "cutfu", 0xa7, INSTR_RRF_M0RR }, + { "stfle", 0xb0, INSTR_S_RD }, + { "lpswe", 0xb2, INSTR_S_RD }, +#endif + { "stidp", 0x02, INSTR_S_RD }, + { "sck", 0x04, INSTR_S_RD }, + { "stck", 0x05, INSTR_S_RD }, + { "sckc", 0x06, INSTR_S_RD }, + { "stckc", 0x07, INSTR_S_RD }, + { "spt", 0x08, INSTR_S_RD }, + { "stpt", 0x09, INSTR_S_RD }, + { "spka", 0x0a, INSTR_S_RD }, + { "ipk", 0x0b, INSTR_S_00 }, + { "ptlb", 0x0d, INSTR_S_00 }, + { "spx", 0x10, INSTR_S_RD }, + { "stpx", 0x11, INSTR_S_RD }, + { "stap", 0x12, INSTR_S_RD }, + { "sie", 0x14, INSTR_S_RD }, + { "pc", 0x18, INSTR_S_RD }, + { "sac", 0x19, INSTR_S_RD }, + { "cfc", 0x1a, INSTR_S_RD }, + { "ipte", 0x21, INSTR_RRE_RR }, + { "ipm", 0x22, INSTR_RRE_R0 }, + { "ivsk", 0x23, INSTR_RRE_RR }, + { "iac", 0x24, INSTR_RRE_R0 }, + { "ssar", 0x25, INSTR_RRE_R0 }, + { "epar", 0x26, INSTR_RRE_R0 }, + { "esar", 0x27, INSTR_RRE_R0 }, + { "pt", 0x28, INSTR_RRE_RR }, + { "iske", 0x29, INSTR_RRE_RR }, + { "rrbe", 0x2a, INSTR_RRE_RR }, + { "sske", 0x2b, INSTR_RRE_RR }, + { "tb", 0x2c, INSTR_RRE_0R }, + { "dxr", 0x2d, INSTR_RRE_F0 }, + { "pgin", 0x2e, INSTR_RRE_RR }, + { "pgout", 0x2f, INSTR_RRE_RR }, + { "csch", 0x30, INSTR_S_00 }, + { "hsch", 0x31, INSTR_S_00 }, + { "msch", 0x32, INSTR_S_RD }, + { "ssch", 0x33, INSTR_S_RD }, + { "stsch", 0x34, INSTR_S_RD }, + { "tsch", 0x35, INSTR_S_RD }, + { "tpi", 0x36, INSTR_S_RD }, + { "sal", 0x37, INSTR_S_00 }, + { "rsch", 0x38, INSTR_S_00 }, + { "stcrw", 0x39, INSTR_S_RD }, + { "stcps", 0x3a, INSTR_S_RD }, + { "rchp", 0x3b, INSTR_S_00 }, + { "schm", 0x3c, INSTR_S_00 }, + { "bakr", 0x40, INSTR_RRE_RR }, + { "cksm", 0x41, INSTR_RRE_RR }, + { "sqdr", 0x44, INSTR_RRE_F0 }, + { "sqer", 0x45, INSTR_RRE_F0 }, + { "stura", 0x46, INSTR_RRE_RR }, + { "msta", 0x47, INSTR_RRE_R0 }, + { "palb", 0x48, INSTR_RRE_00 }, + { "ereg", 0x49, INSTR_RRE_RR }, + { "esta", 0x4a, INSTR_RRE_RR }, + { "lura", 0x4b, INSTR_RRE_RR }, + { "tar", 0x4c, INSTR_RRE_AR }, + { "cpya", INSTR_RRE_AA }, + { "sar", 0x4e, INSTR_RRE_AR }, + { "ear", 0x4f, INSTR_RRE_RA }, + { "csp", 0x50, INSTR_RRE_RR }, + { "msr", 0x52, INSTR_RRE_RR }, + { "mvpg", 0x54, 
INSTR_RRE_RR }, + { "mvst", 0x55, INSTR_RRE_RR }, + { "cuse", 0x57, INSTR_RRE_RR }, + { "bsg", 0x58, INSTR_RRE_RR }, + { "bsa", 0x5a, INSTR_RRE_RR }, + { "clst", 0x5d, INSTR_RRE_RR }, + { "srst", 0x5e, INSTR_RRE_RR }, + { "cmpsc", 0x63, INSTR_RRE_RR }, + { "cmpsc", 0x63, INSTR_RRE_RR }, + { "siga", 0x74, INSTR_S_RD }, + { "xsch", 0x76, INSTR_S_00 }, + { "rp", 0x77, INSTR_S_RD }, + { "stcke", 0x78, INSTR_S_RD }, + { "sacf", 0x79, INSTR_S_RD }, + { "stsi", 0x7d, INSTR_S_RD }, + { "srnm", 0x99, INSTR_S_RD }, + { "stfpc", 0x9c, INSTR_S_RD }, + { "lfpc", 0x9d, INSTR_S_RD }, + { "tre", 0xa5, INSTR_RRE_RR }, + { "cuutf", 0xa6, INSTR_RRE_RR }, + { "cutfu", 0xa7, INSTR_RRE_RR }, + { "stfl", 0xb1, INSTR_S_RD }, + { "trap4", 0xff, INSTR_S_RD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_b3[] = { +#ifdef CONFIG_64BIT + { "maylr", 0x38, INSTR_RRF_F0FF }, + { "mylr", 0x39, INSTR_RRF_F0FF }, + { "mayr", 0x3a, INSTR_RRF_F0FF }, + { "myr", 0x3b, INSTR_RRF_F0FF }, + { "mayhr", 0x3c, INSTR_RRF_F0FF }, + { "myhr", 0x3d, INSTR_RRF_F0FF }, + { "cegbr", 0xa4, INSTR_RRE_RR }, + { "cdgbr", 0xa5, INSTR_RRE_RR }, + { "cxgbr", 0xa6, INSTR_RRE_RR }, + { "cgebr", 0xa8, INSTR_RRF_U0RF }, + { "cgdbr", 0xa9, INSTR_RRF_U0RF }, + { "cgxbr", 0xaa, INSTR_RRF_U0RF }, + { "cfer", 0xb8, INSTR_RRF_U0RF }, + { "cfdr", 0xb9, INSTR_RRF_U0RF }, + { "cfxr", 0xba, INSTR_RRF_U0RF }, + { "cegr", 0xc4, INSTR_RRE_RR }, + { "cdgr", 0xc5, INSTR_RRE_RR }, + { "cxgr", 0xc6, INSTR_RRE_RR }, + { "cger", 0xc8, INSTR_RRF_U0RF }, + { "cgdr", 0xc9, INSTR_RRF_U0RF }, + { "cgxr", 0xca, INSTR_RRF_U0RF }, +#endif + { "lpebr", 0x00, INSTR_RRE_FF }, + { "lnebr", 0x01, INSTR_RRE_FF }, + { "ltebr", 0x02, INSTR_RRE_FF }, + { "lcebr", 0x03, INSTR_RRE_FF }, + { "ldebr", 0x04, INSTR_RRE_FF }, + { "lxdbr", 0x05, INSTR_RRE_FF }, + { "lxebr", 0x06, INSTR_RRE_FF }, + { "mxdbr", 0x07, INSTR_RRE_FF }, + { "kebr", 0x08, INSTR_RRE_FF }, + { "cebr", 0x09, INSTR_RRE_FF }, + { "aebr", 0x0a, INSTR_RRE_FF }, + { "sebr", 0x0b, INSTR_RRE_FF }, + { "mdebr", 0x0c, INSTR_RRE_FF }, + { "debr", 0x0d, INSTR_RRE_FF }, + { "maebr", 0x0e, INSTR_RRF_F0FF }, + { "msebr", 0x0f, INSTR_RRF_F0FF }, + { "lpdbr", 0x10, INSTR_RRE_FF }, + { "lndbr", 0x11, INSTR_RRE_FF }, + { "ltdbr", 0x12, INSTR_RRE_FF }, + { "lcdbr", 0x13, INSTR_RRE_FF }, + { "sqebr", 0x14, INSTR_RRE_FF }, + { "sqdbr", 0x15, INSTR_RRE_FF }, + { "sqxbr", 0x16, INSTR_RRE_FF }, + { "meebr", 0x17, INSTR_RRE_FF }, + { "kdbr", 0x18, INSTR_RRE_FF }, + { "cdbr", 0x19, INSTR_RRE_FF }, + { "adbr", 0x1a, INSTR_RRE_FF }, + { "sdbr", 0x1b, INSTR_RRE_FF }, + { "mdbr", 0x1c, INSTR_RRE_FF }, + { "ddbr", 0x1d, INSTR_RRE_FF }, + { "madbr", 0x1e, INSTR_RRF_F0FF }, + { "msdbr", 0x1f, INSTR_RRF_F0FF }, + { "lder", 0x24, INSTR_RRE_FF }, + { "lxdr", 0x25, INSTR_RRE_FF }, + { "lxer", 0x26, INSTR_RRE_FF }, + { "maer", 0x2e, INSTR_RRF_F0FF }, + { "mser", 0x2f, INSTR_RRF_F0FF }, + { "sqxr", 0x36, INSTR_RRE_FF }, + { "meer", 0x37, INSTR_RRE_FF }, + { "madr", 0x3e, INSTR_RRF_F0FF }, + { "msdr", 0x3f, INSTR_RRF_F0FF }, + { "lpxbr", 0x40, INSTR_RRE_FF }, + { "lnxbr", 0x41, INSTR_RRE_FF }, + { "ltxbr", 0x42, INSTR_RRE_FF }, + { "lcxbr", 0x43, INSTR_RRE_FF }, + { "ledbr", 0x44, INSTR_RRE_FF }, + { "ldxbr", 0x45, INSTR_RRE_FF }, + { "lexbr", 0x46, INSTR_RRE_FF }, + { "fixbr", 0x47, INSTR_RRF_U0FF }, + { "kxbr", 0x48, INSTR_RRE_FF }, + { "cxbr", 0x49, INSTR_RRE_FF }, + { "axbr", 0x4a, INSTR_RRE_FF }, + { "sxbr", 0x4b, INSTR_RRE_FF }, + { "mxbr", 0x4c, INSTR_RRE_FF }, + { "dxbr", 0x4d, INSTR_RRE_FF }, + { "tbedr", 0x50, INSTR_RRF_U0FF }, + { 
"tbdr", 0x51, INSTR_RRF_U0FF }, + { "diebr", 0x53, INSTR_RRF_FUFF }, + { "fiebr", 0x57, INSTR_RRF_U0FF }, + { "thder", 0x58, INSTR_RRE_RR }, + { "thdr", 0x59, INSTR_RRE_RR }, + { "didbr", 0x5b, INSTR_RRF_FUFF }, + { "fidbr", 0x5f, INSTR_RRF_U0FF }, + { "lpxr", 0x60, INSTR_RRE_FF }, + { "lnxr", 0x61, INSTR_RRE_FF }, + { "ltxr", 0x62, INSTR_RRE_FF }, + { "lcxr", 0x63, INSTR_RRE_FF }, + { "lxr", 0x65, INSTR_RRE_RR }, + { "lexr", 0x66, INSTR_RRE_FF }, + { "fixr", 0x67, INSTR_RRF_U0FF }, + { "cxr", 0x69, INSTR_RRE_FF }, + { "lzer", 0x74, INSTR_RRE_R0 }, + { "lzdr", 0x75, INSTR_RRE_R0 }, + { "lzxr", 0x76, INSTR_RRE_R0 }, + { "fier", 0x77, INSTR_RRF_U0FF }, + { "fidr", 0x7f, INSTR_RRF_U0FF }, + { "sfpc", 0x84, INSTR_RRE_RR_OPT }, + { "efpc", 0x8c, INSTR_RRE_RR_OPT }, + { "cefbr", 0x94, INSTR_RRE_RF }, + { "cdfbr", 0x95, INSTR_RRE_RF }, + { "cxfbr", 0x96, INSTR_RRE_RF }, + { "cfebr", 0x98, INSTR_RRF_U0RF }, + { "cfdbr", 0x99, INSTR_RRF_U0RF }, + { "cfxbr", 0x9a, INSTR_RRF_U0RF }, + { "cefr", 0xb4, INSTR_RRE_RF }, + { "cdfr", 0xb5, INSTR_RRE_RF }, + { "cxfr", 0xb6, INSTR_RRE_RF }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_b9[] = { +#ifdef CONFIG_64BIT + { "lpgr", 0x00, INSTR_RRE_RR }, + { "lngr", 0x01, INSTR_RRE_RR }, + { "ltgr", 0x02, INSTR_RRE_RR }, + { "lcgr", 0x03, INSTR_RRE_RR }, + { "lgr", 0x04, INSTR_RRE_RR }, + { "lurag", 0x05, INSTR_RRE_RR }, + { "lgbr", 0x06, INSTR_RRE_RR }, + { "lghr", 0x07, INSTR_RRE_RR }, + { "agr", 0x08, INSTR_RRE_RR }, + { "sgr", 0x09, INSTR_RRE_RR }, + { "algr", 0x0a, INSTR_RRE_RR }, + { "slgr", 0x0b, INSTR_RRE_RR }, + { "msgr", 0x0c, INSTR_RRE_RR }, + { "dsgr", 0x0d, INSTR_RRE_RR }, + { "eregg", 0x0e, INSTR_RRE_RR }, + { "lrvgr", 0x0f, INSTR_RRE_RR }, + { "lpgfr", 0x10, INSTR_RRE_RR }, + { "lngfr", 0x11, INSTR_RRE_RR }, + { "ltgfr", 0x12, INSTR_RRE_RR }, + { "lcgfr", 0x13, INSTR_RRE_RR }, + { "lgfr", 0x14, INSTR_RRE_RR }, + { "llgfr", 0x16, INSTR_RRE_RR }, + { "llgtr", 0x17, INSTR_RRE_RR }, + { "agfr", 0x18, INSTR_RRE_RR }, + { "sgfr", 0x19, INSTR_RRE_RR }, + { "algfr", 0x1a, INSTR_RRE_RR }, + { "slgfr", 0x1b, INSTR_RRE_RR }, + { "msgfr", 0x1c, INSTR_RRE_RR }, + { "dsgfr", 0x1d, INSTR_RRE_RR }, + { "cgr", 0x20, INSTR_RRE_RR }, + { "clgr", 0x21, INSTR_RRE_RR }, + { "sturg", 0x25, INSTR_RRE_RR }, + { "lbr", 0x26, INSTR_RRE_RR }, + { "lhr", 0x27, INSTR_RRE_RR }, + { "cgfr", 0x30, INSTR_RRE_RR }, + { "clgfr", 0x31, INSTR_RRE_RR }, + { "bctgr", 0x46, INSTR_RRE_RR }, + { "ngr", 0x80, INSTR_RRE_RR }, + { "ogr", 0x81, INSTR_RRE_RR }, + { "xgr", 0x82, INSTR_RRE_RR }, + { "flogr", 0x83, INSTR_RRE_RR }, + { "llgcr", 0x84, INSTR_RRE_RR }, + { "llghr", 0x85, INSTR_RRE_RR }, + { "mlgr", 0x86, INSTR_RRE_RR }, + { "dlgr", 0x87, INSTR_RRE_RR }, + { "alcgr", 0x88, INSTR_RRE_RR }, + { "slbgr", 0x89, INSTR_RRE_RR }, + { "cspg", 0x8a, INSTR_RRE_RR }, + { "idte", 0x8e, INSTR_RRF_R0RR }, + { "llcr", 0x94, INSTR_RRE_RR }, + { "llhr", 0x95, INSTR_RRE_RR }, + { "esea", 0x9d, INSTR_RRE_R0 }, + { "lptea", 0xaa, INSTR_RRF_RURR }, + { "cu14", 0xb0, INSTR_RRF_M0RR }, + { "cu24", 0xb1, INSTR_RRF_M0RR }, + { "cu41", 0xb2, INSTR_RRF_M0RR }, + { "cu42", 0xb3, INSTR_RRF_M0RR }, +#endif + { "kmac", 0x1e, INSTR_RRE_RR }, + { "lrvr", 0x1f, INSTR_RRE_RR }, + { "km", 0x2e, INSTR_RRE_RR }, + { "kmc", 0x2f, INSTR_RRE_RR }, + { "kimd", 0x3e, INSTR_RRE_RR }, + { "klmd", 0x3f, INSTR_RRE_RR }, + { "epsw", 0x8d, INSTR_RRE_RR }, + { "trtt", 0x90, INSTR_RRE_RR }, + { "trtt", 0x90, INSTR_RRF_M0RR }, + { "trto", 0x91, INSTR_RRE_RR }, + { "trto", 0x91, INSTR_RRF_M0RR }, + { "trot", 0x92, 
INSTR_RRE_RR }, + { "trot", 0x92, INSTR_RRF_M0RR }, + { "troo", 0x93, INSTR_RRE_RR }, + { "troo", 0x93, INSTR_RRF_M0RR }, + { "mlr", 0x96, INSTR_RRE_RR }, + { "dlr", 0x97, INSTR_RRE_RR }, + { "alcr", 0x98, INSTR_RRE_RR }, + { "slbr", 0x99, INSTR_RRE_RR }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_c0[] = { +#ifdef CONFIG_64BIT + { "lgfi", 0x01, INSTR_RIL_RI }, + { "xihf", 0x06, INSTR_RIL_RU }, + { "xilf", 0x07, INSTR_RIL_RU }, + { "iihf", 0x08, INSTR_RIL_RU }, + { "iilf", 0x09, INSTR_RIL_RU }, + { "nihf", 0x0a, INSTR_RIL_RU }, + { "nilf", 0x0b, INSTR_RIL_RU }, + { "oihf", 0x0c, INSTR_RIL_RU }, + { "oilf", 0x0d, INSTR_RIL_RU }, + { "llihf", 0x0e, INSTR_RIL_RU }, + { "llilf", 0x0f, INSTR_RIL_RU }, +#endif + { "larl", 0x00, INSTR_RIL_RP }, + { "brcl", 0x04, INSTR_RIL_UP }, + { "brasl", 0x05, INSTR_RIL_RP }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_c2[] = { +#ifdef CONFIG_64BIT + { "slgfi", 0x04, INSTR_RIL_RU }, + { "slfi", 0x05, INSTR_RIL_RU }, + { "agfi", 0x08, INSTR_RIL_RI }, + { "afi", 0x09, INSTR_RIL_RI }, + { "algfi", 0x0a, INSTR_RIL_RU }, + { "alfi", 0x0b, INSTR_RIL_RU }, + { "cgfi", 0x0c, INSTR_RIL_RI }, + { "cfi", 0x0d, INSTR_RIL_RI }, + { "clgfi", 0x0e, INSTR_RIL_RU }, + { "clfi", 0x0f, INSTR_RIL_RU }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_c8[] = { +#ifdef CONFIG_64BIT + { "mvcos", 0x00, INSTR_SSF_RRDRD }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_e3[] = { +#ifdef CONFIG_64BIT + { "ltg", 0x02, INSTR_RXY_RRRD }, + { "lrag", 0x03, INSTR_RXY_RRRD }, + { "lg", 0x04, INSTR_RXY_RRRD }, + { "cvby", 0x06, INSTR_RXY_RRRD }, + { "ag", 0x08, INSTR_RXY_RRRD }, + { "sg", 0x09, INSTR_RXY_RRRD }, + { "alg", 0x0a, INSTR_RXY_RRRD }, + { "slg", 0x0b, INSTR_RXY_RRRD }, + { "msg", 0x0c, INSTR_RXY_RRRD }, + { "dsg", 0x0d, INSTR_RXY_RRRD }, + { "cvbg", 0x0e, INSTR_RXY_RRRD }, + { "lrvg", 0x0f, INSTR_RXY_RRRD }, + { "lt", 0x12, INSTR_RXY_RRRD }, + { "lray", 0x13, INSTR_RXY_RRRD }, + { "lgf", 0x14, INSTR_RXY_RRRD }, + { "lgh", 0x15, INSTR_RXY_RRRD }, + { "llgf", 0x16, INSTR_RXY_RRRD }, + { "llgt", 0x17, INSTR_RXY_RRRD }, + { "agf", 0x18, INSTR_RXY_RRRD }, + { "sgf", 0x19, INSTR_RXY_RRRD }, + { "algf", 0x1a, INSTR_RXY_RRRD }, + { "slgf", 0x1b, INSTR_RXY_RRRD }, + { "msgf", 0x1c, INSTR_RXY_RRRD }, + { "dsgf", 0x1d, INSTR_RXY_RRRD }, + { "cg", 0x20, INSTR_RXY_RRRD }, + { "clg", 0x21, INSTR_RXY_RRRD }, + { "stg", 0x24, INSTR_RXY_RRRD }, + { "cvdy", 0x26, INSTR_RXY_RRRD }, + { "cvdg", 0x2e, INSTR_RXY_RRRD }, + { "strvg", 0x2f, INSTR_RXY_RRRD }, + { "cgf", 0x30, INSTR_RXY_RRRD }, + { "clgf", 0x31, INSTR_RXY_RRRD }, + { "strvh", 0x3f, INSTR_RXY_RRRD }, + { "bctg", 0x46, INSTR_RXY_RRRD }, + { "sty", 0x50, INSTR_RXY_RRRD }, + { "msy", 0x51, INSTR_RXY_RRRD }, + { "ny", 0x54, INSTR_RXY_RRRD }, + { "cly", 0x55, INSTR_RXY_RRRD }, + { "oy", 0x56, INSTR_RXY_RRRD }, + { "xy", 0x57, INSTR_RXY_RRRD }, + { "ly", 0x58, INSTR_RXY_RRRD }, + { "cy", 0x59, INSTR_RXY_RRRD }, + { "ay", 0x5a, INSTR_RXY_RRRD }, + { "sy", 0x5b, INSTR_RXY_RRRD }, + { "aly", 0x5e, INSTR_RXY_RRRD }, + { "sly", 0x5f, INSTR_RXY_RRRD }, + { "sthy", 0x70, INSTR_RXY_RRRD }, + { "lay", 0x71, INSTR_RXY_RRRD }, + { "stcy", 0x72, INSTR_RXY_RRRD }, + { "icy", 0x73, INSTR_RXY_RRRD }, + { "lb", 0x76, INSTR_RXY_RRRD }, + { "lgb", 0x77, INSTR_RXY_RRRD }, + { "lhy", 0x78, INSTR_RXY_RRRD }, + { "chy", 0x79, INSTR_RXY_RRRD }, + { "ahy", 0x7a, INSTR_RXY_RRRD }, + { "shy", 0x7b, INSTR_RXY_RRRD }, + { "ng", 0x80, INSTR_RXY_RRRD }, + { "og", 0x81, INSTR_RXY_RRRD }, + { "xg", 0x82, 
INSTR_RXY_RRRD }, + { "mlg", 0x86, INSTR_RXY_RRRD }, + { "dlg", 0x87, INSTR_RXY_RRRD }, + { "alcg", 0x88, INSTR_RXY_RRRD }, + { "slbg", 0x89, INSTR_RXY_RRRD }, + { "stpq", 0x8e, INSTR_RXY_RRRD }, + { "lpq", 0x8f, INSTR_RXY_RRRD }, + { "llgc", 0x90, INSTR_RXY_RRRD }, + { "llgh", 0x91, INSTR_RXY_RRRD }, + { "llc", 0x94, INSTR_RXY_RRRD }, + { "llh", 0x95, INSTR_RXY_RRRD }, +#endif + { "lrv", 0x1e, INSTR_RXY_RRRD }, + { "lrvh", 0x1f, INSTR_RXY_RRRD }, + { "strv", 0x3e, INSTR_RXY_RRRD }, + { "ml", 0x96, INSTR_RXY_RRRD }, + { "dl", 0x97, INSTR_RXY_RRRD }, + { "alc", 0x98, INSTR_RXY_RRRD }, + { "slb", 0x99, INSTR_RXY_RRRD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_e5[] = { +#ifdef CONFIG_64BIT + { "strag", 0x02, INSTR_SSE_RDRD }, +#endif + { "lasp", 0x00, INSTR_SSE_RDRD }, + { "tprot", 0x01, INSTR_SSE_RDRD }, + { "mvcsk", 0x0e, INSTR_SSE_RDRD }, + { "mvcdk", 0x0f, INSTR_SSE_RDRD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_eb[] = { +#ifdef CONFIG_64BIT + { "lmg", 0x04, INSTR_RSY_RRRD }, + { "srag", 0x0a, INSTR_RSY_RRRD }, + { "slag", 0x0b, INSTR_RSY_RRRD }, + { "srlg", 0x0c, INSTR_RSY_RRRD }, + { "sllg", 0x0d, INSTR_RSY_RRRD }, + { "tracg", 0x0f, INSTR_RSY_RRRD }, + { "csy", 0x14, INSTR_RSY_RRRD }, + { "rllg", 0x1c, INSTR_RSY_RRRD }, + { "clmh", 0x20, INSTR_RSY_RURD }, + { "clmy", 0x21, INSTR_RSY_RURD }, + { "stmg", 0x24, INSTR_RSY_RRRD }, + { "stctg", 0x25, INSTR_RSY_CCRD }, + { "stmh", 0x26, INSTR_RSY_RRRD }, + { "stcmh", 0x2c, INSTR_RSY_RURD }, + { "stcmy", 0x2d, INSTR_RSY_RURD }, + { "lctlg", 0x2f, INSTR_RSY_CCRD }, + { "csg", 0x30, INSTR_RSY_RRRD }, + { "cdsy", 0x31, INSTR_RSY_RRRD }, + { "cdsg", 0x3e, INSTR_RSY_RRRD }, + { "bxhg", 0x44, INSTR_RSY_RRRD }, + { "bxleg", 0x45, INSTR_RSY_RRRD }, + { "tmy", 0x51, INSTR_SIY_URD }, + { "mviy", 0x52, INSTR_SIY_URD }, + { "niy", 0x54, INSTR_SIY_URD }, + { "cliy", 0x55, INSTR_SIY_URD }, + { "oiy", 0x56, INSTR_SIY_URD }, + { "xiy", 0x57, INSTR_SIY_URD }, + { "icmh", 0x80, INSTR_RSE_RURD }, + { "icmh", 0x80, INSTR_RSY_RURD }, + { "icmy", 0x81, INSTR_RSY_RURD }, + { "clclu", 0x8f, INSTR_RSY_RRRD }, + { "stmy", 0x90, INSTR_RSY_RRRD }, + { "lmh", 0x96, INSTR_RSY_RRRD }, + { "lmy", 0x98, INSTR_RSY_RRRD }, + { "lamy", 0x9a, INSTR_RSY_AARD }, + { "stamy", 0x9b, INSTR_RSY_AARD }, +#endif + { "rll", 0x1d, INSTR_RSY_RRRD }, + { "mvclu", 0x8e, INSTR_RSY_RRRD }, + { "tp", 0xc0, INSTR_RSL_R0RD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_ec[] = { +#ifdef CONFIG_64BIT + { "brxhg", 0x44, INSTR_RIE_RRP }, + { "brxlg", 0x45, INSTR_RIE_RRP }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_ed[] = { +#ifdef CONFIG_64BIT + { "mayl", 0x38, INSTR_RXF_FRRDF }, + { "myl", 0x39, INSTR_RXF_FRRDF }, + { "may", 0x3a, INSTR_RXF_FRRDF }, + { "my", 0x3b, INSTR_RXF_FRRDF }, + { "mayh", 0x3c, INSTR_RXF_FRRDF }, + { "myh", 0x3d, INSTR_RXF_FRRDF }, + { "ley", 0x64, INSTR_RXY_FRRD }, + { "ldy", 0x65, INSTR_RXY_FRRD }, + { "stey", 0x66, INSTR_RXY_FRRD }, + { "stdy", 0x67, INSTR_RXY_FRRD }, +#endif + { "ldeb", 0x04, INSTR_RXE_FRRD }, + { "lxdb", 0x05, INSTR_RXE_FRRD }, + { "lxeb", 0x06, INSTR_RXE_FRRD }, + { "mxdb", 0x07, INSTR_RXE_FRRD }, + { "keb", 0x08, INSTR_RXE_FRRD }, + { "ceb", 0x09, INSTR_RXE_FRRD }, + { "aeb", 0x0a, INSTR_RXE_FRRD }, + { "seb", 0x0b, INSTR_RXE_FRRD }, + { "mdeb", 0x0c, INSTR_RXE_FRRD }, + { "deb", 0x0d, INSTR_RXE_FRRD }, + { "maeb", 0x0e, INSTR_RXF_FRRDF }, + { "mseb", 0x0f, INSTR_RXF_FRRDF }, + { "tceb", 0x10, INSTR_RXE_FRRD }, + { "tcdb", 0x11, INSTR_RXE_FRRD }, + { "tcxb", 0x12, 
INSTR_RXE_FRRD }, + { "sqeb", 0x14, INSTR_RXE_FRRD }, + { "sqdb", 0x15, INSTR_RXE_FRRD }, + { "meeb", 0x17, INSTR_RXE_FRRD }, + { "kdb", 0x18, INSTR_RXE_FRRD }, + { "cdb", 0x19, INSTR_RXE_FRRD }, + { "adb", 0x1a, INSTR_RXE_FRRD }, + { "sdb", 0x1b, INSTR_RXE_FRRD }, + { "mdb", 0x1c, INSTR_RXE_FRRD }, + { "ddb", 0x1d, INSTR_RXE_FRRD }, + { "madb", 0x1e, INSTR_RXF_FRRDF }, + { "msdb", 0x1f, INSTR_RXF_FRRDF }, + { "lde", 0x24, INSTR_RXE_FRRD }, + { "lxd", 0x25, INSTR_RXE_FRRD }, + { "lxe", 0x26, INSTR_RXE_FRRD }, + { "mae", 0x2e, INSTR_RXF_FRRDF }, + { "mse", 0x2f, INSTR_RXF_FRRDF }, + { "sqe", 0x34, INSTR_RXE_FRRD }, + { "mee", 0x37, INSTR_RXE_FRRD }, + { "mad", 0x3e, INSTR_RXF_FRRDF }, + { "msd", 0x3f, INSTR_RXF_FRRDF }, + { "", 0, INSTR_INVALID } +}; + +/* Extracts an operand value from an instruction. */ +static unsigned int extract_operand(unsigned char *code, + const struct operand *operand) +{ + unsigned int val; + int bits; + + /* Extract fragments of the operand byte for byte. */ + code += operand->shift / 8; + bits = (operand->shift & 7) + operand->bits; + val = 0; + do { + val <<= 8; + val |= (unsigned int) *code++; + bits -= 8; + } while (bits > 0); + val >>= -bits; + val &= ((1U << (operand->bits - 1)) << 1) - 1; + + /* Check for special long displacement case. */ + if (operand->bits == 20 && operand->shift == 20) + val = (val & 0xff) << 12 | (val & 0xfff00) >> 8; + + /* Sign extend value if the operand is signed or pc relative. */ + if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) && + (val & (1U << (operand->bits - 1)))) + val |= (-1U << (operand->bits - 1)) << 1; + + /* Double value if the operand is pc relative. */ + if (operand->flags & OPERAND_PCREL) + val <<= 1; + + /* Length x in an instructions has real length x + 1. */ + if (operand->flags & OPERAND_LENGTH) + val++; + return val; +} + +static inline int insn_length(unsigned char code) +{ + return ((((int) code + 64) >> 7) + 1) << 1; +} + +static struct insn *find_insn(unsigned char *code) +{ + unsigned char opfrag = code[1]; + unsigned char opmask; + struct insn *table; + + switch (code[0]) { + case 0x01: + table = opcode_01; + break; + case 0xa5: + table = opcode_a5; + break; + case 0xa7: + table = opcode_a7; + break; + case 0xb2: + table = opcode_b2; + break; + case 0xb3: + table = opcode_b3; + break; + case 0xb9: + table = opcode_b9; + break; + case 0xc0: + table = opcode_c0; + break; + case 0xc2: + table = opcode_c2; + break; + case 0xc8: + table = opcode_c8; + break; + case 0xe3: + table = opcode_e3; + opfrag = code[5]; + break; + case 0xe5: + table = opcode_e5; + break; + case 0xeb: + table = opcode_eb; + opfrag = code[5]; + break; + case 0xec: + table = opcode_ec; + opfrag = code[5]; + break; + case 0xed: + table = opcode_ed; + opfrag = code[5]; + break; + default: + table = opcode; + opfrag = code[0]; + break; + } + while (table->format != INSTR_INVALID) { + opmask = formats[table->format][0]; + if (table->opfrag == (opfrag & opmask)) + return table; + table++; + } + return NULL; +} + +static int print_insn(char *buffer, unsigned char *code, unsigned long addr) +{ + struct insn *insn; + const unsigned char *ops; + const struct operand *operand; + unsigned int value; + char separator; + char *ptr; + + ptr = buffer; + insn = find_insn(code); + if (insn) { + ptr += sprintf(ptr, "%.5s\t", insn->name); + /* Extract the operands. 
*/ + separator = 0; + for (ops = formats[insn->format] + 1; *ops != 0; ops++) { + operand = operands + *ops; + value = extract_operand(code, operand); + if ((operand->flags & OPERAND_INDEX) && value == 0) + continue; + if ((operand->flags & OPERAND_BASE) && + value == 0 && separator == '(') { + separator = ','; + continue; + } + if (separator) + ptr += sprintf(ptr, "%c", separator); + if (operand->flags & OPERAND_GPR) + ptr += sprintf(ptr, "%%r%i", value); + else if (operand->flags & OPERAND_FPR) + ptr += sprintf(ptr, "%%f%i", value); + else if (operand->flags & OPERAND_AR) + ptr += sprintf(ptr, "%%a%i", value); + else if (operand->flags & OPERAND_CR) + ptr += sprintf(ptr, "%%c%i", value); + else if (operand->flags & OPERAND_PCREL) + ptr += sprintf(ptr, "%lx", value + addr); + else if (operand->flags & OPERAND_SIGNED) + ptr += sprintf(ptr, "%i", value); + else + ptr += sprintf(ptr, "%u", value); + if (operand->flags & OPERAND_DISP) + separator = '('; + else if (operand->flags & OPERAND_BASE) { + ptr += sprintf(ptr, ")"); + separator = ','; + } else + separator = ','; + } + } else + ptr += sprintf(ptr, "unknown"); + return (int) (ptr - buffer); +} + +void show_code(struct pt_regs *regs) +{ + char *mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; + unsigned char code[64]; + char buffer[64], *ptr; + mm_segment_t old_fs; + unsigned long addr; + int start, end, opsize, hops, i; + + /* Get a snapshot of the 64 bytes surrounding the fault address. */ + old_fs = get_fs(); + set_fs((regs->psw.mask & PSW_MASK_PSTATE) ? USER_DS : KERNEL_DS); + for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) { + addr = regs->psw.addr - 34 + start; + if (__copy_from_user(code + start - 2, + (char __user *) addr, 2)) + break; + } + for (end = 32; end < 64; end += 2) { + addr = regs->psw.addr + end - 32; + if (__copy_from_user(code + end, + (char __user *) addr, 2)) + break; + } + set_fs(old_fs); + /* Code snapshot useable ? */ + if ((regs->psw.addr & 1) || start >= end) { + printk("%s Code: Bad PSW.\n", mode); + return; + } + /* Find a starting point for the disassembly. */ + while (start < 32) { + hops = 0; + for (i = 0, hops = 0; start + i < 32 && hops < 3; hops++) { + if (!find_insn(code + start + i)) + break; + i += insn_length(code[start + i]); + } + if (start + i == 32) + /* Looks good, sequence ends at PSW. */ + break; + start += 2; + } + /* Decode the instructions. */ + ptr = buffer; + ptr += sprintf(ptr, "%s Code:", mode); + hops = 0; + while (start < end && hops < 8) { + *ptr++ = (start == 32) ? '>' : ' '; + addr = regs->psw.addr + start - 32; + ptr += sprintf(ptr, ONELONG, addr); + opsize = insn_length(code[start]); + if (start + opsize >= end) + break; + for (i = 0; i < opsize; i++) + ptr += sprintf(ptr, "%02x", code[start + i]); + *ptr++ = '\t'; + if (i < 6) + *ptr++ = '\t'; + ptr += print_insn(ptr, code + start, addr); + start += opsize; + printk(buffer); + ptr = buffer; + ptr += sprintf(ptr, "\n "); + hops++; + } + printk("\n"); +} diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index f0e5a320e2ec..a6540940190b 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -188,18 +188,31 @@ void dump_stack(void) EXPORT_SYMBOL(dump_stack); +static inline int mask_bits(struct pt_regs *regs, unsigned long bits) +{ + return (regs->psw.mask & bits) / ((~bits + 1) & bits); +} + void show_registers(struct pt_regs *regs) { - mm_segment_t old_fs; char *mode; - int i; mode = (regs->psw.mask & PSW_MASK_PSTATE) ? 
"User" : "Krnl"; printk("%s PSW : %p %p", mode, (void *) regs->psw.mask, (void *) regs->psw.addr); print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); - printk("%s GPRS: " FOURLONG, mode, + printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " + "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), + mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), + mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), + mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), + mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), + mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); +#ifdef CONFIG_64BIT + printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS)); +#endif + printk("\n%s GPRS: " FOURLONG, mode, regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); printk(" " FOURLONG, regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); @@ -208,41 +221,7 @@ void show_registers(struct pt_regs *regs) printk(" " FOURLONG, regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); -#if 0 - /* FIXME: this isn't needed any more but it changes the ksymoops - * input. To remove or not to remove ... */ - save_access_regs(regs->acrs); - printk("%s ACRS: %08x %08x %08x %08x\n", mode, - regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]); - printk(" %08x %08x %08x %08x\n", - regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]); - printk(" %08x %08x %08x %08x\n", - regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]); - printk(" %08x %08x %08x %08x\n", - regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]); -#endif - - /* - * Print the first 20 byte of the instruction stream at the - * time of the fault. - */ - old_fs = get_fs(); - if (regs->psw.mask & PSW_MASK_PSTATE) - set_fs(USER_DS); - else - set_fs(KERNEL_DS); - printk("%s Code: ", mode); - for (i = 0; i < 20; i++) { - unsigned char c; - if (__get_user(c, (char __user *)(regs->psw.addr + i))) { - printk(" Bad PSW."); - break; - } - printk("%02x ", c); - } - set_fs(old_fs); - - printk("\n"); + show_code(regs); } /* This is called from fs/proc/array.c */ diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index 33b80ced4bc1..96a6f80953a9 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h @@ -196,6 +196,7 @@ extern unsigned long thread_saved_pc(struct task_struct *t); extern char *task_show_regs(struct task_struct *task, char *buffer); extern void show_registers(struct pt_regs *regs); +extern void show_code(struct pt_regs *regs); extern void show_trace(struct task_struct *task, unsigned long *sp); unsigned long get_wchan(struct task_struct *p); -- cgit v1.2.3-59-g8ed1b From c0007f1a65762eaf55633d403b380130ec60adad Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 27 Apr 2007 16:01:42 +0200 Subject: [S390] Use generic bug. Generic bug implementation for s390. Will increase the value of the console output on BUG() statements since registers r0-r5,r14 will not be clobbered by a printk() call that was previously done before the illegal instruction of BUG() was hit. Also implements an architecture specific WARN_ON(). Output of that could be increased but requires common code change. 
Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- arch/s390/Kconfig | 5 +++ arch/s390/defconfig | 2 ++ arch/s390/kernel/module.c | 4 ++- arch/s390/kernel/traps.c | 17 +++++++++-- arch/s390/kernel/vmlinux.lds.S | 10 +++++- include/asm-s390/bug.h | 69 ++++++++++++++++++++++++++++++++++-------- 6 files changed, 89 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 0f293aa7b0fa..0f9517bc8e70 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -41,6 +41,11 @@ config GENERIC_HWEIGHT config GENERIC_TIME def_bool y +config GENERIC_BUG + bool + depends on BUG + default y + config NO_IOMEM def_bool y diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 741d2bbb2b37..80046d9e2a3b 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -12,6 +12,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y # CONFIG_ARCH_HAS_ILOG2_U64 is not set CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_BUG=y CONFIG_NO_IOMEM=y CONFIG_S390=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" @@ -705,6 +706,7 @@ CONFIG_DEBUG_MUTEXES=y CONFIG_DEBUG_SPINLOCK_SLEEP=y # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_INFO is not set # CONFIG_DEBUG_VM is not set # CONFIG_DEBUG_LIST is not set diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 39d1dd752529..59b4e796680a 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -31,6 +31,7 @@ #include #include #include +#include #if 0 #define DEBUGP printk @@ -398,9 +399,10 @@ int module_finalize(const Elf_Ehdr *hdr, struct module *me) { vfree(me->arch.syminfo); - return 0; + return module_bug_finalize(hdr, sechdrs, me); } void module_arch_cleanup(struct module *mod) { + module_bug_cleanup(mod); } diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index a6540940190b..49dec830373a 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -30,7 +30,7 @@ #include #include #include - +#include #include #include #include @@ -297,6 +297,11 @@ report_user_fault(long interruption_code, struct pt_regs *regs) #endif } +int is_valid_bugaddr(unsigned long addr) +{ + return 1; +} + static void __kprobes inline do_trap(long interruption_code, int signr, char *str, struct pt_regs *regs, siginfo_t *info) @@ -323,8 +328,14 @@ static void __kprobes inline do_trap(long interruption_code, int signr, fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); if (fixup) regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; - else - die(str, regs, interruption_code); + else { + enum bug_trap_type btt; + + btt = report_bug(regs->psw.addr & PSW_ADDR_INSN); + if (btt == BUG_TRAP_TYPE_WARN) + return; + die(str, regs, interruption_code); + } } } diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index c30716ae130c..418f6426a949 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -45,6 +45,8 @@ SECTIONS __ex_table : { *(__ex_table) } __stop___ex_table = .; + BUG_TABLE + .data : { /* Data */ *(.data) CONSTRUCTORS @@ -77,6 +79,12 @@ SECTIONS *(.init.text) _einittext = .; } + /* + * .exit.text is discarded at runtime, not link time, + * to deal with references from __bug_table + */ + .exit.text : { *(.exit.text) } + .init.data : { *(.init.data) } . 
= ALIGN(256); __setup_start = .; @@ -116,7 +124,7 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.exit.text) *(.exit.data) *(.exitcall.exit) + *(.exit.data) *(.exitcall.exit) } /* Stabs debugging sections. */ diff --git a/include/asm-s390/bug.h b/include/asm-s390/bug.h index 876898363944..838684dc6d35 100644 --- a/include/asm-s390/bug.h +++ b/include/asm-s390/bug.h @@ -1,27 +1,70 @@ -#ifndef _S390_BUG_H -#define _S390_BUG_H +#ifndef _ASM_S390_BUG_H +#define _ASM_S390_BUG_H #include #ifdef CONFIG_BUG -static inline __attribute__((noreturn)) void __do_illegal_op(void) -{ -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3) - __builtin_trap(); +#ifdef CONFIG_64BIT +#define S390_LONG ".quad" #else - asm volatile(".long 0"); +#define S390_LONG ".long" #endif -} -#define BUG() do { \ - printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ - __do_illegal_op(); \ +#ifdef CONFIG_DEBUG_BUGVERBOSE + +#define __EMIT_BUG(x) do { \ + asm volatile( \ + "0: j 0b+2\n" \ + "1:\n" \ + ".section .rodata.str,\"aMS\",@progbits,1\n" \ + "2: .asciz \""__FILE__"\"\n" \ + ".previous\n" \ + ".section __bug_table,\"a\"\n" \ + "3:\t" S390_LONG "\t1b,2b\n" \ + " .short %0,%1\n" \ + " .org 3b+%2\n" \ + ".previous\n" \ + : : "i" (__LINE__), \ + "i" (x), \ + "i" (sizeof(struct bug_entry))); \ } while (0) +#else /* CONFIG_DEBUG_BUGVERBOSE */ + +#define __EMIT_BUG(x) do { \ + asm volatile( \ + "0: j 0b+2\n" \ + "1:\n" \ + ".section __bug_table,\"a\"\n" \ + "2:\t" S390_LONG "\t1b\n" \ + " .short %0\n" \ + " .org 2b+%1\n" \ + ".previous\n" \ + : : "i" (x), \ + "i" (sizeof(struct bug_entry))); \ +} while (0) + +#endif /* CONFIG_DEBUG_BUGVERBOSE */ + +#define BUG() __EMIT_BUG(0) + +#define WARN_ON(x) ({ \ + typeof(x) __ret_warn_on = (x); \ + if (__builtin_constant_p(__ret_warn_on)) { \ + if (__ret_warn_on) \ + __EMIT_BUG(BUGFLAG_WARNING); \ + } else { \ + if (unlikely(__ret_warn_on)) \ + __EMIT_BUG(BUGFLAG_WARNING); \ + } \ + unlikely(__ret_warn_on); \ +}) + #define HAVE_ARCH_BUG -#endif +#define HAVE_ARCH_WARN_ON +#endif /* CONFIG_BUG */ #include -#endif +#endif /* _ASM_S390_BUG_H */ -- cgit v1.2.3-59-g8ed1b From 10c1031f706bbe0690d84cdbccad15b11c6dc661 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 27 Apr 2007 16:01:43 +0200 Subject: [S390] Minor fault path optimization. The minor fault path has grown a lot in terms of cycles. In particular the kprobes hook is very costly. Optimize the path to save a couple of cycles. If kprobes is enabled more than 300 cycles can be avoided if kprobes_running() is false. 
Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- arch/s390/mm/fault.c | 259 ++++++++++++++++++++++++++++----------------------- 1 file changed, 141 insertions(+), 118 deletions(-) (limited to 'arch') diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 7462aebd3eb6..8bc35183db59 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -63,21 +63,25 @@ int unregister_page_fault_notifier(struct notifier_block *nb) return atomic_notifier_chain_unregister(¬ify_page_fault_chain, nb); } -static inline int notify_page_fault(enum die_val val, const char *str, - struct pt_regs *regs, long err, int trap, int sig) +static int __kprobes __notify_page_fault(struct pt_regs *regs, long err) { - struct die_args args = { - .regs = regs, - .str = str, - .err = err, - .trapnr = trap, - .signr = sig - }; - return atomic_notifier_call_chain(¬ify_page_fault_chain, val, &args); + struct die_args args = { .str = "page fault", + .trapnr = 14, + .signr = SIGSEGV }; + args.regs = regs; + args.err = err; + return atomic_notifier_call_chain(¬ify_page_fault_chain, + DIE_PAGE_FAULT, &args); +} + +static inline int notify_page_fault(struct pt_regs *regs, long err) +{ + if (unlikely(kprobe_running())) + return __notify_page_fault(regs, err); + return NOTIFY_DONE; } #else -static inline int notify_page_fault(enum die_val val, const char *str, - struct pt_regs *regs, long err, int trap, int sig) +static inline int notify_page_fault(struct pt_regs *regs, long err) { return NOTIFY_DONE; } @@ -170,6 +174,89 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code, force_sig_info(SIGSEGV, &si, current); } +static void do_no_context(struct pt_regs *regs, unsigned long error_code, + unsigned long address) +{ + const struct exception_table_entry *fixup; + + /* Are we prepared to handle this kernel fault? */ + fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK); + if (fixup) { + regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; + return; + } + + /* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ + if (check_space(current) == 0) + printk(KERN_ALERT "Unable to handle kernel pointer dereference" + " at virtual kernel address %p\n", (void *)address); + else + printk(KERN_ALERT "Unable to handle kernel paging request" + " at virtual user address %p\n", (void *)address); + + die("Oops", regs, error_code); + do_exit(SIGKILL); +} + +static void do_low_address(struct pt_regs *regs, unsigned long error_code) +{ + /* Low-address protection hit in kernel mode means + NULL pointer write access in kernel mode. */ + if (regs->psw.mask & PSW_MASK_PSTATE) { + /* Low-address protection hit in user mode 'cannot happen'. */ + die ("Low-address protection", regs, error_code); + do_exit(SIGKILL); + } + + do_no_context(regs, error_code, 0); +} + +/* + * We ran out of memory, or some other thing happened to us that made + * us unable to handle the page fault gracefully. 
+ */ +static int do_out_of_memory(struct pt_regs *regs, unsigned long error_code, + unsigned long address) +{ + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + + up_read(&mm->mmap_sem); + if (is_init(tsk)) { + yield(); + down_read(&mm->mmap_sem); + return 1; + } + printk("VM: killing process %s\n", tsk->comm); + if (regs->psw.mask & PSW_MASK_PSTATE) + do_exit(SIGKILL); + do_no_context(regs, error_code, address); + return 0; +} + +static void do_sigbus(struct pt_regs *regs, unsigned long error_code, + unsigned long address) +{ + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + + up_read(&mm->mmap_sem); + /* + * Send a sigbus, regardless of whether we were in kernel + * or user mode. + */ + tsk->thread.prot_addr = address; + tsk->thread.trap_no = error_code; + force_sig(SIGBUS, tsk); + + /* Kernel mode? Handle exceptions or die */ + if (!(regs->psw.mask & PSW_MASK_PSTATE)) + do_no_context(regs, error_code, address); +} + #ifdef CONFIG_S390_EXEC_PROTECT extern long sys_sigreturn(struct pt_regs *regs); extern long sys_rt_sigreturn(struct pt_regs *regs); @@ -253,49 +340,23 @@ out_fault: * 3b Region third trans. -> Not present (nullification) */ static inline void -do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) +do_exception(struct pt_regs *regs, unsigned long error_code, int write) { - struct task_struct *tsk; - struct mm_struct *mm; - struct vm_area_struct * vma; - unsigned long address; - const struct exception_table_entry *fixup; - int si_code; + struct task_struct *tsk; + struct mm_struct *mm; + struct vm_area_struct *vma; + unsigned long address; int space; + int si_code; - tsk = current; - mm = tsk->mm; - - if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, - SIGSEGV) == NOTIFY_STOP) + if (notify_page_fault(regs, error_code) == NOTIFY_STOP) return; - /* - * Check for low-address protection. This needs to be treated - * as a special case because the translation exception code - * field is not guaranteed to contain valid data in this case. - */ - if (is_protection && !(S390_lowcore.trans_exc_code & 4)) { - - /* Low-address protection hit in kernel mode means - NULL pointer write access in kernel mode. */ - if (!(regs->psw.mask & PSW_MASK_PSTATE)) { - address = 0; - space = 0; - goto no_context; - } - - /* Low-address protection hit in user mode 'cannot happen'. 
*/ - die ("Low-address protection", regs, error_code); - do_exit(SIGKILL); - } + tsk = current; + mm = tsk->mm; - /* - * get the failing address - * more specific the segment and page table portion of - * the address - */ - address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK; + /* get the failing address and the affected space */ + address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK; space = check_space(tsk); /* @@ -313,7 +374,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) */ local_irq_enable(); - down_read(&mm->mmap_sem); + down_read(&mm->mmap_sem); si_code = SEGV_MAPERR; vma = find_vma(mm, address); @@ -330,19 +391,19 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) return; #endif - if (vma->vm_start <= address) - goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (expand_stack(vma, address)) - goto bad_area; + if (vma->vm_start <= address) + goto good_area; + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto bad_area; + if (expand_stack(vma, address)) + goto bad_area; /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. */ good_area: si_code = SEGV_ACCERR; - if (!is_protection) { + if (!write) { /* page not present, check vm flags */ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) goto bad_area; @@ -357,7 +418,7 @@ survive: * make sure we exit gracefully rather than endlessly redo * the fault. */ - switch (handle_mm_fault(mm, vma, address, is_protection)) { + switch (handle_mm_fault(mm, vma, address, write)) { case VM_FAULT_MINOR: tsk->min_flt++; break; @@ -365,9 +426,12 @@ survive: tsk->maj_flt++; break; case VM_FAULT_SIGBUS: - goto do_sigbus; + do_sigbus(regs, error_code, address); + return; case VM_FAULT_OOM: - goto out_of_memory; + if (do_out_of_memory(regs, error_code, address)) + goto survive; + return; default: BUG(); } @@ -385,75 +449,34 @@ survive: * Fix it, but check if it's kernel or user first.. */ bad_area: - up_read(&mm->mmap_sem); + up_read(&mm->mmap_sem); - /* User mode accesses just cause a SIGSEGV */ - if (regs->psw.mask & PSW_MASK_PSTATE) { - tsk->thread.prot_addr = address; - tsk->thread.trap_no = error_code; + /* User mode accesses just cause a SIGSEGV */ + if (regs->psw.mask & PSW_MASK_PSTATE) { + tsk->thread.prot_addr = address; + tsk->thread.trap_no = error_code; do_sigsegv(regs, error_code, si_code, address); - return; + return; } no_context: - /* Are we prepared to handle this kernel fault? */ - fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK); - if (fixup) { - regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; - return; - } - -/* - * Oops. The kernel tried to access some bad page. We'll have to - * terminate things with extreme prejudice. - */ - if (space == 0) - printk(KERN_ALERT "Unable to handle kernel pointer dereference" - " at virtual kernel address %p\n", (void *)address); - else - printk(KERN_ALERT "Unable to handle kernel paging request" - " at virtual user address %p\n", (void *)address); - - die("Oops", regs, error_code); - do_exit(SIGKILL); - - -/* - * We ran out of memory, or some other thing happened to us that made - * us unable to handle the page fault gracefully. 
-*/ -out_of_memory: - up_read(&mm->mmap_sem); - if (is_init(tsk)) { - yield(); - down_read(&mm->mmap_sem); - goto survive; - } - printk("VM: killing process %s\n", tsk->comm); - if (regs->psw.mask & PSW_MASK_PSTATE) - do_exit(SIGKILL); - goto no_context; - -do_sigbus: - up_read(&mm->mmap_sem); - - /* - * Send a sigbus, regardless of whether we were in kernel - * or user mode. - */ - tsk->thread.prot_addr = address; - tsk->thread.trap_no = error_code; - force_sig(SIGBUS, tsk); - - /* Kernel mode? Handle exceptions or die */ - if (!(regs->psw.mask & PSW_MASK_PSTATE)) - goto no_context; + do_no_context(regs, error_code, address); } void __kprobes do_protection_exception(struct pt_regs *regs, unsigned long error_code) { + /* Protection exception is supressing, decrement psw address. */ regs->psw.addr -= (error_code >> 16); + /* + * Check for low-address protection. This needs to be treated + * as a special case because the translation exception code + * field is not guaranteed to contain valid data in this case. + */ + if (unlikely(!(S390_lowcore.trans_exc_code & 4))) { + do_low_address(regs, error_code); + return; + } do_exception(regs, 4, 1); } -- cgit v1.2.3-59-g8ed1b From be5ec363e958982454ac9b3138b0e78c032e758d Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 27 Apr 2007 16:01:44 +0200 Subject: [S390] No execute support cleanup. Simplify the signal_return function that checks for the two special system calls sigreturn and rt_sigreturn. No need to do a page table walk, a call to copy_from_user while disabled page faults will work as well. Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- arch/s390/mm/fault.c | 72 +++++++++++++++------------------------------------- 1 file changed, 21 insertions(+), 51 deletions(-) (limited to 'arch') diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 8bc35183db59..2b76a879a7b5 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -26,9 +26,9 @@ #include #include #include +#include #include -#include #include #include #include @@ -263,68 +263,38 @@ extern long sys_rt_sigreturn(struct pt_regs *regs); extern long sys32_sigreturn(struct pt_regs *regs); extern long sys32_rt_sigreturn(struct pt_regs *regs); -static inline void do_sigreturn(struct mm_struct *mm, struct pt_regs *regs, - int rt) +static int signal_return(struct mm_struct *mm, struct pt_regs *regs, + unsigned long address, unsigned long error_code) { + u16 instruction; + int rc, compat; + + pagefault_disable(); + rc = __get_user(instruction, (u16 __user *) regs->psw.addr); + pagefault_enable(); + if (rc) + return -EFAULT; + up_read(&mm->mmap_sem); clear_tsk_thread_flag(current, TIF_SINGLE_STEP); #ifdef CONFIG_COMPAT - if (test_tsk_thread_flag(current, TIF_31BIT)) { - if (rt) - sys32_rt_sigreturn(regs); - else - sys32_sigreturn(regs); - return; - } -#endif /* CONFIG_COMPAT */ - if (rt) - sys_rt_sigreturn(regs); + compat = test_tsk_thread_flag(current, TIF_31BIT); + if (compat && instruction == 0x0a77) + sys32_sigreturn(regs); + else if (compat && instruction == 0x0aad) + sys32_rt_sigreturn(regs); else +#endif + if (instruction == 0x0a77) sys_sigreturn(regs); - return; -} - -static int signal_return(struct mm_struct *mm, struct pt_regs *regs, - unsigned long address, unsigned long error_code) -{ - pgd_t *pgd; - pmd_t *pmd; - pte_t *pte; - u16 *instruction; - unsigned long pfn, uaddr = regs->psw.addr; - - spin_lock(&mm->page_table_lock); - pgd = pgd_offset(mm, uaddr); - if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) - goto out_fault; - pmd = 
pmd_offset(pgd, uaddr); - if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) - goto out_fault; - pte = pte_offset_map(pmd_offset(pgd_offset(mm, uaddr), uaddr), uaddr); - if (!pte || !pte_present(*pte)) - goto out_fault; - pfn = pte_pfn(*pte); - if (!pfn_valid(pfn)) - goto out_fault; - spin_unlock(&mm->page_table_lock); - - instruction = (u16 *) ((pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE-1))); - if (*instruction == 0x0a77) - do_sigreturn(mm, regs, 0); - else if (*instruction == 0x0aad) - do_sigreturn(mm, regs, 1); + else if (instruction == 0x0aad) + sys_rt_sigreturn(regs); else { - printk("- XXX - do_exception: task = %s, primary, NO EXEC " - "-> SIGSEGV\n", current->comm); - up_read(&mm->mmap_sem); current->thread.prot_addr = address; current->thread.trap_no = error_code; do_sigsegv(regs, error_code, SEGV_MAPERR, address); } return 0; -out_fault: - spin_unlock(&mm->page_table_lock); - return -EFAULT; } #endif /* CONFIG_S390_EXEC_PROTECT */ -- cgit v1.2.3-59-g8ed1b From be7962856d299a0f231ac36f89f4a89cbecfe0ff Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 27 Apr 2007 16:01:46 +0200 Subject: [S390] Improved kernel stack overflow checking. Recent cvs versions of gcc have support for an improved stack overflow checking that calculates the size of the guard size for each function. If the compiler accepts -mstack-size without -mstack-guard then the new stack check is available. We always want to use the new stack checker. Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- arch/s390/Makefile | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/s390/Makefile b/arch/s390/Makefile index b1e558496469..ece5adc05606 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -67,8 +67,10 @@ endif ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y) cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE) +ifneq ($(call cc-option-yn,-mstack-size=8192),y) cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD) endif +endif ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y) cflags-$(CONFIG_WARN_STACK) += -mwarn-dynamicstack -- cgit v1.2.3-59-g8ed1b From 411ed3225733dbd83b4cbaaa992ef80d6ec1534e Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Fri, 27 Apr 2007 16:01:49 +0200 Subject: [S390] zfcpdump support. s390 machines provide hardware support for creating Linux dumps on SCSI disks. For creating a dump a special purpose dump Linux is used. The first 32 MB of memory are saved by the hardware before the dump Linux is booted. Via an SCLP interface, the saved memory can be accessed from Linux. This patch exports memory and registers of the crashed Linux to userspace via a debugfs file. For more information refer to Documentation/s390/zfcpdump.txt, which is included in this patch. 
Signed-off-by: Michael Holzheu Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- Documentation/s390/zfcpdump.txt | 87 ++++++ arch/s390/Kconfig | 8 + arch/s390/Makefile | 3 + arch/s390/defconfig | 1 + arch/s390/kernel/head64.S | 72 ++++- arch/s390/kernel/ipl.c | 232 +++++++++----- arch/s390/kernel/setup.c | 38 ++- arch/s390/kernel/smp.c | 62 ++++ drivers/s390/char/Makefile | 3 + drivers/s390/char/sclp.h | 2 + drivers/s390/char/sclp_sdias.c | 255 ++++++++++++++++ drivers/s390/char/zcore.c | 651 ++++++++++++++++++++++++++++++++++++++++ include/asm-s390/ipl.h | 32 ++ include/asm-s390/lowcore.h | 46 +++ include/asm-s390/sclp.h | 2 + include/asm-s390/setup.h | 2 + include/asm-s390/smp.h | 1 + 17 files changed, 1411 insertions(+), 86 deletions(-) create mode 100644 Documentation/s390/zfcpdump.txt create mode 100644 drivers/s390/char/sclp_sdias.c create mode 100644 drivers/s390/char/zcore.c (limited to 'arch') diff --git a/Documentation/s390/zfcpdump.txt b/Documentation/s390/zfcpdump.txt new file mode 100644 index 000000000000..cf45d27c4608 --- /dev/null +++ b/Documentation/s390/zfcpdump.txt @@ -0,0 +1,87 @@ +s390 SCSI dump tool (zfcpdump) + +System z machines (z900 or higher) provide hardware support for creating system +dumps on SCSI disks. The dump process is initiated by booting a dump tool, which +has to create a dump of the current (probably crashed) Linux image. In order to +not overwrite memory of the crashed Linux with data of the dump tool, the +hardware saves some memory plus the register sets of the boot cpu before the +dump tool is loaded. There exists an SCLP hardware interface to obtain the saved +memory afterwards. Currently 32 MB are saved. + +This zfcpdump implementation consists of a Linux dump kernel together with +a userspace dump tool, which are loaded together into the saved memory region +below 32 MB. zfcpdump is installed on a SCSI disk using zipl (as contained in +the s390-tools package) to make the device bootable. The operator of a Linux +system can then trigger a SCSI dump by booting the SCSI disk, where zfcpdump +resides on. + +The kernel part of zfcpdump is implemented as a debugfs file under "zcore/mem", +which exports memory and registers of the crashed Linux in an s390 +standalone dump format. It can be used in the same way as e.g. /dev/mem. The +dump format defines a 4K header followed by plain uncompressed memory. The +register sets are stored in the prefix pages of the respective cpus. To build a +dump enabled kernel with the zcore driver, the kernel config option +CONFIG_ZFCPDUMP has to be set. When reading from "zcore/mem", the part of +memory, which has been saved by hardware is read by the driver via the SCLP +hardware interface. The second part is just copied from the non overwritten real +memory. + +The userspace application of zfcpdump can reside e.g. in an intitramfs or an +initrd. It reads from zcore/mem and writes the system dump to a file on a +SCSI disk. + +To build a zfcpdump kernel use the following settings in your kernel +configuration: + * CONFIG_ZFCPDUMP=y + * Enable ZFCP driver + * Enable SCSI driver + * Enable ext2 and ext3 filesystems + * Disable as many features as possible to keep the kernel small. + E.g. network support is not needed at all. + +To use the zfcpdump userspace application in an initramfs you have to do the +following: + + * Copy the zfcpdump executable somewhere into your Linux tree. + E.g. to "arch/s390/boot/zfcpdump. 
If you do not want to include + shared libraries, compile the tool with the "-static" gcc option. + * If you want to include e2fsck, add it to your source tree, too. The zfcpdump + application attempts to start /sbin/e2fsck from the ramdisk. + * Use an initramfs config file like the following: + + dir /dev 755 0 0 + nod /dev/console 644 0 0 c 5 1 + nod /dev/null 644 0 0 c 1 3 + nod /dev/sda1 644 0 0 b 8 1 + nod /dev/sda2 644 0 0 b 8 2 + nod /dev/sda3 644 0 0 b 8 3 + nod /dev/sda4 644 0 0 b 8 4 + nod /dev/sda5 644 0 0 b 8 5 + nod /dev/sda6 644 0 0 b 8 6 + nod /dev/sda7 644 0 0 b 8 7 + nod /dev/sda8 644 0 0 b 8 8 + nod /dev/sda9 644 0 0 b 8 9 + nod /dev/sda10 644 0 0 b 8 10 + nod /dev/sda11 644 0 0 b 8 11 + nod /dev/sda12 644 0 0 b 8 12 + nod /dev/sda13 644 0 0 b 8 13 + nod /dev/sda14 644 0 0 b 8 14 + nod /dev/sda15 644 0 0 b 8 15 + file /init arch/s390/boot/zfcpdump 755 0 0 + file /sbin/e2fsck arch/s390/boot/e2fsck 755 0 0 + dir /proc 755 0 0 + dir /sys 755 0 0 + dir /mnt 755 0 0 + dir /sbin 755 0 0 + + * Issue "make image" to build the zfcpdump image with initramfs. + +In a Linux distribution the zfcpdump enabled kernel image must be copied to +/usr/share/zfcpdump/zfcpdump.image, where the s390 zipl tool is looking for the +dump kernel when preparing a SCSI dump disk. + +If you use a ramdisk copy it to "/usr/share/zfcpdump/zfcpdump.rd". + +For more information on how to use zfcpdump refer to the s390 'Using the Dump +Tools book', which is available from +http://www.ibm.com/developerworks/linux/linux390. diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 0f9517bc8e70..e6ec418093e5 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -519,6 +519,14 @@ config KEXEC current kernel, and to start another kernel. It is like a reboot but is independent of hardware/microcode support. +config ZFCPDUMP + tristate "zfcpdump support" + select SMP + default n + help + Select this option if you want to build an zfcpdump enabled kernel. + Refer to "Documentation/s390/zfcpdump.txt" for more details on this. + endmenu source "net/Kconfig" diff --git a/arch/s390/Makefile b/arch/s390/Makefile index ece5adc05606..68441e0e74b6 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -105,6 +105,9 @@ install: vmlinux image: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ +zfcpdump: + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + archclean: $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 80046d9e2a3b..0e4da8a7d826 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -167,6 +167,7 @@ CONFIG_NO_IDLE_HZ=y CONFIG_NO_IDLE_HZ_INIT=y CONFIG_S390_HYPFS_FS=y CONFIG_KEXEC=y +# CONFIG_ZFCPDUMP is not set # # Networking diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 37010709fe68..a87b1976d409 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -39,7 +39,69 @@ startup_continue: basr %r13,0 # get base .LPG1: sll %r13,1 # remove high order bit srl %r13,1 - lhi %r1,1 # mode 1 = esame + +#ifdef CONFIG_ZFCPDUMP + + # check if we have been ipled using zfcp dump: + + tm 0xb9,0x01 # test if subchannel is enabled + jno .nodump # subchannel disabled + l %r1,0xb8 + la %r5,.Lipl_schib-.LPG1(%r13) + stsch 0(%r5) # get schib of subchannel + jne .nodump # schib not available + tm 5(%r5),0x01 # devno valid? + jno .nodump + tm 4(%r5),0x80 # qdio capable device? + jno .nodump + l %r2,20(%r0) # address of ipl parameter block + lhi %r3,0 + ic %r3,0x148(%r2) # get opt field + chi %r3,0x20 # load with dump? 
+ jne .nodump + + # store all prefix registers in case of load with dump: + + la %r7,0 # base register for 0 page + la %r8,0 # first cpu + l %r11,.Lpref_arr_ptr-.LPG1(%r13) # address of prefix array + ahi %r11,4 # skip boot cpu + lr %r12,%r11 + ahi %r12,(CONFIG_NR_CPUS*4) # end of prefix array + stap .Lcurrent_cpu+2-.LPG1(%r13) # store current cpu addr +1: + cl %r8,.Lcurrent_cpu-.LPG1(%r13) # is ipl cpu ? + je 4f # if yes get next cpu +2: + lr %r9,%r7 + sigp %r9,%r8,0x9 # stop & store status of cpu + brc 8,3f # accepted + brc 4,4f # status stored: next cpu + brc 2,2b # busy: try again + brc 1,4f # not op: next cpu +3: + mvc 0(4,%r11),264(%r7) # copy prefix register to prefix array + ahi %r11,4 # next element in prefix array + clr %r11,%r12 + je 5f # no more space in prefix array +4: + ahi %r8,1 # next cpu (r8 += 1) + cl %r8,.Llast_cpu-.LPG1(%r13) # is last possible cpu ? + jl 1b # jump if not last cpu +5: + lhi %r1,2 # mode 2 = esame (dump) + j 6f + .align 4 +.Lipl_schib: + .rept 13 + .long 0 + .endr +.nodump: + lhi %r1,1 # mode 1 = esame (normal ipl) +6: +#else + lhi %r1,1 # mode 1 = esame (normal ipl) +#endif /* CONFIG_ZFCPDUMP */ mvi __LC_AR_MODE_ID,1 # set esame flag slr %r0,%r0 # set cpuid to zero sigp %r1,%r0,0x12 # switch to esame mode @@ -149,6 +211,14 @@ startup_continue: .L4malign:.quad 0xffffffffffc00000 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 .Lnop: .long 0x07000700 +#ifdef CONFIG_ZFCPDUMP +.Lcurrent_cpu: + .long 0x0 +.Llast_cpu: + .long 0x0000ffff +.Lpref_arr_ptr: + .long zfcpdump_prefix_array +#endif /* CONFIG_ZFCPDUMP */ .Lparmaddr: .quad PARMAREA .align 64 diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index a83cf1fdd8f5..06833ac2b115 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -29,36 +29,21 @@ #define SCCB_LOADPARM (&s390_readinfo_sccb.loadparm) #define SCCB_FLAG (s390_readinfo_sccb.flags) -enum ipl_type { - IPL_TYPE_NONE = 1, - IPL_TYPE_UNKNOWN = 2, - IPL_TYPE_CCW = 4, - IPL_TYPE_FCP = 8, - IPL_TYPE_NSS = 16, -}; - -#define IPL_NONE_STR "none" -#define IPL_UNKNOWN_STR "unknown" -#define IPL_CCW_STR "ccw" -#define IPL_FCP_STR "fcp" -#define IPL_NSS_STR "nss" - -/* - * Must be in data section since the bss section - * is not cleared when these are accessed. - */ -static u16 ipl_devno __attribute__((__section__(".data"))) = 0; -u32 ipl_flags __attribute__((__section__(".data"))) = 0; +#define IPL_UNKNOWN_STR "unknown" +#define IPL_CCW_STR "ccw" +#define IPL_FCP_STR "fcp" +#define IPL_FCP_DUMP_STR "fcp_dump" +#define IPL_NSS_STR "nss" static char *ipl_type_str(enum ipl_type type) { switch (type) { - case IPL_TYPE_NONE: - return IPL_NONE_STR; case IPL_TYPE_CCW: return IPL_CCW_STR; case IPL_TYPE_FCP: return IPL_FCP_STR; + case IPL_TYPE_FCP_DUMP: + return IPL_FCP_DUMP_STR; case IPL_TYPE_NSS: return IPL_NSS_STR; case IPL_TYPE_UNKNOWN: @@ -67,15 +52,55 @@ static char *ipl_type_str(enum ipl_type type) } } +enum dump_type { + DUMP_TYPE_NONE = 1, + DUMP_TYPE_CCW = 2, + DUMP_TYPE_FCP = 4, +}; + +#define DUMP_NONE_STR "none" +#define DUMP_CCW_STR "ccw" +#define DUMP_FCP_STR "fcp" + +static char *dump_type_str(enum dump_type type) +{ + switch (type) { + case DUMP_TYPE_NONE: + return DUMP_NONE_STR; + case DUMP_TYPE_CCW: + return DUMP_CCW_STR; + case DUMP_TYPE_FCP: + return DUMP_FCP_STR; + default: + return NULL; + } +} + +/* + * Must be in data section since the bss section + * is not cleared when these are accessed. 
+ */ +static u16 ipl_devno __attribute__((__section__(".data"))) = 0; +u32 ipl_flags __attribute__((__section__(".data"))) = 0; + enum ipl_method { - IPL_METHOD_NONE, - IPL_METHOD_CCW_CIO, - IPL_METHOD_CCW_DIAG, - IPL_METHOD_CCW_VM, - IPL_METHOD_FCP_RO_DIAG, - IPL_METHOD_FCP_RW_DIAG, - IPL_METHOD_FCP_RO_VM, - IPL_METHOD_NSS, + REIPL_METHOD_CCW_CIO, + REIPL_METHOD_CCW_DIAG, + REIPL_METHOD_CCW_VM, + REIPL_METHOD_FCP_RO_DIAG, + REIPL_METHOD_FCP_RW_DIAG, + REIPL_METHOD_FCP_RO_VM, + REIPL_METHOD_FCP_DUMP, + REIPL_METHOD_NSS, + REIPL_METHOD_DEFAULT, +}; + +enum dump_method { + DUMP_METHOD_NONE, + DUMP_METHOD_CCW_CIO, + DUMP_METHOD_CCW_DIAG, + DUMP_METHOD_CCW_VM, + DUMP_METHOD_FCP_DIAG, }; enum shutdown_action { @@ -107,15 +132,15 @@ static int diag308_set_works = 0; static int reipl_capabilities = IPL_TYPE_UNKNOWN; static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; -static enum ipl_method reipl_method = IPL_METHOD_NONE; +static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT; static struct ipl_parameter_block *reipl_block_fcp; static struct ipl_parameter_block *reipl_block_ccw; static char reipl_nss_name[NSS_NAME_SIZE + 1]; -static int dump_capabilities = IPL_TYPE_NONE; -static enum ipl_type dump_type = IPL_TYPE_NONE; -static enum ipl_method dump_method = IPL_METHOD_NONE; +static int dump_capabilities = DUMP_TYPE_NONE; +static enum dump_type dump_type = DUMP_TYPE_NONE; +static enum dump_method dump_method = DUMP_METHOD_NONE; static struct ipl_parameter_block *dump_block_fcp; static struct ipl_parameter_block *dump_block_ccw; @@ -134,6 +159,7 @@ int diag308(unsigned long subcode, void *addr) : "d" (subcode) : "cc", "memory"); return _rc; } +EXPORT_SYMBOL_GPL(diag308); /* SYSFS */ @@ -197,7 +223,7 @@ static void make_attrs_ro(struct attribute **attrs) * ipl section */ -static enum ipl_type ipl_get_type(void) +static __init enum ipl_type get_ipl_type(void) { struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; @@ -211,12 +237,44 @@ static enum ipl_type ipl_get_type(void) return IPL_TYPE_UNKNOWN; if (ipl->hdr.pbt != DIAG308_IPL_TYPE_FCP) return IPL_TYPE_UNKNOWN; + if (ipl->ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP) + return IPL_TYPE_FCP_DUMP; return IPL_TYPE_FCP; } +void __init setup_ipl_info(void) +{ + ipl_info.type = get_ipl_type(); + switch (ipl_info.type) { + case IPL_TYPE_CCW: + ipl_info.data.ccw.dev_id.devno = ipl_devno; + ipl_info.data.ccw.dev_id.ssid = 0; + break; + case IPL_TYPE_FCP: + case IPL_TYPE_FCP_DUMP: + ipl_info.data.fcp.dev_id.devno = + IPL_PARMBLOCK_START->ipl_info.fcp.devno; + ipl_info.data.fcp.dev_id.ssid = 0; + ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn; + ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun; + break; + case IPL_TYPE_NSS: + strncpy(ipl_info.data.nss.name, kernel_nss_name, + sizeof(ipl_info.data.nss.name)); + break; + case IPL_TYPE_UNKNOWN: + default: + /* We have no info to copy */ + break; + } +} + +struct ipl_info ipl_info; +EXPORT_SYMBOL_GPL(ipl_info); + static ssize_t ipl_type_show(struct subsystem *subsys, char *page) { - return sprintf(page, "%s\n", ipl_type_str(ipl_get_type())); + return sprintf(page, "%s\n", ipl_type_str(ipl_info.type)); } static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); @@ -225,10 +283,11 @@ static ssize_t sys_ipl_device_show(struct subsystem *subsys, char *page) { struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; - switch (ipl_get_type()) { + switch (ipl_info.type) { case IPL_TYPE_CCW: return sprintf(page, "0.0.%04x\n", ipl_devno); case IPL_TYPE_FCP: + case 
IPL_TYPE_FCP_DUMP: return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno); default: return 0; @@ -485,23 +544,29 @@ static int reipl_set_type(enum ipl_type type) switch(type) { case IPL_TYPE_CCW: if (MACHINE_IS_VM) - reipl_method = IPL_METHOD_CCW_VM; + reipl_method = REIPL_METHOD_CCW_VM; else - reipl_method = IPL_METHOD_CCW_CIO; + reipl_method = REIPL_METHOD_CCW_CIO; break; case IPL_TYPE_FCP: if (diag308_set_works) - reipl_method = IPL_METHOD_FCP_RW_DIAG; + reipl_method = REIPL_METHOD_FCP_RW_DIAG; else if (MACHINE_IS_VM) - reipl_method = IPL_METHOD_FCP_RO_VM; + reipl_method = REIPL_METHOD_FCP_RO_VM; else - reipl_method = IPL_METHOD_FCP_RO_DIAG; + reipl_method = REIPL_METHOD_FCP_RO_DIAG; + break; + case IPL_TYPE_FCP_DUMP: + reipl_method = REIPL_METHOD_FCP_DUMP; break; case IPL_TYPE_NSS: - reipl_method = IPL_METHOD_NSS; + reipl_method = REIPL_METHOD_NSS; + break; + case IPL_TYPE_UNKNOWN: + reipl_method = REIPL_METHOD_DEFAULT; break; default: - reipl_method = IPL_METHOD_NONE; + BUG(); } reipl_type = type; return 0; @@ -579,22 +644,22 @@ static struct attribute_group dump_ccw_attr_group = { /* dump type */ -static int dump_set_type(enum ipl_type type) +static int dump_set_type(enum dump_type type) { if (!(dump_capabilities & type)) return -EINVAL; switch(type) { - case IPL_TYPE_CCW: + case DUMP_TYPE_CCW: if (MACHINE_IS_VM) - dump_method = IPL_METHOD_CCW_VM; + dump_method = DUMP_METHOD_CCW_VM; else - dump_method = IPL_METHOD_CCW_CIO; + dump_method = DUMP_METHOD_CCW_CIO; break; - case IPL_TYPE_FCP: - dump_method = IPL_METHOD_FCP_RW_DIAG; + case DUMP_TYPE_FCP: + dump_method = DUMP_METHOD_FCP_DIAG; break; default: - dump_method = IPL_METHOD_NONE; + dump_method = DUMP_METHOD_NONE; } dump_type = type; return 0; @@ -602,7 +667,7 @@ static int dump_set_type(enum ipl_type type) static ssize_t dump_type_show(struct subsystem *subsys, char *page) { - return sprintf(page, "%s\n", ipl_type_str(dump_type)); + return sprintf(page, "%s\n", dump_type_str(dump_type)); } static ssize_t dump_type_store(struct subsystem *subsys, const char *buf, @@ -610,12 +675,12 @@ static ssize_t dump_type_store(struct subsystem *subsys, const char *buf, { int rc = -EINVAL; - if (strncmp(buf, IPL_NONE_STR, strlen(IPL_NONE_STR)) == 0) - rc = dump_set_type(IPL_TYPE_NONE); - else if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0) - rc = dump_set_type(IPL_TYPE_CCW); - else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0) - rc = dump_set_type(IPL_TYPE_FCP); + if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0) + rc = dump_set_type(DUMP_TYPE_NONE); + else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0) + rc = dump_set_type(DUMP_TYPE_CCW); + else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0) + rc = dump_set_type(DUMP_TYPE_FCP); return (rc != 0) ? 
rc : len; } @@ -664,14 +729,14 @@ void do_reipl(void) char loadparm[LOADPARM_LEN + 1]; switch (reipl_method) { - case IPL_METHOD_CCW_CIO: + case REIPL_METHOD_CCW_CIO: devid.devno = reipl_block_ccw->ipl_info.ccw.devno; - if (ipl_get_type() == IPL_TYPE_CCW && devid.devno == ipl_devno) + if (ipl_info.type == IPL_TYPE_CCW && devid.devno == ipl_devno) diag308(DIAG308_IPL, NULL); devid.ssid = 0; reipl_ccw_dev(&devid); break; - case IPL_METHOD_CCW_VM: + case REIPL_METHOD_CCW_VM: reipl_get_ascii_loadparm(loadparm); if (strlen(loadparm) == 0) sprintf(buf, "IPL %X", @@ -681,30 +746,32 @@ void do_reipl(void) reipl_block_ccw->ipl_info.ccw.devno, loadparm); __cpcmd(buf, NULL, 0, NULL); break; - case IPL_METHOD_CCW_DIAG: + case REIPL_METHOD_CCW_DIAG: diag308(DIAG308_SET, reipl_block_ccw); diag308(DIAG308_IPL, NULL); break; - case IPL_METHOD_FCP_RW_DIAG: + case REIPL_METHOD_FCP_RW_DIAG: diag308(DIAG308_SET, reipl_block_fcp); diag308(DIAG308_IPL, NULL); break; - case IPL_METHOD_FCP_RO_DIAG: + case REIPL_METHOD_FCP_RO_DIAG: diag308(DIAG308_IPL, NULL); break; - case IPL_METHOD_FCP_RO_VM: + case REIPL_METHOD_FCP_RO_VM: __cpcmd("IPL", NULL, 0, NULL); break; - case IPL_METHOD_NSS: + case REIPL_METHOD_NSS: sprintf(buf, "IPL %s", reipl_nss_name); __cpcmd(buf, NULL, 0, NULL); break; - case IPL_METHOD_NONE: - default: + case REIPL_METHOD_DEFAULT: if (MACHINE_IS_VM) __cpcmd("IPL", NULL, 0, NULL); diag308(DIAG308_IPL, NULL); break; + case REIPL_METHOD_FCP_DUMP: + default: + break; } signal_processor(smp_processor_id(), sigp_stop_and_store_status); } @@ -715,28 +782,28 @@ static void do_dump(void) static char buf[100]; switch (dump_method) { - case IPL_METHOD_CCW_CIO: + case DUMP_METHOD_CCW_CIO: smp_send_stop(); devid.devno = dump_block_ccw->ipl_info.ccw.devno; devid.ssid = 0; reipl_ccw_dev(&devid); break; - case IPL_METHOD_CCW_VM: + case DUMP_METHOD_CCW_VM: smp_send_stop(); sprintf(buf, "STORE STATUS"); __cpcmd(buf, NULL, 0, NULL); sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); __cpcmd(buf, NULL, 0, NULL); break; - case IPL_METHOD_CCW_DIAG: + case DUMP_METHOD_CCW_DIAG: diag308(DIAG308_SET, dump_block_ccw); diag308(DIAG308_DUMP, NULL); break; - case IPL_METHOD_FCP_RW_DIAG: + case DUMP_METHOD_FCP_DIAG: diag308(DIAG308_SET, dump_block_fcp); diag308(DIAG308_DUMP, NULL); break; - case IPL_METHOD_NONE: + case DUMP_METHOD_NONE: default: return; } @@ -777,12 +844,13 @@ static int __init ipl_init(void) rc = firmware_register(&ipl_subsys); if (rc) return rc; - switch (ipl_get_type()) { + switch (ipl_info.type) { case IPL_TYPE_CCW: rc = sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_ccw_attr_group); break; case IPL_TYPE_FCP: + case IPL_TYPE_FCP_DUMP: rc = ipl_register_fcp_files(); break; case IPL_TYPE_NSS: @@ -852,7 +920,7 @@ static int __init reipl_ccw_init(void) /* FIXME: check for diag308_set_works when enabling diag ccw reipl */ if (!MACHINE_IS_VM) sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO; - if (ipl_get_type() == IPL_TYPE_CCW) + if (ipl_info.type == IPL_TYPE_CCW) reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; reipl_capabilities |= IPL_TYPE_CCW; return 0; @@ -862,9 +930,9 @@ static int __init reipl_fcp_init(void) { int rc; - if ((!diag308_set_works) && (ipl_get_type() != IPL_TYPE_FCP)) + if ((!diag308_set_works) && (ipl_info.type != IPL_TYPE_FCP)) return 0; - if ((!diag308_set_works) && (ipl_get_type() == IPL_TYPE_FCP)) + if ((!diag308_set_works) && (ipl_info.type == IPL_TYPE_FCP)) make_attrs_ro(reipl_fcp_attrs); reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); @@ -875,7 +943,7 @@ static int 
__init reipl_fcp_init(void) free_page((unsigned long)reipl_block_fcp); return rc; } - if (ipl_get_type() == IPL_TYPE_FCP) { + if (ipl_info.type == IPL_TYPE_FCP) { memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); } else { reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; @@ -909,7 +977,7 @@ static int __init reipl_init(void) rc = reipl_nss_init(); if (rc) return rc; - rc = reipl_set_type(ipl_get_type()); + rc = reipl_set_type(ipl_info.type); if (rc) return rc; return 0; @@ -931,7 +999,7 @@ static int __init dump_ccw_init(void) dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION; dump_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN; dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW; - dump_capabilities |= IPL_TYPE_CCW; + dump_capabilities |= DUMP_TYPE_CCW; return 0; } @@ -956,7 +1024,7 @@ static int __init dump_fcp_init(void) dump_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP; dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP; - dump_capabilities |= IPL_TYPE_FCP; + dump_capabilities |= DUMP_TYPE_FCP; return 0; } @@ -995,7 +1063,7 @@ static int __init dump_init(void) rc = dump_fcp_init(); if (rc) return rc; - dump_set_type(IPL_TYPE_NONE); + dump_set_type(DUMP_TYPE_NONE); return 0; } diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 863c8d08c026..3dfd0985861c 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -285,6 +285,26 @@ static void __init conmode_default(void) } } +#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) +static void __init setup_zfcpdump(unsigned int console_devno) +{ + static char str[64]; + + if (ipl_info.type != IPL_TYPE_FCP_DUMP) + return; + if (console_devno != -1) + sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x", + ipl_info.data.fcp.dev_id.devno, console_devno); + else + sprintf(str, "cio_ignore=all,!0.0.%04x", + ipl_info.data.fcp.dev_id.devno); + strcat(COMMAND_LINE, str); + console_loglevel = 2; +} +#else +static inline void setup_zfcpdump(unsigned int console_devno) {} +#endif /* CONFIG_ZFCPDUMP */ + #ifdef CONFIG_SMP void (*_machine_restart)(char *command) = machine_restart_smp; void (*_machine_halt)(void) = machine_halt_smp; @@ -586,13 +606,20 @@ setup_resources(void) } } +unsigned long real_memory_size; +EXPORT_SYMBOL_GPL(real_memory_size); + static void __init setup_memory_end(void) { - unsigned long real_size, memory_size; + unsigned long memory_size; unsigned long max_mem, max_phys; int i; - memory_size = real_size = 0; +#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) + if (ipl_info.type == IPL_TYPE_FCP_DUMP) + memory_end = ZFCPDUMP_HSA_SIZE; +#endif + memory_size = 0; max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE; memory_end &= PAGE_MASK; @@ -601,7 +628,8 @@ static void __init setup_memory_end(void) for (i = 0; i < MEMORY_CHUNKS; i++) { struct mem_chunk *chunk = &memory_chunk[i]; - real_size = max(real_size, chunk->addr + chunk->size); + real_memory_size = max(real_memory_size, + chunk->addr + chunk->size); if (chunk->addr >= max_mem) { memset(chunk, 0, sizeof(*chunk)); continue; @@ -765,6 +793,7 @@ setup_arch(char **cmdline_p) parse_early_param(); + setup_ipl_info(); setup_memory_end(); setup_addressing_mode(); setup_memory(); @@ -782,6 +811,9 @@ setup_arch(char **cmdline_p) /* Setup default console */ conmode_default(); + + /* Setup zfcpdump support */ + setup_zfcpdump(console_devno); } void print_cpu_info(struct cpuinfo_S390 *cpuinfo) diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 
97764f710bb7..7c0143fdf710 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -40,6 +41,7 @@ #include #include #include +#include extern volatile int __cpu_logical_map[]; @@ -395,6 +397,65 @@ void smp_ctl_clear_bit(int cr, int bit) on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); } +#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) + +/* + * zfcpdump_prefix_array holds prefix registers for the following scenario: + * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to + * save its prefix registers, since they get lost, when switching from 31 bit + * to 64 bit. + */ +unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \ + __attribute__((__section__(".data"))); + +static void __init smp_get_save_areas(void) +{ + unsigned int cpu, cpu_num, rc; + __u16 boot_cpu_addr; + + if (ipl_info.type != IPL_TYPE_FCP_DUMP) + return; + boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; + cpu_num = 1; + for (cpu = 0; cpu <= 65535; cpu++) { + if ((u16) cpu == boot_cpu_addr) + continue; + __cpu_logical_map[1] = (__u16) cpu; + if (signal_processor(1, sigp_sense) == sigp_not_operational) + continue; + if (cpu_num >= NR_CPUS) { + printk("WARNING: Registers for cpu %i are not " + "saved, since dump kernel was compiled with" + "NR_CPUS=%i!\n", cpu_num, NR_CPUS); + continue; + } + zfcpdump_save_areas[cpu_num] = + alloc_bootmem(sizeof(union save_area)); + while (1) { + rc = signal_processor(1, sigp_stop_and_store_status); + if (rc != sigp_busy) + break; + cpu_relax(); + } + memcpy(zfcpdump_save_areas[cpu_num], + (void *)(unsigned long) store_prefix() + + SAVE_AREA_BASE, SAVE_AREA_SIZE); +#ifdef __s390x__ + /* copy original prefix register */ + zfcpdump_save_areas[cpu_num]->s390x.pref_reg = + zfcpdump_prefix_array[cpu_num]; +#endif + cpu_num++; + } +} + +union save_area *zfcpdump_save_areas[NR_CPUS + 1]; +EXPORT_SYMBOL_GPL(zfcpdump_save_areas); + +#else +#define smp_get_save_areas() do { } while (0) +#endif + /* * Lets check how many CPUs we have. 
*/ @@ -589,6 +650,7 @@ void __init smp_setup_cpu_possible_map(void) { unsigned int phy_cpus, pos_cpus, cpu; + smp_get_save_areas(); phy_cpus = smp_count_cpus(); pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS); diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 5fd581c22db3..a0f6db21855a 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -29,3 +29,6 @@ obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o obj-$(CONFIG_MONREADER) += monreader.o obj-$(CONFIG_MONWRITER) += monwriter.o + +zcore_mod-objs := sclp_sdias.o zcore.o +obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 7d29ab45a6ed..6402e943436f 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -27,6 +27,7 @@ #define EvTyp_CntlProgIdent 0x0B #define EvTyp_SigQuiesce 0x1D #define EvTyp_VT220Msg 0x1A +#define EvTyp_SDIAS 0x1C #define EvTyp_OpCmd_Mask 0x80000000 #define EvTyp_Msg_Mask 0x40000000 @@ -36,6 +37,7 @@ #define EvTyp_CtlProgIdent_Mask 0x00200000 #define EvTyp_SigQuiesce_Mask 0x00000008 #define EvTyp_VT220Msg_Mask 0x00000040 +#define EvTyp_SDIAS_Mask 0x00000010 #define GnrlMsgFlgs_DOM 0x8000 #define GnrlMsgFlgs_SndAlrm 0x4000 diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c new file mode 100644 index 000000000000..06a4d0897232 --- /dev/null +++ b/drivers/s390/char/sclp_sdias.c @@ -0,0 +1,255 @@ +/* + * Sclp "store data in absolut storage" + * + * Copyright IBM Corp. 2003,2007 + * Author(s): Michael Holzheu + */ + +#include +#include +#include +#include +#include "sclp.h" +#include "sclp_rw.h" + +#define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x) +#define ERROR_MSG(x...) printk ( KERN_ALERT "SDIAS: " x ) + +#define SDIAS_RETRIES 300 +#define SDIAS_SLEEP_TICKS 50 + +#define EQ_STORE_DATA 0x0 +#define EQ_SIZE 0x1 +#define DI_FCP_DUMP 0x0 +#define ASA_SIZE_32 0x0 +#define ASA_SIZE_64 0x1 +#define EVSTATE_ALL_STORED 0x0 +#define EVSTATE_NO_DATA 0x3 +#define EVSTATE_PART_STORED 0x10 + +static struct debug_info *sdias_dbf; + +static struct sclp_register sclp_sdias_register = { + .send_mask = EvTyp_SDIAS_Mask, +}; + +struct sdias_evbuf { + struct evbuf_header hdr; + u8 event_qual; + u8 data_id; + u64 reserved2; + u32 event_id; + u16 reserved3; + u8 asa_size; + u8 event_status; + u32 reserved4; + u32 blk_cnt; + u64 asa; + u32 reserved5; + u32 fbn; + u32 reserved6; + u32 lbn; + u16 reserved7; + u16 dbs; +} __attribute__((packed)); + +struct sdias_sccb { + struct sccb_header hdr; + struct sdias_evbuf evbuf; +} __attribute__((packed)); + +static struct sdias_sccb sccb __attribute__((aligned(4096))); + +static int sclp_req_done; +static wait_queue_head_t sdias_wq; +static DEFINE_MUTEX(sdias_mutex); + +static void sdias_callback(struct sclp_req *request, void *data) +{ + struct sdias_sccb *sccb; + + sccb = (struct sdias_sccb *) request->sccb; + sclp_req_done = 1; + wake_up(&sdias_wq); /* Inform caller, that request is complete */ + TRACE("callback done\n"); +} + +static int sdias_sclp_send(struct sclp_req *req) +{ + int retries; + int rc; + + for (retries = SDIAS_RETRIES; retries; retries--) { + sclp_req_done = 0; + TRACE("add request\n"); + rc = sclp_add_request(req); + if (rc) { + /* not initiated, wait some time and retry */ + set_current_state(TASK_INTERRUPTIBLE); + TRACE("add request failed: rc = %i\n",rc); + schedule_timeout(SDIAS_SLEEP_TICKS); + continue; + } + /* initiated, wait for completion of service call */ + 
wait_event(sdias_wq, (sclp_req_done == 1)); + if (req->status == SCLP_REQ_FAILED) { + TRACE("sclp request failed\n"); + rc = -EIO; + continue; + } + TRACE("request done\n"); + break; + } + return rc; +} + +/* + * Get number of blocks (4K) available in the HSA + */ +int sclp_sdias_blk_count(void) +{ + struct sclp_req request; + int rc; + + mutex_lock(&sdias_mutex); + + memset(&sccb, 0, sizeof(sccb)); + memset(&request, 0, sizeof(request)); + + sccb.hdr.length = sizeof(sccb); + sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); + sccb.evbuf.hdr.type = EvTyp_SDIAS; + sccb.evbuf.event_qual = EQ_SIZE; + sccb.evbuf.data_id = DI_FCP_DUMP; + sccb.evbuf.event_id = 4712; + sccb.evbuf.dbs = 1; + + request.sccb = &sccb; + request.command = SCLP_CMDW_WRITE_EVENT_DATA; + request.status = SCLP_REQ_FILLED; + request.callback = sdias_callback; + + rc = sdias_sclp_send(&request); + if (rc) { + ERROR_MSG("sclp_send failed for get_nr_blocks\n"); + goto out; + } + if (sccb.hdr.response_code != 0x0020) { + TRACE("send failed: %x\n", sccb.hdr.response_code); + rc = -EIO; + goto out; + } + + switch (sccb.evbuf.event_status) { + case 0: + rc = sccb.evbuf.blk_cnt; + break; + default: + ERROR_MSG("SCLP error: %x\n", sccb.evbuf.event_status); + rc = -EIO; + goto out; + } + TRACE("%i blocks\n", rc); +out: + mutex_unlock(&sdias_mutex); + return rc; +} + +/* + * Copy from HSA to absolute storage (not reentrant): + * + * @dest : Address of buffer where data should be copied + * @start_blk: Start Block (beginning with 1) + * @nr_blks : Number of 4K blocks to copy + * + * Return Value: 0 : Requested 'number' of blocks of data copied + * <0: ERROR - negative event status + */ +int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) +{ + struct sclp_req request; + int rc; + + mutex_lock(&sdias_mutex); + + memset(&sccb, 0, sizeof(sccb)); + memset(&request, 0, sizeof(request)); + + sccb.hdr.length = sizeof(sccb); + sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); + sccb.evbuf.hdr.type = EvTyp_SDIAS; + sccb.evbuf.hdr.flags = 0; + sccb.evbuf.event_qual = EQ_STORE_DATA; + sccb.evbuf.data_id = DI_FCP_DUMP; + sccb.evbuf.event_id = 4712; +#ifdef __s390x__ + sccb.evbuf.asa_size = ASA_SIZE_64; +#else + sccb.evbuf.asa_size = ASA_SIZE_32; +#endif + sccb.evbuf.event_status = 0; + sccb.evbuf.blk_cnt = nr_blks; + sccb.evbuf.asa = (unsigned long)dest; + sccb.evbuf.fbn = start_blk; + sccb.evbuf.lbn = 0; + sccb.evbuf.dbs = 1; + + request.sccb = &sccb; + request.command = SCLP_CMDW_WRITE_EVENT_DATA; + request.status = SCLP_REQ_FILLED; + request.callback = sdias_callback; + + rc = sdias_sclp_send(&request); + if (rc) { + ERROR_MSG("sclp_send failed: %x\n", rc); + goto out; + } + if (sccb.hdr.response_code != 0x0020) { + TRACE("copy failed: %x\n", sccb.hdr.response_code); + rc = -EIO; + goto out; + } + + switch (sccb.evbuf.event_status) { + case EVSTATE_ALL_STORED: + TRACE("all stored\n"); + case EVSTATE_PART_STORED: + TRACE("part stored: %i\n", sccb.evbuf.blk_cnt); + break; + case EVSTATE_NO_DATA: + TRACE("no data\n"); + default: + ERROR_MSG("Error from SCLP while copying hsa. 
" + "Event status = %x\n", + sccb.evbuf.event_status); + rc = -EIO; + } +out: + mutex_unlock(&sdias_mutex); + return rc; +} + +int __init sdias_init(void) +{ + int rc; + + if (ipl_info.type != IPL_TYPE_FCP_DUMP) + return 0; + sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long)); + debug_register_view(sdias_dbf, &debug_sprintf_view); + debug_set_level(sdias_dbf, 6); + rc = sclp_register(&sclp_sdias_register); + if (rc) { + ERROR_MSG("sclp register failed\n"); + return rc; + } + init_waitqueue_head(&sdias_wq); + TRACE("init done\n"); + return 0; +} + +void __exit sdias_exit(void) +{ + debug_unregister(sdias_dbf); + sclp_unregister(&sclp_sdias_register); +} diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c new file mode 100644 index 000000000000..89d439316a53 --- /dev/null +++ b/drivers/s390/char/zcore.c @@ -0,0 +1,651 @@ +/* + * zcore module to export memory content and register sets for creating system + * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same + * dump format as s390 standalone dumps. + * + * For more information please refer to Documentation/s390/zfcpdump.txt + * + * Copyright IBM Corp. 2003,2007 + * Author(s): Michael Holzheu + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x) +#define MSG(x...) printk( KERN_ALERT x ) +#define ERROR_MSG(x...) printk ( KERN_ALERT "DUMP: " x ) + +#define TO_USER 0 +#define TO_KERNEL 1 + +enum arch_id { + ARCH_S390 = 0, + ARCH_S390X = 1, +}; + +/* dump system info */ + +struct sys_info { + enum arch_id arch; + unsigned long sa_base; + u32 sa_size; + int cpu_map[NR_CPUS]; + unsigned long mem_size; + union save_area lc_mask; +}; + +static struct sys_info sys_info; +static struct debug_info *zcore_dbf; +static int hsa_available; +static struct dentry *zcore_dir; +static struct dentry *zcore_file; + +/* + * Copy memory from HSA to kernel or user memory (not reentrant): + * + * @dest: Kernel or user buffer where memory should be copied to + * @src: Start address within HSA where data should be copied + * @count: Size of buffer, which should be copied + * @mode: Either TO_KERNEL or TO_USER + */ +static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode) +{ + int offs, blk_num; + static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); + + if (count == 0) + return 0; + + /* copy first block */ + offs = 0; + if ((src % PAGE_SIZE) != 0) { + blk_num = src / PAGE_SIZE + 2; + if (sclp_sdias_copy(buf, blk_num, 1)) { + TRACE("sclp_sdias_copy() failed\n"); + return -EIO; + } + offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count); + if (mode == TO_USER) { + if (copy_to_user((__force __user void*) dest, + buf + (src % PAGE_SIZE), offs)) + return -EFAULT; + } else + memcpy(dest, buf + (src % PAGE_SIZE), offs); + } + if (offs == count) + goto out; + + /* copy middle */ + for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) { + blk_num = (src + offs) / PAGE_SIZE + 2; + if (sclp_sdias_copy(buf, blk_num, 1)) { + TRACE("sclp_sdias_copy() failed\n"); + return -EIO; + } + if (mode == TO_USER) { + if (copy_to_user((__force __user void*) dest + offs, + buf, PAGE_SIZE)) + return -EFAULT; + } else + memcpy(dest + offs, buf, PAGE_SIZE); + } + if (offs == count) + goto out; + + /* copy last block */ + blk_num = (src + offs) / PAGE_SIZE + 2; + if (sclp_sdias_copy(buf, blk_num, 1)) { + TRACE("sclp_sdias_copy() failed\n"); + return -EIO; + } + if (mode == TO_USER) 
{ + if (copy_to_user((__force __user void*) dest + offs, buf, + PAGE_SIZE)) + return -EFAULT; + } else + memcpy(dest + offs, buf, count - offs); +out: + return 0; +} + +static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count) +{ + return memcpy_hsa((void __force *) dest, src, count, TO_USER); +} + +static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count) +{ + return memcpy_hsa(dest, src, count, TO_KERNEL); +} + +static int memcpy_real(void *dest, unsigned long src, size_t count) +{ + unsigned long flags; + int rc = -EFAULT; + register unsigned long _dest asm("2") = (unsigned long) dest; + register unsigned long _len1 asm("3") = (unsigned long) count; + register unsigned long _src asm("4") = src; + register unsigned long _len2 asm("5") = (unsigned long) count; + + if (count == 0) + return 0; + flags = __raw_local_irq_stnsm(0xf8); /* switch to real mode */ + asm volatile ( + "0: mvcle %1,%2,0x0\n" + "1: jo 0b\n" + " lhi %0,0x0\n" + "2:\n" + EX_TABLE(1b,2b) + : "+d" (rc) + : "d" (_dest), "d" (_src), "d" (_len1), "d" (_len2) + : "cc", "memory"); + __raw_local_irq_ssm(flags); + + return rc; +} + +static int memcpy_real_user(__user void *dest, unsigned long src, size_t count) +{ + static char buf[4096]; + int offs = 0, size; + + while (offs < count) { + size = min(sizeof(buf), count - offs); + if (memcpy_real(buf, src + offs, size)) + return -EFAULT; + if (copy_to_user(dest + offs, buf, size)) + return -EFAULT; + offs += size; + } + return 0; +} + +#ifdef __s390x__ +/* + * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info + */ +static void __init s390x_to_s390_regs(union save_area *out, union save_area *in, + int cpu) +{ + int i; + + for (i = 0; i < 16; i++) { + out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff; + out->s390.acc_regs[i] = in->s390x.acc_regs[i]; + out->s390.ctrl_regs[i] = + in->s390x.ctrl_regs[i] & 0x00000000ffffffff; + } + /* locore for 31 bit has only space for fpregs 0,2,4,6 */ + out->s390.fp_regs[0] = in->s390x.fp_regs[0]; + out->s390.fp_regs[1] = in->s390x.fp_regs[2]; + out->s390.fp_regs[2] = in->s390x.fp_regs[4]; + out->s390.fp_regs[3] = in->s390x.fp_regs[6]; + memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4); + out->s390.psw[1] |= 0x8; /* set bit 12 */ + memcpy(&(out->s390.psw[4]),&(in->s390x.psw[12]), 4); + out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */ + out->s390.pref_reg = in->s390x.pref_reg; + out->s390.timer = in->s390x.timer; + out->s390.clk_cmp = in->s390x.clk_cmp; +} + +static void __init s390x_to_s390_save_areas(void) +{ + int i = 1; + static union save_area tmp; + + while (zfcpdump_save_areas[i]) { + s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i); + memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp)); + i++; + } +} + +#endif /* __s390x__ */ + +static int __init init_cpu_info(enum arch_id arch) +{ + union save_area *sa; + + /* get info for boot cpu from lowcore, stored in the HSA */ + + sa = kmalloc(sizeof(*sa), GFP_KERNEL); + if (!sa) { + ERROR_MSG("kmalloc failed: %s: %i\n",__FUNCTION__, __LINE__); + return -ENOMEM; + } + if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) { + ERROR_MSG("could not copy from HSA\n"); + kfree(sa); + return -EIO; + } + zfcpdump_save_areas[0] = sa; + +#ifdef __s390x__ + /* convert s390x regs to s390, if we are dumping an s390 Linux */ + + if (arch == ARCH_S390) + s390x_to_s390_save_areas(); +#endif + + return 0; +} + +static DEFINE_MUTEX(zcore_mutex); + +#define DUMP_VERSION 0x3 +#define DUMP_MAGIC 0xa8190173618f23fdULL +#define 
DUMP_ARCH_S390X 2 +#define DUMP_ARCH_S390 1 +#define HEADER_SIZE 4096 + +/* dump header dumped according to s390 crash dump format */ + +struct zcore_header { + u64 magic; + u32 version; + u32 header_size; + u32 dump_level; + u32 page_size; + u64 mem_size; + u64 mem_start; + u64 mem_end; + u32 num_pages; + u32 pad1; + u64 tod; + cpuid_t cpu_id; + u32 arch_id; + u32 build_arch; + char pad2[4016]; +} __attribute__((packed,__aligned__(16))); + +static struct zcore_header zcore_header = { + .magic = DUMP_MAGIC, + .version = DUMP_VERSION, + .header_size = 4096, + .dump_level = 0, + .page_size = PAGE_SIZE, + .mem_start = 0, +#ifdef __s390x__ + .build_arch = DUMP_ARCH_S390X, +#else + .build_arch = DUMP_ARCH_S390, +#endif +}; + +/* + * Copy lowcore info to buffer. Use map in order to copy only register parts. + * + * @buf: User buffer + * @sa: Pointer to save area + * @sa_off: Offset in save area to copy + * @len: Number of bytes to copy + */ +static int copy_lc(void __user *buf, void *sa, int sa_off, int len) +{ + int i; + char *lc_mask = (char*)&sys_info.lc_mask; + + for (i = 0; i < len; i++) { + if (!lc_mask[i + sa_off]) + continue; + if (copy_to_user(buf + i, sa + sa_off + i, 1)) + return -EFAULT; + } + return 0; +} + +/* + * Copy lowcores info to memory, if necessary + * + * @buf: User buffer + * @addr: Start address of buffer in dump memory + * @count: Size of buffer + */ +static int zcore_add_lc(char __user *buf, unsigned long start, size_t count) +{ + unsigned long end; + int i = 0; + + if (count == 0) + return 0; + + end = start + count; + while (zfcpdump_save_areas[i]) { + unsigned long cp_start, cp_end; /* copy range */ + unsigned long sa_start, sa_end; /* save area range */ + unsigned long prefix; + unsigned long sa_off, len, buf_off; + + if (sys_info.arch == ARCH_S390) + prefix = zfcpdump_save_areas[i]->s390.pref_reg; + else + prefix = zfcpdump_save_areas[i]->s390x.pref_reg; + + sa_start = prefix + sys_info.sa_base; + sa_end = prefix + sys_info.sa_base + sys_info.sa_size; + + if ((end < sa_start) || (start > sa_end)) + goto next; + cp_start = max(start, sa_start); + cp_end = min(end, sa_end); + + buf_off = cp_start - start; + sa_off = cp_start - sa_start; + len = cp_end - cp_start; + + TRACE("copy_lc for: %lx\n", start); + if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len)) + return -EFAULT; +next: + i++; + } + return 0; +} + +/* + * Read routine for zcore character device + * First 4K are dump header + * Next 32MB are HSA Memory + * Rest is read from absolute Memory + */ +static ssize_t zcore_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + unsigned long mem_start; /* Start address in memory */ + size_t mem_offs; /* Offset in dump memory */ + size_t hdr_count; /* Size of header part of output buffer */ + size_t size; + int rc; + + mutex_lock(&zcore_mutex); + + if (*ppos > (sys_info.mem_size + HEADER_SIZE)) { + rc = -EINVAL; + goto fail; + } + + count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos)); + + /* Copy dump header */ + if (*ppos < HEADER_SIZE) { + size = min(count, (size_t) (HEADER_SIZE - *ppos)); + if (copy_to_user(buf, &zcore_header + *ppos, size)) { + rc = -EFAULT; + goto fail; + } + hdr_count = size; + mem_start = 0; + } else { + hdr_count = 0; + mem_start = *ppos - HEADER_SIZE; + } + + mem_offs = 0; + + /* Copy from HSA data */ + if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) { + size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE + - mem_start)); + rc = memcpy_hsa_user(buf + hdr_count, mem_start, size); + 
if (rc) + goto fail; + + mem_offs += size; + } + + /* Copy from real mem */ + size = count - mem_offs - hdr_count; + rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs, + size); + if (rc) + goto fail; + + /* + * Since s390 dump analysis tools like lcrash or crash + * expect register sets in the prefix pages of the cpus, + * we copy them into the read buffer, if necessary. + * buf + hdr_count: Start of memory part of output buffer + * mem_start: Start memory address to copy from + * count - hdr_count: Size of memory area to copy + */ + if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) { + rc = -EFAULT; + goto fail; + } + *ppos += count; +fail: + mutex_unlock(&zcore_mutex); + return (rc < 0) ? rc : count; +} + +static int zcore_open(struct inode *inode, struct file *filp) +{ + if (!hsa_available) + return -ENODATA; + else + return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; +} + +static int zcore_release(struct inode *inode, struct file *filep) +{ + diag308(DIAG308_REL_HSA, NULL); + hsa_available = 0; + return 0; +} + +static loff_t zcore_lseek(struct file *file, loff_t offset, int orig) +{ + loff_t rc; + + mutex_lock(&zcore_mutex); + switch (orig) { + case 0: + file->f_pos = offset; + rc = file->f_pos; + break; + case 1: + file->f_pos += offset; + rc = file->f_pos; + break; + default: + rc = -EINVAL; + } + mutex_unlock(&zcore_mutex); + return rc; +} + +static struct file_operations zcore_fops = { + .owner = THIS_MODULE, + .llseek = zcore_lseek, + .read = zcore_read, + .open = zcore_open, + .release = zcore_release, +}; + + +static void __init set_s390_lc_mask(union save_area *map) +{ + memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save)); + memset(&map->s390.timer, 0xff, sizeof(map->s390.timer)); + memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp)); + memset(&map->s390.psw, 0xff, sizeof(map->s390.psw)); + memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg)); + memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs)); + memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs)); + memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs)); + memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs)); +} + +static void __init set_s390x_lc_mask(union save_area *map) +{ + memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs)); + memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs)); + memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw)); + memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg)); + memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg)); + memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg)); + memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer)); + memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp)); + memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs)); + memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs)); +} + +/* + * Initialize dump globals for a given architecture + */ +static int __init sys_info_init(enum arch_id arch) +{ + switch (arch) { + case ARCH_S390X: + MSG("DETECTED 'S390X (64 bit) OS'\n"); + sys_info.sa_base = SAVE_AREA_BASE_S390X; + sys_info.sa_size = sizeof(struct save_area_s390x); + set_s390x_lc_mask(&sys_info.lc_mask); + break; + case ARCH_S390: + MSG("DETECTED 'S390 (32 bit) OS'\n"); + sys_info.sa_base = SAVE_AREA_BASE_S390; + sys_info.sa_size = sizeof(struct save_area_s390); + set_s390_lc_mask(&sys_info.lc_mask); + break; + default: + ERROR_MSG("unknown architecture 0x%x.\n",arch); + return -EINVAL; + } + 
sys_info.arch = arch; + if (init_cpu_info(arch)) { + ERROR_MSG("get cpu info failed\n"); + return -ENOMEM; + } + sys_info.mem_size = real_memory_size; + + return 0; +} + +static int __init check_sdias(void) +{ + int rc, act_hsa_size; + + rc = sclp_sdias_blk_count(); + if (rc < 0) { + ERROR_MSG("Could not determine HSA size\n"); + return rc; + } + act_hsa_size = (rc - 1) * PAGE_SIZE; + if (act_hsa_size < ZFCPDUMP_HSA_SIZE) { + ERROR_MSG("HSA size too small: %i\n", act_hsa_size); + return -EINVAL; + } + return 0; +} + +static void __init zcore_header_init(int arch, struct zcore_header *hdr) +{ + if (arch == ARCH_S390X) + hdr->arch_id = DUMP_ARCH_S390X; + else + hdr->arch_id = DUMP_ARCH_S390; + hdr->mem_size = sys_info.mem_size; + hdr->mem_end = sys_info.mem_size; + hdr->num_pages = sys_info.mem_size / PAGE_SIZE; + hdr->tod = get_clock(); + get_cpu_id(&hdr->cpu_id); +} + +extern int sdias_init(void); + +static int __init zcore_init(void) +{ + unsigned char arch; + int rc; + + if (ipl_info.type != IPL_TYPE_FCP_DUMP) + return -ENODATA; + + zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long)); + debug_register_view(zcore_dbf, &debug_sprintf_view); + debug_set_level(zcore_dbf, 6); + + TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno); + TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn); + TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun); + + rc = sdias_init(); + if (rc) + goto fail; + + rc = check_sdias(); + if (rc) { + ERROR_MSG("Dump initialization failed\n"); + goto fail; + } + + rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1); + if (rc) { + ERROR_MSG("sdial memcpy for arch id failed\n"); + goto fail; + } + +#ifndef __s390x__ + if (arch == ARCH_S390X) { + ERROR_MSG("32 bit dumper can't dump 64 bit system!\n"); + rc = -EINVAL; + goto fail; + } +#endif + + rc = sys_info_init(arch); + if (rc) { + ERROR_MSG("arch init failed\n"); + goto fail; + } + + zcore_header_init(arch, &zcore_header); + + zcore_dir = debugfs_create_dir("zcore" , NULL); + if (!zcore_dir) { + rc = -ENOMEM; + goto fail; + } + zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL, + &zcore_fops); + if (!zcore_file) { + debugfs_remove(zcore_dir); + rc = -ENOMEM; + goto fail; + } + hsa_available = 1; + return 0; + +fail: + diag308(DIAG308_REL_HSA, NULL); + return rc; +} + +extern void sdias_exit(void); + +static void __exit zcore_exit(void) +{ + debug_unregister(zcore_dbf); + sdias_exit(); + diag308(DIAG308_REL_HSA, NULL); +} + +MODULE_AUTHOR("Copyright IBM Corp. 
2003,2007"); +MODULE_DESCRIPTION("zcore module for zfcpdump support"); +MODULE_LICENSE("GPL"); + +subsys_initcall(zcore_init); +module_exit(zcore_exit); diff --git a/include/asm-s390/ipl.h b/include/asm-s390/ipl.h index 15bb0b529551..bdcd448d43fb 100644 --- a/include/asm-s390/ipl.h +++ b/include/asm-s390/ipl.h @@ -8,6 +8,8 @@ #define _ASM_S390_IPL_H #include +#include +#include #define IPL_PARMBLOCK_ORIGIN 0x2000 @@ -79,6 +81,7 @@ struct ipl_parameter_block { extern u32 ipl_flags; extern u32 dump_prefix_page; + extern void do_reipl(void); extern void ipl_save_parameters(void); @@ -88,6 +91,35 @@ enum { IPL_NSS_VALID = 4, }; +enum ipl_type { + IPL_TYPE_UNKNOWN = 1, + IPL_TYPE_CCW = 2, + IPL_TYPE_FCP = 4, + IPL_TYPE_FCP_DUMP = 8, + IPL_TYPE_NSS = 16, +}; + +struct ipl_info +{ + enum ipl_type type; + union { + struct { + struct ccw_dev_id dev_id; + } ccw; + struct { + struct ccw_dev_id dev_id; + u64 wwpn; + u64 lun; + } fcp; + struct { + char name[NSS_NAME_SIZE + 1]; + } nss; + } data; +}; + +extern struct ipl_info ipl_info; +extern void setup_ipl_info(void); + /* * DIAG 308 support */ diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h index 4a31d0a7ee83..ffc9788a21a7 100644 --- a/include/asm-s390/lowcore.h +++ b/include/asm-s390/lowcore.h @@ -147,6 +147,52 @@ void pgm_check_handler(void); void mcck_int_handler(void); void io_int_handler(void); +struct save_area_s390 { + u32 ext_save; + u64 timer; + u64 clk_cmp; + u8 pad1[24]; + u8 psw[8]; + u32 pref_reg; + u8 pad2[20]; + u32 acc_regs[16]; + u64 fp_regs[4]; + u32 gp_regs[16]; + u32 ctrl_regs[16]; +} __attribute__((packed)); + +struct save_area_s390x { + u64 fp_regs[16]; + u64 gp_regs[16]; + u8 psw[16]; + u8 pad1[8]; + u32 pref_reg; + u32 fp_ctrl_reg; + u8 pad2[4]; + u32 tod_reg; + u64 timer; + u64 clk_cmp; + u8 pad3[8]; + u32 acc_regs[16]; + u64 ctrl_regs[16]; +} __attribute__((packed)); + +union save_area { + struct save_area_s390 s390; + struct save_area_s390x s390x; +}; + +#define SAVE_AREA_BASE_S390 0xd4 +#define SAVE_AREA_BASE_S390X 0x1200 + +#ifndef __s390x__ +#define SAVE_AREA_SIZE sizeof(struct save_area_s390) +#define SAVE_AREA_BASE SAVE_AREA_BASE_S390 +#else +#define SAVE_AREA_SIZE sizeof(struct save_area_s390x) +#define SAVE_AREA_BASE SAVE_AREA_BASE_S390X +#endif + struct _lowcore { #ifndef __s390x__ diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h index 3996daaa8f54..21ed64773210 100644 --- a/include/asm-s390/sclp.h +++ b/include/asm-s390/sclp.h @@ -44,6 +44,8 @@ struct sclp_chp_info { extern struct sclp_readinfo_sccb s390_readinfo_sccb; extern void sclp_readinfo_early(void); +extern int sclp_sdias_blk_count(void); +extern int sclp_sdias_copy(void *dest, int blk_num, int nr_blks); extern int sclp_chp_configure(struct chp_id chpid); extern int sclp_chp_deconfigure(struct chp_id chpid); extern int sclp_chp_read_info(struct sclp_chp_info *info); diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h index 44c7aee2bd34..a76a6b8fd887 100644 --- a/include/asm-s390/setup.h +++ b/include/asm-s390/setup.h @@ -40,6 +40,7 @@ struct mem_chunk { }; extern struct mem_chunk memory_chunk[]; +extern unsigned long real_memory_size; #ifdef CONFIG_S390_SWITCH_AMODE extern unsigned int switch_amode; @@ -77,6 +78,7 @@ extern unsigned long machine_flags; #endif /* __s390x__ */ #define MACHINE_HAS_SCLP (!MACHINE_IS_P390) +#define ZFCPDUMP_HSA_SIZE (32UL<<20) /* * Console mode. 
Override with conmode= diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h index b957e4cda464..676e94ee15f0 100644 --- a/include/asm-s390/smp.h +++ b/include/asm-s390/smp.h @@ -119,4 +119,5 @@ static inline void smp_send_stop(void) #define smp_setup_cpu_possible_map() do { } while (0) #endif +extern union save_area *zfcpdump_save_areas[NR_CPUS + 1]; #endif -- cgit v1.2.3-59-g8ed1b From 131a395c18af43d824841642038e5cc0d48f0bd2 Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Fri, 27 Apr 2007 16:01:54 +0200 Subject: [S390] crypto: cleanup. Cleanup code and remove obsolete documentation. Signed-off-by: Jan Glauber Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- Documentation/s390/crypto/crypto-API.txt | 83 -------------------- arch/s390/crypto/sha1_s390.c | 129 ++++++++++++++++--------------- arch/s390/crypto/sha256_s390.c | 38 ++++----- 3 files changed, 81 insertions(+), 169 deletions(-) delete mode 100644 Documentation/s390/crypto/crypto-API.txt (limited to 'arch') diff --git a/Documentation/s390/crypto/crypto-API.txt b/Documentation/s390/crypto/crypto-API.txt deleted file mode 100644 index 71ae6ca9f2c2..000000000000 --- a/Documentation/s390/crypto/crypto-API.txt +++ /dev/null @@ -1,83 +0,0 @@ -crypto-API support for z990 Message Security Assist (MSA) instructions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -AUTHOR: Thomas Spatzier (tspat@de.ibm.com) - - -1. Introduction crypto-API -~~~~~~~~~~~~~~~~~~~~~~~~~~ -See Documentation/crypto/api-intro.txt for an introduction/description of the -kernel crypto API. -According to api-intro.txt support for z990 crypto instructions has been added -in the algorithm api layer of the crypto API. Several files containing z990 -optimized implementations of crypto algorithms are placed in the -arch/s390/crypto directory. - - -2. Probing for availability of MSA -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -It should be possible to use Kernels with the z990 crypto implementations both -on machines with MSA available and on those without MSA (pre z990 or z990 -without MSA). Therefore a simple probing mechanism has been implemented: -In the init function of each crypto module the availability of MSA and of the -respective crypto algorithm in particular will be tested. If the algorithm is -available the module will load and register its algorithm with the crypto API. - -If the respective crypto algorithm is not available, the init function will -return -ENOSYS. In that case a fallback to the standard software implementation -of the crypto algorithm must be taken ( -> the standard crypto modules are -also built when compiling the kernel). - - -3. Ensuring z990 crypto module preference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -If z990 crypto instructions are available the optimized modules should be -preferred instead of standard modules. - -3.1. compiled-in modules -~~~~~~~~~~~~~~~~~~~~~~~~ -For compiled-in modules it has to be ensured that the z990 modules are linked -before the standard crypto modules. Then, on system startup the init functions -of z990 crypto modules will be called first and query for availability of z990 -crypto instructions. If instruction is available, the z990 module will register -its crypto algorithm implementation -> the load of the standard module will fail -since the algorithm is already registered. -If z990 crypto instruction is not available the load of the z990 module will -fail -> the standard module will load and register its algorithm. - -3.2. 
dynamic modules -~~~~~~~~~~~~~~~~~~~~ -A system administrator has to take care of giving preference to z990 crypto -modules. If MSA is available appropriate lines have to be added to -/etc/modprobe.conf. - -Example: z990 crypto instruction for SHA1 algorithm is available - - add the following line to /etc/modprobe.conf (assuming the - z990 crypto modules for SHA1 is called sha1_z990): - - alias sha1 sha1_z990 - - -> when the sha1 algorithm is requested through the crypto API - (which has a module autoloader) the z990 module will be loaded. - -TBD: a userspace module probing mechanism - something like 'probe sha1 sha1_z990 sha1' in modprobe.conf - -> try module sha1_z990, if it fails to load standard module sha1 - the 'probe' statement is currently not supported in modprobe.conf - - -4. Currently implemented z990 crypto algorithms -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The following crypto algorithms with z990 MSA support are currently implemented. -The name of each algorithm under which it is registered in crypto API and the -name of the respective module is given in square brackets. - -- SHA1 Digest Algorithm [sha1 -> sha1_z990] -- DES Encrypt/Decrypt Algorithm (64bit key) [des -> des_z990] -- Triple DES Encrypt/Decrypt Algorithm (128bit key) [des3_ede128 -> des_z990] -- Triple DES Encrypt/Decrypt Algorithm (192bit key) [des3_ede -> des_z990] - -In order to load, for example, the sha1_z990 module when the sha1 algorithm is -requested (see 3.2.) add 'alias sha1 sha1_z990' to /etc/modprobe.conf. - diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index 969639f31977..af4460ec381f 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c @@ -25,99 +25,100 @@ */ #include #include -#include #include -#include -#include + #include "crypt_s390.h" #define SHA1_DIGEST_SIZE 20 #define SHA1_BLOCK_SIZE 64 -struct crypt_s390_sha1_ctx { - u64 count; +struct s390_sha1_ctx { + u64 count; /* message length */ u32 state[5]; - u32 buf_len; - u8 buffer[2 * SHA1_BLOCK_SIZE]; + u8 buf[2 * SHA1_BLOCK_SIZE]; }; static void sha1_init(struct crypto_tfm *tfm) { - struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm); - - ctx->state[0] = 0x67452301; - ctx->state[1] = 0xEFCDAB89; - ctx->state[2] = 0x98BADCFE; - ctx->state[3] = 0x10325476; - ctx->state[4] = 0xC3D2E1F0; - - ctx->count = 0; - ctx->buf_len = 0; + struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); + + sctx->state[0] = 0x67452301; + sctx->state[1] = 0xEFCDAB89; + sctx->state[2] = 0x98BADCFE; + sctx->state[3] = 0x10325476; + sctx->state[4] = 0xC3D2E1F0; + sctx->count = 0; } static void sha1_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) { - struct crypt_s390_sha1_ctx *sctx; - long imd_len; - - sctx = crypto_tfm_ctx(tfm); - sctx->count += len * 8; /* message bit length */ - - /* anything in buffer yet? -> must be completed */ - if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) { - /* complete full block and hash */ - memcpy(sctx->buffer + sctx->buf_len, data, - SHA1_BLOCK_SIZE - sctx->buf_len); - crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, - SHA1_BLOCK_SIZE); - data += SHA1_BLOCK_SIZE - sctx->buf_len; - len -= SHA1_BLOCK_SIZE - sctx->buf_len; - sctx->buf_len = 0; + struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); + unsigned int index; + int ret; + + /* how much is already in the buffer? 
*/ + index = sctx->count & 0x3f; + + sctx->count += len; + + if (index + len < SHA1_BLOCK_SIZE) + goto store; + + /* process one stored block */ + if (index) { + memcpy(sctx->buf + index, data, SHA1_BLOCK_SIZE - index); + ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf, + SHA1_BLOCK_SIZE); + BUG_ON(ret != SHA1_BLOCK_SIZE); + data += SHA1_BLOCK_SIZE - index; + len -= SHA1_BLOCK_SIZE - index; } - /* rest of data contains full blocks? */ - imd_len = len & ~0x3ful; - if (imd_len) { - crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len); - data += imd_len; - len -= imd_len; + /* process as many blocks as possible */ + if (len >= SHA1_BLOCK_SIZE) { + ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, + len & ~(SHA1_BLOCK_SIZE - 1)); + BUG_ON(ret != (len & ~(SHA1_BLOCK_SIZE - 1))); + data += ret; + len -= ret; } - /* anything left? store in buffer */ - if (len) { - memcpy(sctx->buffer + sctx->buf_len , data, len); - sctx->buf_len += len; - } -} +store: + /* anything left? */ + if (len) + memcpy(sctx->buf + index , data, len); +} -static void pad_message(struct crypt_s390_sha1_ctx* sctx) +/* Add padding and return the message digest. */ +static void sha1_final(struct crypto_tfm *tfm, u8 *out) { - int index; + struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); + u64 bits; + unsigned int index, end; + int ret; + + /* must perform manual padding */ + index = sctx->count & 0x3f; + end = (index < 56) ? SHA1_BLOCK_SIZE : (2 * SHA1_BLOCK_SIZE); - index = sctx->buf_len; - sctx->buf_len = (sctx->buf_len < 56) ? - SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE; /* start pad with 1 */ - sctx->buffer[index] = 0x80; + sctx->buf[index] = 0x80; + /* pad with zeros */ index++; - memset(sctx->buffer + index, 0x00, sctx->buf_len - index); - /* append length */ - memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count, - sizeof sctx->count); -} + memset(sctx->buf + index, 0x00, end - index - 8); -/* Add padding and return the message digest. */ -static void sha1_final(struct crypto_tfm *tfm, u8 *out) -{ - struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); + /* append message length */ + bits = sctx->count * 8; + memcpy(sctx->buf + end - 8, &bits, sizeof(bits)); + + ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf, end); + BUG_ON(ret != end); - /* must perform manual padding */ - pad_message(sctx); - crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len); /* copy digest to out */ memcpy(out, sctx->state, SHA1_DIGEST_SIZE); + /* wipe context */ memset(sctx, 0, sizeof *sctx); } @@ -128,7 +129,7 @@ static struct crypto_alg alg = { .cra_priority = CRYPT_S390_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_DIGEST, .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx), + .cra_ctxsize = sizeof(struct s390_sha1_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(alg.cra_list), .cra_u = { .digest = { diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c index 78436c696d37..2ced3330bce0 100644 --- a/arch/s390/crypto/sha256_s390.c +++ b/arch/s390/crypto/sha256_s390.c @@ -26,7 +26,7 @@ #define SHA256_BLOCK_SIZE 64 struct s390_sha256_ctx { - u64 count; + u64 count; /* message length */ u32 state[8]; u8 buf[2 * SHA256_BLOCK_SIZE]; }; @@ -54,10 +54,9 @@ static void sha256_update(struct crypto_tfm *tfm, const u8 *data, int ret; /* how much is already in the buffer? 
*/ - index = sctx->count / 8 & 0x3f; + index = sctx->count & 0x3f; - /* update message bit length */ - sctx->count += len * 8; + sctx->count += len; if ((index + len) < SHA256_BLOCK_SIZE) goto store; @@ -87,12 +86,17 @@ store: memcpy(sctx->buf + index , data, len); } -static void pad_message(struct s390_sha256_ctx* sctx) +/* Add padding and return the message digest */ +static void sha256_final(struct crypto_tfm *tfm, u8 *out) { - int index, end; + struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm); + u64 bits; + unsigned int index, end; + int ret; - index = sctx->count / 8 & 0x3f; - end = index < 56 ? SHA256_BLOCK_SIZE : 2 * SHA256_BLOCK_SIZE; + /* must perform manual padding */ + index = sctx->count & 0x3f; + end = (index < 56) ? SHA256_BLOCK_SIZE : (2 * SHA256_BLOCK_SIZE); /* start pad with 1 */ sctx->buf[index] = 0x80; @@ -102,21 +106,11 @@ static void pad_message(struct s390_sha256_ctx* sctx) memset(sctx->buf + index, 0x00, end - index - 8); /* append message length */ - memcpy(sctx->buf + end - 8, &sctx->count, sizeof sctx->count); - - sctx->count = end * 8; -} - -/* Add padding and return the message digest */ -static void sha256_final(struct crypto_tfm *tfm, u8 *out) -{ - struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm); - - /* must perform manual padding */ - pad_message(sctx); + bits = sctx->count * 8; + memcpy(sctx->buf + end - 8, &bits, sizeof(bits)); - crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, - sctx->count / 8); + ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, end); + BUG_ON(ret != end); /* copy digest to out */ memcpy(out, sctx->state, SHA256_DIGEST_SIZE); -- cgit v1.2.3-59-g8ed1b From db77aa5f3d01fe6a6cc629dbd37936b1fdd129ba Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Fri, 27 Apr 2007 16:01:55 +0200 Subject: [S390] vtime: cleanup per_cpu usage. Replace per_cpu(... 
, smp_processor_id()) with __get_cpu_var() Signed-off-by: Jan Glauber Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/vtime.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 9d5b02801b46..1e1a6ee2cac1 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -128,7 +128,7 @@ static inline void set_vtimer(__u64 expires) S390_lowcore.last_update_timer = expires; /* store expire time for this CPU timer */ - per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires; + __get_cpu_var(virt_cpu_timer).to_expire = expires; } #else static inline void set_vtimer(__u64 expires) @@ -137,7 +137,7 @@ static inline void set_vtimer(__u64 expires) asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); /* store expire time for this CPU timer */ - per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires; + __get_cpu_var(virt_cpu_timer).to_expire = expires; } #endif @@ -145,7 +145,7 @@ static void start_cpu_timer(void) { struct vtimer_queue *vt_list; - vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); + vt_list = &__get_cpu_var(virt_cpu_timer); /* CPU timer interrupt is pending, don't reprogramm it */ if (vt_list->idle & 1LL<<63) @@ -159,7 +159,7 @@ static void stop_cpu_timer(void) { struct vtimer_queue *vt_list; - vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); + vt_list = &__get_cpu_var(virt_cpu_timer); /* nothing to do */ if (list_empty(&vt_list->list)) { @@ -219,7 +219,7 @@ static void do_callbacks(struct list_head *cb_list) if (list_empty(cb_list)) return; - vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); + vt_list = &__get_cpu_var(virt_cpu_timer); list_for_each_entry_safe(event, tmp, cb_list, entry) { fn = event->function; @@ -244,7 +244,6 @@ static void do_callbacks(struct list_head *cb_list) */ static void do_cpu_timer_interrupt(__u16 error_code) { - int cpu; __u64 next, delta; struct vtimer_queue *vt_list; struct vtimer_list *event, *tmp; @@ -253,8 +252,7 @@ static void do_cpu_timer_interrupt(__u16 error_code) struct list_head cb_list; INIT_LIST_HEAD(&cb_list); - cpu = smp_processor_id(); - vt_list = &per_cpu(virt_cpu_timer, cpu); + vt_list = &__get_cpu_var(virt_cpu_timer); /* walk timer list, fire all expired events */ spin_lock(&vt_list->lock); @@ -534,7 +532,7 @@ void init_cpu_vtimer(void) /* enable cpu timer interrupts */ __ctl_set_bit(0,10); - vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); + vt_list = &__get_cpu_var(virt_cpu_timer); INIT_LIST_HEAD(&vt_list->list); spin_lock_init(&vt_list->lock); vt_list->to_expire = 0; -- cgit v1.2.3-59-g8ed1b From 2fc2d1e9ffcde78af7ab63ed640d9a4901797de2 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 27 Apr 2007 16:01:56 +0200 Subject: [S390] Processor degradation notification. Generate uevents for all cpus if cpu capability changes. This can happen e.g. because the cpus are overheating. The cpu capability can be read via /sys/devices/system/cpu/cpuN/capability. 
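As a usage illustration only (not part of the patch), a minimal userspace reader for the new attribute might look like the sketch below. The cpu0 path is just one instance of the cpuN pattern named above, and the program itself is invented for this note; the kernel side, as the diff that follows shows, simply prints the value obtained from get_cpu_capability() with a "%u" format, and sclp_config.c emits a KOBJ_CHANGE uevent for each online cpu when the value changes.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f;
	unsigned int capability;

	/* cpu0 chosen arbitrarily; one capability file exists per online cpu */
	f = fopen("/sys/devices/system/cpu/cpu0/capability", "r");
	if (!f) {
		perror("fopen capability");
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%u", &capability) != 1) {
		fclose(f);
		return EXIT_FAILURE;
	}
	fclose(f);
	/* the value may change at runtime, e.g. when the cpus are overheating */
	printf("cpu0 capability: %u\n", capability);
	return EXIT_SUCCESS;
}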
Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- arch/s390/kernel/smp.c | 48 +++++++++++++++++++++++--- drivers/s390/char/Makefile | 2 +- drivers/s390/char/sclp.h | 2 ++ drivers/s390/char/sclp_config.c | 75 +++++++++++++++++++++++++++++++++++++++++ drivers/s390/sysinfo.c | 18 ++++++++++ include/asm-s390/processor.h | 1 + 6 files changed, 140 insertions(+), 6 deletions(-) create mode 100644 drivers/s390/char/sclp_config.c (limited to 'arch') diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 7c0143fdf710..2c5de92958dd 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -821,19 +821,57 @@ int setup_profiling_timer(unsigned int multiplier) static DEFINE_PER_CPU(struct cpu, cpu_devices); +static ssize_t show_capability(struct sys_device *dev, char *buf) +{ + unsigned int capability; + int rc; + + rc = get_cpu_capability(&capability); + if (rc) + return rc; + return sprintf(buf, "%u\n", capability); +} +static SYSDEV_ATTR(capability, 0444, show_capability, NULL); + +static int __cpuinit smp_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned int)(long)hcpu; + struct cpu *c = &per_cpu(cpu_devices, cpu); + struct sys_device *s = &c->sysdev; + + switch (action) { + case CPU_ONLINE: + if (sysdev_create_file(s, &attr_capability)) + return NOTIFY_BAD; + break; + case CPU_DEAD: + sysdev_remove_file(s, &attr_capability); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata smp_cpu_nb = { + .notifier_call = smp_cpu_notify, +}; + static int __init topology_init(void) { int cpu; - int ret; + + register_cpu_notifier(&smp_cpu_nb); for_each_possible_cpu(cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); + struct sys_device *s = &c->sysdev; c->hotpluggable = 1; - ret = register_cpu(c, cpu); - if (ret) - printk(KERN_WARNING "topology_init: register_cpu %d " - "failed (%d)\n", cpu, ret); + register_cpu(c, cpu); + if (!cpu_online(cpu)) + continue; + s = &c->sysdev; + sysdev_create_file(s, &attr_capability); } return 0; } diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index a0f6db21855a..c210784bdf46 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -3,7 +3,7 @@ # obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ - sclp_info.o sclp_chp.o + sclp_info.o sclp_config.o sclp_chp.o obj-$(CONFIG_TN3270) += raw3270.o obj-$(CONFIG_TN3270_CONSOLE) += con3270.o diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index e03dcf4c5fca..87ac4a3ad49d 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -27,6 +27,7 @@ #define EVTYP_CNTLPROGIDENT 0x0B #define EVTYP_SIGQUIESCE 0x1D #define EVTYP_VT220MSG 0x1A +#define EVTYP_CONFMGMDATA 0x04 #define EVTYP_SDIAS 0x1C #define EVTYP_OPCMD_MASK 0x80000000 @@ -37,6 +38,7 @@ #define EVTYP_CTLPROGIDENT_MASK 0x00200000 #define EVTYP_SIGQUIESCE_MASK 0x00000008 #define EVTYP_VT220MSG_MASK 0x00000040 +#define EVTYP_CONFMGMDATA_MASK 0x10000000 #define EVTYP_SDIAS_MASK 0x00000010 #define GNRLMSGFLGS_DOM 0x8000 diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c new file mode 100644 index 000000000000..5322e5e54a98 --- /dev/null +++ b/drivers/s390/char/sclp_config.c @@ -0,0 +1,75 @@ +/* + * drivers/s390/char/sclp_config.c + * + * Copyright IBM Corp. 
2007 + * Author(s): Heiko Carstens + */ + +#include +#include +#include +#include +#include +#include "sclp.h" + +#define TAG "sclp_config: " + +struct conf_mgm_data { + u8 reserved; + u8 ev_qualifier; +} __attribute__((packed)); + +#define EV_QUAL_CAP_CHANGE 3 + +static struct work_struct sclp_cpu_capability_work; + +static void sclp_cpu_capability_notify(struct work_struct *work) +{ + int cpu; + struct sys_device *sysdev; + + printk(KERN_WARNING TAG "cpu capability changed.\n"); + lock_cpu_hotplug(); + for_each_online_cpu(cpu) { + sysdev = get_cpu_sysdev(cpu); + kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); + } + unlock_cpu_hotplug(); +} + +static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) +{ + struct conf_mgm_data *cdata; + + cdata = (struct conf_mgm_data *)(evbuf + 1); + if (cdata->ev_qualifier == EV_QUAL_CAP_CHANGE) + schedule_work(&sclp_cpu_capability_work); +} + +static struct sclp_register sclp_conf_register = +{ + .receive_mask = EVTYP_CONFMGMDATA_MASK, + .receiver_fn = sclp_conf_receiver_fn, +}; + +static int __init sclp_conf_init(void) +{ + int rc; + + INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify); + + rc = sclp_register(&sclp_conf_register); + if (rc) { + printk(KERN_ERR TAG "failed to register (%d).\n", rc); + return rc; + } + + if (!(sclp_conf_register.sclp_receive_mask & EVTYP_CONFMGMDATA_MASK)) { + printk(KERN_WARNING TAG "no configuration management.\n"); + sclp_unregister(&sclp_conf_register); + rc = -ENOSYS; + } + return rc; +} + +__initcall(sclp_conf_init); diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c index 090743d2f914..19343f9675c3 100644 --- a/drivers/s390/sysinfo.c +++ b/drivers/s390/sysinfo.c @@ -357,6 +357,24 @@ static __init int create_proc_sysinfo(void) __initcall(create_proc_sysinfo); +int get_cpu_capability(unsigned int *capability) +{ + struct sysinfo_1_2_2 *info; + int rc; + + info = (void *) get_zeroed_page(GFP_KERNEL); + if (!info) + return -ENOMEM; + rc = stsi(info, 1, 2, 2); + if (rc == -ENOSYS) + goto out; + rc = 0; + *capability = info->capability; +out: + free_page((unsigned long) info); + return rc; +} + /* * CPU capability might have changed. Therefore recalculate loops_per_jiffy. */ diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index 96a6f80953a9..e0fcea8c64c3 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h @@ -57,6 +57,7 @@ struct cpuinfo_S390 extern void s390_adjust_jiffies(void); extern void print_cpu_info(struct cpuinfo_S390 *); +extern int get_cpu_capability(unsigned int *); /* Lazy FPU handling on uni-processor */ extern struct task_struct *last_task_used_math; -- cgit v1.2.3-59-g8ed1b From ecdcc0234b27472b561378ac59e2beeea06ec6ff Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 27 Apr 2007 16:01:58 +0200 Subject: [S390] Switch etr from tasklet to workqueue. The clock synchronization of the ETR code requires an smp_call_function to synchronize all cpus. Calling smp_call_function from a tasklet is illegal. Replace the tasklet with a job on the global workqueue. ETR work is rare and can be postponed to a be done by a kernel thread. 
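For readers who want the deferral pattern in isolation, the following sketch (identifiers invented for this note, not taken from the kernel tree) shows the shape the patch below adopts: work declared with DECLARE_WORK() and queued with schedule_work() runs in process context on the global workqueue, where smp_call_function() is legal, unlike in a tasklet.

#include <linux/workqueue.h>
#include <linux/smp.h>

static void sync_clock_on_cpu(void *unused)
{
	/* runs on the other online cpus; the real patch syncs the clock here */
}

static void example_work_fn(struct work_struct *work)
{
	/* process context: may sleep and may call smp_call_function() */
	smp_call_function(sync_clock_on_cpu, NULL, 0, 1);
}

static DECLARE_WORK(example_work, example_work_fn);

/* called from the interrupt path, replacing tasklet_hi_schedule() */
static void example_event(void)
{
	schedule_work(&example_work);
}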
Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/time.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index e1ad464b6f20..711dae8da7ad 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -280,7 +280,6 @@ static void clock_comparator_interrupt(__u16 code) } static void etr_reset(void); -static void etr_init(void); static void etr_ext_handler(__u16); /* @@ -355,7 +354,6 @@ void __init time_init(void) #ifdef CONFIG_VIRT_TIMER vtime_init(); #endif - etr_init(); } /* @@ -426,11 +424,11 @@ static struct etr_aib etr_port1; static int etr_port1_uptodate; static unsigned long etr_events; static struct timer_list etr_timer; -static struct tasklet_struct etr_tasklet; static DEFINE_PER_CPU(atomic_t, etr_sync_word); static void etr_timeout(unsigned long dummy); -static void etr_tasklet_fn(unsigned long dummy); +static void etr_work_fn(struct work_struct *work); +static DECLARE_WORK(etr_work, etr_work_fn); /* * The etr get_clock function. It will write the current clock value @@ -507,29 +505,31 @@ static void etr_reset(void) } } -static void etr_init(void) +static int __init etr_init(void) { struct etr_aib aib; if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) - return; + return 0; /* Check if this machine has the steai instruction. */ if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) set_bit(ETR_FLAG_STEAI, &etr_flags); setup_timer(&etr_timer, etr_timeout, 0UL); - tasklet_init(&etr_tasklet, etr_tasklet_fn, 0); if (!etr_port0_online && !etr_port1_online) set_bit(ETR_FLAG_EACCES, &etr_flags); if (etr_port0_online) { set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } if (etr_port1_online) { set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } + return 0; } +arch_initcall(etr_init); + /* * Two sorts of ETR machine checks. The architecture reads: * "When a machine-check niterruption occurs and if a switch-to-local or @@ -549,7 +549,7 @@ void etr_switch_to_local(void) return; etr_disable_sync_clock(NULL); set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } /* @@ -564,7 +564,7 @@ void etr_sync_check(void) return; etr_disable_sync_clock(NULL); set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } /* @@ -591,13 +591,13 @@ static void etr_ext_handler(__u16 code) * Both ports are not up-to-date now. */ set_bit(ETR_EVENT_PORT_ALERT, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } static void etr_timeout(unsigned long dummy) { set_bit(ETR_EVENT_UPDATE, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } /* @@ -927,7 +927,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib, if (!eacr.e0 && !eacr.e1) return eacr; - /* Update port0 or port1 with aib stored in etr_tasklet_fn. */ + /* Update port0 or port1 with aib stored in etr_work_fn. */ if (aib->esw.q == 0) { /* Information for port 0 stored. */ if (eacr.p0 && !etr_port0_uptodate) { @@ -1007,7 +1007,7 @@ static void etr_update_eacr(struct etr_eacr eacr) * particular this is the only function that calls etr_update_eacr(), * it "controls" the etr control register. 
*/ -static void etr_tasklet_fn(unsigned long dummy) +static void etr_work_fn(struct work_struct *work) { unsigned long long now; struct etr_eacr eacr; @@ -1220,13 +1220,13 @@ static ssize_t etr_online_store(struct sys_device *dev, return count; /* Nothing to do. */ etr_port0_online = value; set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } else { if (etr_port1_online == value) return count; /* Nothing to do. */ etr_port1_online = value; set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } return count; } -- cgit v1.2.3-59-g8ed1b From 9ff6f4577e69801a43c0d58606a80040aecbc4bc Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 27 Apr 2007 16:01:59 +0200 Subject: [S390] Remove debugging junk. arch/s390/appldata/appldata_base.c has some confusing debugging code left over to allow compiling it as a module. In practice, it cannot be configured as module and there is no need to keep that code. Signed-off-by: Gerald Schaefer Signed-off-by: Martin Schwidefsky --- arch/s390/appldata/appldata_base.c | 38 -------------------------------------- 1 file changed, 38 deletions(-) (limited to 'arch') diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 0c3cf4b16ae4..ee89b33145d5 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -668,45 +668,7 @@ EXPORT_SYMBOL_GPL(appldata_register_ops); EXPORT_SYMBOL_GPL(appldata_unregister_ops); EXPORT_SYMBOL_GPL(appldata_diag); -#ifdef MODULE -/* - * Kernel symbols needed by appldata_mem and appldata_os modules. - * However, if this file is compiled as a module (for testing only), these - * symbols are not exported. In this case, we define them locally and export - * those. - */ -void si_swapinfo(struct sysinfo *val) -{ - val->freeswap = -1ul; - val->totalswap = -1ul; -} - -unsigned long avenrun[3] = {-1 - FIXED_1/200, -1 - FIXED_1/200, - -1 - FIXED_1/200}; -int nr_threads = -1; - -void get_full_page_state(struct page_state *ps) -{ - memset(ps, -1, sizeof(struct page_state)); -} - -unsigned long nr_running(void) -{ - return -1; -} - -unsigned long nr_iowait(void) -{ - return -1; -} - -/*unsigned long nr_context_switches(void) -{ - return -1; -}*/ -#endif /* MODULE */ EXPORT_SYMBOL_GPL(si_swapinfo); EXPORT_SYMBOL_GPL(nr_threads); EXPORT_SYMBOL_GPL(nr_running); EXPORT_SYMBOL_GPL(nr_iowait); -//EXPORT_SYMBOL_GPL(nr_context_switches); -- cgit v1.2.3-59-g8ed1b From 39ce010d38bf6703b49f59eb73bef030b1d659f2 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 27 Apr 2007 16:02:00 +0200 Subject: [S390] Clean up smp code in preparation for some larger changes. Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- arch/s390/kernel/smp.c | 261 +++++++++++++++++++------------------------------ include/asm-s390/smp.h | 5 - 2 files changed, 100 insertions(+), 166 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 2c5de92958dd..3754e2031b39 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -1,12 +1,12 @@ /* * arch/s390/kernel/smp.c * - * Copyright (C) IBM Corp. 1999,2006 + * Copyright IBM Corp. 
1999,2007 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), - * Martin Schwidefsky (schwidefsky@de.ibm.com) - * Heiko Carstens (heiko.carstens@de.ibm.com) + * Martin Schwidefsky (schwidefsky@de.ibm.com) + * Heiko Carstens (heiko.carstens@de.ibm.com) * - * based on other smp stuff by + * based on other smp stuff by * (c) 1995 Alan Cox, CymruNET Ltd * (c) 1998 Ingo Molnar * @@ -43,16 +43,17 @@ #include #include -extern volatile int __cpu_logical_map[]; - /* * An array with a pointer the lowcore of every CPU. */ - struct _lowcore *lowcore_ptr[NR_CPUS]; +EXPORT_SYMBOL(lowcore_ptr); cpumask_t cpu_online_map = CPU_MASK_NONE; +EXPORT_SYMBOL(cpu_online_map); + cpumask_t cpu_possible_map = CPU_MASK_NONE; +EXPORT_SYMBOL(cpu_possible_map); static struct task_struct *current_set[NR_CPUS]; @@ -72,7 +73,7 @@ struct call_data_struct { int wait; }; -static struct call_data_struct * call_data; +static struct call_data_struct *call_data; /* * 'Call function' interrupt callback @@ -152,8 +153,8 @@ out: * * Run a function on all other CPUs. * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler. You may call it from a bottom half. + * You must not call this function with disabled interrupts, from a + * hardware interrupt handler or from a bottom half. */ int smp_call_function(void (*func) (void *info), void *info, int nonatomic, int wait) @@ -179,11 +180,11 @@ EXPORT_SYMBOL(smp_call_function); * * Run a function on one processor. * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler. You may call it from a bottom half. + * You must not call this function with disabled interrupts, from a + * hardware interrupt handler or from a bottom half. */ int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic, - int wait, int cpu) + int wait, int cpu) { cpumask_t map = CPU_MASK_NONE; @@ -197,9 +198,9 @@ EXPORT_SYMBOL(smp_call_function_on); static void do_send_stop(void) { - int cpu, rc; + int cpu, rc; - /* stop all processors */ + /* stop all processors */ for_each_online_cpu(cpu) { if (cpu == smp_processor_id()) continue; @@ -211,9 +212,9 @@ static void do_send_stop(void) static void do_store_status(void) { - int cpu, rc; + int cpu, rc; - /* store status of all processors in their lowcores (real 0) */ + /* store status of all processors in their lowcores (real 0) */ for_each_online_cpu(cpu) { if (cpu == smp_processor_id()) continue; @@ -221,8 +222,8 @@ static void do_store_status(void) rc = signal_processor_p( (__u32)(unsigned long) lowcore_ptr[cpu], cpu, sigp_store_status_at_address); - } while(rc == sigp_busy); - } + } while (rc == sigp_busy); + } } static void do_wait_for_stop(void) @@ -233,7 +234,7 @@ static void do_wait_for_stop(void) for_each_online_cpu(cpu) { if (cpu == smp_processor_id()) continue; - while(!smp_cpu_not_running(cpu)) + while (!smp_cpu_not_running(cpu)) cpu_relax(); } } @@ -247,7 +248,7 @@ void smp_send_stop(void) /* Disable all interrupts/machine checks */ __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); - /* write magic number to zero page (absolute 0) */ + /* write magic number to zero page (absolute 0) */ lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; /* stop other processors. */ @@ -263,8 +264,7 @@ void smp_send_stop(void) /* * Reboot, halt and power_off routines for SMP. 
*/ - -void machine_restart_smp(char * __unused) +void machine_restart_smp(char *__unused) { smp_send_stop(); do_reipl(); @@ -295,17 +295,17 @@ void machine_power_off_smp(void) static void do_ext_call_interrupt(__u16 code) { - unsigned long bits; + unsigned long bits; - /* - * handle bit signal external calls - * - * For the ec_schedule signal we have to do nothing. All the work - * is done automatically when we return from the interrupt. - */ + /* + * handle bit signal external calls + * + * For the ec_schedule signal we have to do nothing. All the work + * is done automatically when we return from the interrupt. + */ bits = xchg(&S390_lowcore.ext_call_fast, 0); - if (test_bit(ec_call_function, &bits)) + if (test_bit(ec_call_function, &bits)) do_call_function(); } @@ -315,11 +315,11 @@ static void do_ext_call_interrupt(__u16 code) */ static void smp_ext_bitcall(int cpu, ec_bit_sig sig) { - /* - * Set signaling bit in lowcore of target cpu and kick it - */ + /* + * Set signaling bit in lowcore of target cpu and kick it + */ set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); - while(signal_processor(cpu, sigp_emergency_signal) == sigp_busy) + while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy) udelay(10); } @@ -334,7 +334,7 @@ void smp_ptlb_callback(void *info) void smp_ptlb_all(void) { - on_each_cpu(smp_ptlb_callback, NULL, 0, 1); + on_each_cpu(smp_ptlb_callback, NULL, 0, 1); } EXPORT_SYMBOL(smp_ptlb_all); #endif /* ! CONFIG_64BIT */ @@ -346,7 +346,7 @@ EXPORT_SYMBOL(smp_ptlb_all); */ void smp_send_reschedule(int cpu) { - smp_ext_bitcall(cpu, ec_schedule); + smp_ext_bitcall(cpu, ec_schedule); } /* @@ -360,11 +360,12 @@ struct ec_creg_mask_parms { /* * callback for setting/clearing control bits */ -static void smp_ctl_bit_callback(void *info) { +static void smp_ctl_bit_callback(void *info) +{ struct ec_creg_mask_parms *pp = info; unsigned long cregs[16]; int i; - + __ctl_store(cregs, 0, 15); for (i = 0; i <= 15; i++) cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; @@ -383,6 +384,7 @@ void smp_ctl_set_bit(int cr, int bit) parms.orvals[cr] = 1 << bit; on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); } +EXPORT_SYMBOL(smp_ctl_set_bit); /* * Clear a bit in a control register of all cpus @@ -396,6 +398,7 @@ void smp_ctl_clear_bit(int cr, int bit) parms.andvals[cr] = ~(1L << bit); on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); } +EXPORT_SYMBOL(smp_ctl_clear_bit); #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) @@ -460,8 +463,7 @@ EXPORT_SYMBOL_GPL(zfcpdump_save_areas); * Lets check how many CPUs we have. */ -static unsigned int -__init smp_count_cpus(void) +static unsigned int __init smp_count_cpus(void) { unsigned int cpu, num_cpus; __u16 boot_cpu_addr; @@ -477,31 +479,30 @@ __init smp_count_cpus(void) if ((__u16) cpu == boot_cpu_addr) continue; __cpu_logical_map[1] = (__u16) cpu; - if (signal_processor(1, sigp_sense) == - sigp_not_operational) + if (signal_processor(1, sigp_sense) == sigp_not_operational) continue; num_cpus++; } - printk("Detected %d CPU's\n",(int) num_cpus); + printk("Detected %d CPU's\n", (int) num_cpus); printk("Boot cpu address %2X\n", boot_cpu_addr); return num_cpus; } /* - * Activate a secondary processor. + * Activate a secondary processor. */ int __devinit start_secondary(void *cpuvoid) { - /* Setup the cpu */ - cpu_init(); + /* Setup the cpu */ + cpu_init(); preempt_disable(); /* Enable TOD clock interrupts on the secondary cpu. 
*/ - init_cpu_timer(); + init_cpu_timer(); #ifdef CONFIG_VIRT_TIMER /* Enable cpu timer interrupts on the secondary cpu. */ - init_cpu_vtimer(); + init_cpu_vtimer(); #endif /* Enable pfault pseudo page faults on this cpu. */ pfault_init(); @@ -510,11 +511,11 @@ int __devinit start_secondary(void *cpuvoid) cpu_set(smp_processor_id(), cpu_online_map); /* Switch on interrupts */ local_irq_enable(); - /* Print info about this processor */ - print_cpu_info(&S390_lowcore.cpu_data); - /* cpu_idle will call schedule for us */ - cpu_idle(); - return 0; + /* Print info about this processor */ + print_cpu_info(&S390_lowcore.cpu_data); + /* cpu_idle will call schedule for us */ + cpu_idle(); + return 0; } static void __init smp_create_idle(unsigned int cpu) @@ -531,56 +532,13 @@ static void __init smp_create_idle(unsigned int cpu) current_set[cpu] = p; } -/* Reserving and releasing of CPUs */ - -static DEFINE_SPINLOCK(smp_reserve_lock); -static int smp_cpu_reserved[NR_CPUS]; - -int -smp_get_cpu(cpumask_t cpu_mask) -{ - unsigned long flags; - int cpu; - - spin_lock_irqsave(&smp_reserve_lock, flags); - /* Try to find an already reserved cpu. */ - for_each_cpu_mask(cpu, cpu_mask) { - if (smp_cpu_reserved[cpu] != 0) { - smp_cpu_reserved[cpu]++; - /* Found one. */ - goto out; - } - } - /* Reserve a new cpu from cpu_mask. */ - for_each_cpu_mask(cpu, cpu_mask) { - if (cpu_online(cpu)) { - smp_cpu_reserved[cpu]++; - goto out; - } - } - cpu = -ENODEV; -out: - spin_unlock_irqrestore(&smp_reserve_lock, flags); - return cpu; -} - -void -smp_put_cpu(int cpu) -{ - unsigned long flags; - - spin_lock_irqsave(&smp_reserve_lock, flags); - smp_cpu_reserved[cpu]--; - spin_unlock_irqrestore(&smp_reserve_lock, flags); -} - -static int -cpu_stopped(int cpu) +static int cpu_stopped(int cpu) { __u32 status; /* Check for stopped state */ - if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) { + if (signal_processor_ps(&status, 0, cpu, sigp_sense) == + sigp_status_stored) { if (status & 0x40) return 1; } @@ -589,14 +547,13 @@ cpu_stopped(int cpu) /* Upping and downing of CPUs */ -int -__cpu_up(unsigned int cpu) +int __cpu_up(unsigned int cpu) { struct task_struct *idle; - struct _lowcore *cpu_lowcore; + struct _lowcore *cpu_lowcore; struct stack_frame *sf; - sigp_ccode ccode; - int curr_cpu; + sigp_ccode ccode; + int curr_cpu; for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) { __cpu_logical_map[cpu] = (__u16) curr_cpu; @@ -609,7 +566,7 @@ __cpu_up(unsigned int cpu) ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), cpu, sigp_set_prefix); - if (ccode){ + if (ccode) { printk("sigp_set_prefix failed for cpu %d " "with condition code %d\n", (int) cpu, (int) ccode); @@ -617,9 +574,9 @@ __cpu_up(unsigned int cpu) } idle = current_set[cpu]; - cpu_lowcore = lowcore_ptr[cpu]; + cpu_lowcore = lowcore_ptr[cpu]; cpu_lowcore->kernel_stack = (unsigned long) - task_stack_page(idle) + (THREAD_SIZE); + task_stack_page(idle) + THREAD_SIZE; sf = (struct stack_frame *) (cpu_lowcore->kernel_stack - sizeof(struct pt_regs) - sizeof(struct stack_frame)); @@ -631,11 +588,11 @@ __cpu_up(unsigned int cpu) " stam 0,15,0(%0)" : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; - cpu_lowcore->current_task = (unsigned long) idle; - cpu_lowcore->cpu_data.cpu_nr = cpu; + cpu_lowcore->current_task = (unsigned long) idle; + cpu_lowcore->cpu_data.cpu_nr = cpu; eieio(); - while (signal_processor(cpu,sigp_restart) == sigp_busy) + while (signal_processor(cpu, 
sigp_restart) == sigp_busy) udelay(10); while (!cpu_online(cpu)) @@ -682,18 +639,11 @@ static int __init setup_possible_cpus(char *s) } early_param("possible_cpus", setup_possible_cpus); -int -__cpu_disable(void) +int __cpu_disable(void) { - unsigned long flags; struct ec_creg_mask_parms cr_parms; int cpu = smp_processor_id(); - spin_lock_irqsave(&smp_reserve_lock, flags); - if (smp_cpu_reserved[cpu] != 0) { - spin_unlock_irqrestore(&smp_reserve_lock, flags); - return -EBUSY; - } cpu_clear(cpu, cpu_online_map); /* Disable pfault pseudo page faults on this cpu. */ @@ -704,24 +654,23 @@ __cpu_disable(void) /* disable all external interrupts */ cr_parms.orvals[0] = 0; - cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 | - 1<<11 | 1<<10 | 1<< 6 | 1<< 4); + cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 | + 1 << 11 | 1 << 10 | 1 << 6 | 1 << 4); /* disable all I/O interrupts */ cr_parms.orvals[6] = 0; - cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 | - 1<<27 | 1<<26 | 1<<25 | 1<<24); + cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | + 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24); /* disable most machine checks */ cr_parms.orvals[14] = 0; - cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24); + cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 | + 1 << 25 | 1 << 24); smp_ctl_bit_callback(&cr_parms); - spin_unlock_irqrestore(&smp_reserve_lock, flags); return 0; } -void -__cpu_die(unsigned int cpu) +void __cpu_die(unsigned int cpu) { /* Wait until target cpu is down */ while (!smp_cpu_not_running(cpu)) @@ -729,13 +678,12 @@ __cpu_die(unsigned int cpu) printk("Processor %d spun down\n", cpu); } -void -cpu_die(void) +void cpu_die(void) { idle_task_exit(); signal_processor(smp_processor_id(), sigp_stop); BUG(); - for(;;); + for (;;); } #endif /* CONFIG_HOTPLUG_CPU */ @@ -748,36 +696,36 @@ void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned long stack; unsigned int cpu; - int i; - - /* request the 0x1201 emergency signal external interrupt */ - if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) - panic("Couldn't request external interrupt 0x1201"); - memset(lowcore_ptr,0,sizeof(lowcore_ptr)); - /* - * Initialize prefix pages and stacks for all possible cpus - */ + int i; + + /* request the 0x1201 emergency signal external interrupt */ + if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) + panic("Couldn't request external interrupt 0x1201"); + memset(lowcore_ptr, 0, sizeof(lowcore_ptr)); + /* + * Initialize prefix pages and stacks for all possible cpus + */ print_cpu_info(&S390_lowcore.cpu_data); - for_each_possible_cpu(i) { + for_each_possible_cpu(i) { lowcore_ptr[i] = (struct _lowcore *) - __get_free_pages(GFP_KERNEL|GFP_DMA, - sizeof(void*) == 8 ? 1 : 0); - stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER); - if (lowcore_ptr[i] == NULL || stack == 0ULL) + __get_free_pages(GFP_KERNEL | GFP_DMA, + sizeof(void*) == 8 ? 
1 : 0); + stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); + if (!lowcore_ptr[i] || !stack) panic("smp_boot_cpus failed to allocate memory\n"); *(lowcore_ptr[i]) = S390_lowcore; - lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE); - stack = __get_free_pages(GFP_KERNEL,0); - if (stack == 0ULL) + lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE; + stack = __get_free_pages(GFP_KERNEL, 0); + if (!stack) panic("smp_boot_cpus failed to allocate memory\n"); - lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE); + lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE; #ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) { lowcore_ptr[i]->extended_save_area_addr = - (__u32) __get_free_pages(GFP_KERNEL,0); - if (lowcore_ptr[i]->extended_save_area_addr == 0) + (__u32) __get_free_pages(GFP_KERNEL, 0); + if (!lowcore_ptr[i]->extended_save_area_addr) panic("smp_boot_cpus failed to " "allocate memory\n"); } @@ -816,7 +764,7 @@ void smp_cpus_done(unsigned int max_cpus) */ int setup_profiling_timer(unsigned int multiplier) { - return 0; + return 0; } static DEFINE_PER_CPU(struct cpu, cpu_devices); @@ -853,7 +801,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self, } static struct notifier_block __cpuinitdata smp_cpu_nb = { - .notifier_call = smp_cpu_notify, + .notifier_call = smp_cpu_notify, }; static int __init topology_init(void) @@ -875,13 +823,4 @@ static int __init topology_init(void) } return 0; } - subsys_initcall(topology_init); - -EXPORT_SYMBOL(cpu_online_map); -EXPORT_SYMBOL(cpu_possible_map); -EXPORT_SYMBOL(lowcore_ptr); -EXPORT_SYMBOL(smp_ctl_set_bit); -EXPORT_SYMBOL(smp_ctl_clear_bit); -EXPORT_SYMBOL(smp_get_cpu); -EXPORT_SYMBOL(smp_put_cpu); diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h index 676e94ee15f0..0a28e6d6ef40 100644 --- a/include/asm-s390/smp.h +++ b/include/asm-s390/smp.h @@ -54,9 +54,6 @@ extern int smp_call_function_on(void (*func) (void *info), void *info, #define raw_smp_processor_id() (S390_lowcore.cpu_data.cpu_nr) -extern int smp_get_cpu(cpumask_t cpu_map); -extern void smp_put_cpu(int cpu); - static inline __u16 hard_smp_processor_id(void) { __u16 cpu_address; @@ -114,8 +111,6 @@ static inline void smp_send_stop(void) } #define smp_cpu_not_running(cpu) 1 -#define smp_get_cpu(cpu) ({ 0; }) -#define smp_put_cpu(cpu) ({ 0; }) #define smp_setup_cpu_possible_map() do { } while (0) #endif -- cgit v1.2.3-59-g8ed1b
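The time.c patch above is a straightforward tasklet-to-work-queue conversion: the deferred function now takes a struct work_struct * instead of an unsigned long cookie, DECLARE_WORK() replaces tasklet_init(), and schedule_work() replaces tasklet_hi_schedule(). Unlike a tasklet, the work item runs in process context and is allowed to sleep. A minimal sketch of the same pattern, using hypothetical example_* names rather than the ETR code itself and assuming the post-2.6.20 workqueue API:

#include <linux/workqueue.h>

/* Deferred handler: runs in process context via the events work queue. */
static void example_work_fn(struct work_struct *work)
{
	/* slow or possibly sleeping processing that a tasklet must not do */
}

static DECLARE_WORK(example_work, example_work_fn);

/* Called from interrupt context; replaces tasklet_hi_schedule(). */
static void example_event(void)
{
	schedule_work(&example_work);
}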
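The same patch also stops calling etr_init() from time_init() and registers it as an arch_initcall instead, presumably because schedule_work() relies on the work queue threads, which are not yet running when time_init() executes early in start_kernel(). The general shape of deferring built-in initialization this way, again with a hypothetical function name:

#include <linux/init.h>

static int __init example_late_setup(void)
{
	/* runs once during boot, after core kernel services are available */
	return 0;
}
arch_initcall(example_late_setup);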