Diffstat (limited to 'arch/x86/kernel/vm86_32.c')
 arch/x86/kernel/vm86_32.c | 115 ++++++++++++++++++++++-----------------------
 1 file changed, 57 insertions(+), 58 deletions(-)
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 157e4bedd3c5..738c2104df30 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -70,10 +70,10 @@
/*
* 8- and 16-bit register defines..
*/
-#define AL(regs) (((unsigned char *)&((regs)->pt.eax))[0])
-#define AH(regs) (((unsigned char *)&((regs)->pt.eax))[1])
-#define IP(regs) (*(unsigned short *)&((regs)->pt.eip))
-#define SP(regs) (*(unsigned short *)&((regs)->pt.esp))
+#define AL(regs) (((unsigned char *)&((regs)->pt.ax))[0])
+#define AH(regs) (((unsigned char *)&((regs)->pt.ax))[1])
+#define IP(regs) (*(unsigned short *)&((regs)->pt.ip))
+#define SP(regs) (*(unsigned short *)&((regs)->pt.sp))
/*
* virtual flags (16 and 32-bit versions)
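
The AL()/AH() accessors above rely on x86 being little-endian: byte 0 of the
32-bit ax image is the low byte (AL) and byte 1 is AH, while IP()/SP() read
just the low 16 bits of ip/sp. A minimal standalone sketch of the same
aliasing (assuming a little-endian host, as on x86):

	#include <stdio.h>

	int main(void)
	{
		unsigned long ax = 0x1234;	/* AH = 0x12, AL = 0x34 */
		unsigned char al = ((unsigned char *)&ax)[0];
		unsigned char ah = ((unsigned char *)&ax)[1];

		printf("AL=%#x AH=%#x\n", al, ah);	/* AL=0x34 AH=0x12 */
		return 0;
	}
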
@@ -93,12 +93,12 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
{
int ret = 0;
- /* kernel_vm86_regs is missing xgs, so copy everything up to
+ /* kernel_vm86_regs is missing gs, so copy everything up to
(but not including) orig_eax, and then rest including orig_eax. */
- ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_eax));
- ret += copy_to_user(&user->orig_eax, &regs->pt.orig_eax,
+ ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
+ ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
sizeof(struct kernel_vm86_regs) -
- offsetof(struct kernel_vm86_regs, pt.orig_eax));
+ offsetof(struct kernel_vm86_regs, pt.orig_ax));
return ret;
}
@@ -110,18 +110,17 @@ static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
{
int ret = 0;
- /* copy eax-xfs inclusive */
- ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_eax));
- /* copy orig_eax-__gsh+extra */
- ret += copy_from_user(&regs->pt.orig_eax, &user->orig_eax,
+ /* copy ax-fs inclusive */
+ ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
+ /* copy orig_ax-__gsh+extra */
+ ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
sizeof(struct kernel_vm86_regs) -
- offsetof(struct kernel_vm86_regs, pt.orig_eax) +
+ offsetof(struct kernel_vm86_regs, pt.orig_ax) +
extra);
return ret;
}
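
The two-part copies above exist because the user-visible struct vm86_regs and
struct kernel_vm86_regs stop lining up around the segment-register slots: the
layouts agree up to orig_ax/orig_eax, diverge there, and the tail is copied as
a second run (plus 'extra' trailing bytes on the read side). A userspace
sketch of the same offsetof()-based split copy, using hypothetical layouts:

	#include <stddef.h>
	#include <string.h>

	/* Hypothetical layouts: 'dst' has one extra slot ('gap') that
	 * 'src' lacks, so field offsets diverge after 'b'; copying in
	 * two runs realigns at 'c', as the vm86 helpers do at orig_ax. */
	struct src { long a, b, c, d; };
	struct dst { long a, b, gap, c, d; };

	static void split_copy(struct dst *to, const struct src *from)
	{
		/* first run: the fields whose offsets still agree */
		memcpy(to, from, offsetof(struct src, c));
		/* second run: realign and copy the rest */
		memcpy(&to->c, &from->c,
		       sizeof(*from) - offsetof(struct src, c));
	}
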
-struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
-struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
+struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
{
struct tss_struct *tss;
struct pt_regs *ret;
@@ -138,7 +137,7 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
printk("no vm86_info: BAD\n");
do_exit(SIGSEGV);
}
- set_flags(regs->pt.eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
+ set_flags(regs->pt.flags, VEFLAGS, VIF_MASK | current->thread.v86mask);
tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs);
tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap);
if (tmp) {
@@ -147,15 +146,15 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
}
tss = &per_cpu(init_tss, get_cpu());
- current->thread.esp0 = current->thread.saved_esp0;
+ current->thread.sp0 = current->thread.saved_sp0;
current->thread.sysenter_cs = __KERNEL_CS;
- load_esp0(tss, &current->thread);
- current->thread.saved_esp0 = 0;
+ load_sp0(tss, &current->thread);
+ current->thread.saved_sp0 = 0;
put_cpu();
ret = KVM86->regs32;
- ret->xfs = current->thread.saved_fs;
+ ret->fs = current->thread.saved_fs;
loadsegment(gs, current->thread.saved_gs);
return ret;
@@ -197,7 +196,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
asmlinkage int sys_vm86old(struct pt_regs regs)
{
- struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.ebx;
+ struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.bx;
struct kernel_vm86_struct info; /* declare this _on top_,
* this avoids wasting of stack space.
* This remains on the stack until we
@@ -207,7 +206,7 @@ asmlinkage int sys_vm86old(struct pt_regs regs)
int tmp, ret = -EPERM;
tsk = current;
- if (tsk->thread.saved_esp0)
+ if (tsk->thread.saved_sp0)
goto out;
tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
offsetof(struct kernel_vm86_struct, vm86plus) -
@@ -237,12 +236,12 @@ asmlinkage int sys_vm86(struct pt_regs regs)
struct vm86plus_struct __user *v86;
tsk = current;
- switch (regs.ebx) {
+ switch (regs.bx) {
case VM86_REQUEST_IRQ:
case VM86_FREE_IRQ:
case VM86_GET_IRQ_BITS:
case VM86_GET_AND_RESET_IRQ:
- ret = do_vm86_irq_handling(regs.ebx, (int)regs.ecx);
+ ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
goto out;
case VM86_PLUS_INSTALL_CHECK:
/* NOTE: on old vm86 stuff this will return the error
@@ -256,9 +255,9 @@ asmlinkage int sys_vm86(struct pt_regs regs)
/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
ret = -EPERM;
- if (tsk->thread.saved_esp0)
+ if (tsk->thread.saved_sp0)
goto out;
- v86 = (struct vm86plus_struct __user *)regs.ecx;
+ v86 = (struct vm86plus_struct __user *)regs.cx;
tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
offsetof(struct kernel_vm86_struct, regs32) -
sizeof(info.regs));
@@ -281,23 +280,23 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
/*
* make sure the vm86() system call doesn't try to do anything silly
*/
- info->regs.pt.xds = 0;
- info->regs.pt.xes = 0;
- info->regs.pt.xfs = 0;
+ info->regs.pt.ds = 0;
+ info->regs.pt.es = 0;
+ info->regs.pt.fs = 0;
/* we are clearing gs later just before "jmp resume_userspace",
* because it is not saved/restored.
*/
/*
- * The eflags register is also special: we cannot trust that the user
+ * The flags register is also special: we cannot trust that the user
* has set it up safely, so this makes sure interrupt etc flags are
* inherited from protected mode.
*/
- VEFLAGS = info->regs.pt.eflags;
- info->regs.pt.eflags &= SAFE_MASK;
- info->regs.pt.eflags |= info->regs32->eflags & ~SAFE_MASK;
- info->regs.pt.eflags |= VM_MASK;
+ VEFLAGS = info->regs.pt.flags;
+ info->regs.pt.flags &= SAFE_MASK;
+ info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
+ info->regs.pt.flags |= VM_MASK;
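
The net effect of the block above: the complete user-supplied flags image is
remembered in VEFLAGS, but only the safe (arithmetic etc.) bits reach the real
flags word; privileged bits such as IF and IOPL are inherited from the 32-bit
protected-mode regs, and the VM bit is forced on. A sketch of the merge, with
assumed mask values (the real ones come from the kernel headers):

	#define SAFE_MASK 0x0dd5UL	/* assumed: CF|PF|AF|ZF|SF|TF|DF|OF */
	#define VM_MASK   0x00020000UL	/* EFLAGS.VM, bit 17 */

	static unsigned long sanitize_vm86_flags(unsigned long user_flags,
						 unsigned long kernel_flags)
	{
		unsigned long flags;

		flags  = user_flags & SAFE_MASK;    /* user: safe bits only */
		flags |= kernel_flags & ~SAFE_MASK; /* IF, IOPL from prot. mode */
		flags |= VM_MASK;                   /* enter virtual-8086 mode */
		return flags;
	}
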
switch (info->cpu_type) {
case CPU_286:
@@ -315,18 +314,18 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
}
/*
- * Save old state, set default return value (%eax) to 0
+ * Save old state, set default return value (%ax) to 0
*/
- info->regs32->eax = 0;
- tsk->thread.saved_esp0 = tsk->thread.esp0;
- tsk->thread.saved_fs = info->regs32->xfs;
+ info->regs32->ax = 0;
+ tsk->thread.saved_sp0 = tsk->thread.sp0;
+ tsk->thread.saved_fs = info->regs32->fs;
savesegment(gs, tsk->thread.saved_gs);
tss = &per_cpu(init_tss, get_cpu());
- tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
if (cpu_has_sep)
tsk->thread.sysenter_cs = 0;
- load_esp0(tss, &tsk->thread);
+ load_sp0(tss, &tsk->thread);
put_cpu();
tsk->thread.screen_bitmap = info->screen_bitmap;
@@ -352,7 +351,7 @@ static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
struct pt_regs * regs32;
regs32 = save_v86_state(regs16);
- regs32->eax = retval;
+ regs32->ax = retval;
__asm__ __volatile__("movl %0,%%esp\n\t"
"movl %1,%%ebp\n\t"
"jmp resume_userspace"
@@ -373,30 +372,30 @@ static inline void clear_IF(struct kernel_vm86_regs * regs)
static inline void clear_TF(struct kernel_vm86_regs * regs)
{
- regs->pt.eflags &= ~TF_MASK;
+ regs->pt.flags &= ~TF_MASK;
}
static inline void clear_AC(struct kernel_vm86_regs * regs)
{
- regs->pt.eflags &= ~AC_MASK;
+ regs->pt.flags &= ~AC_MASK;
}
/* It is correct to call set_IF(regs) from the set_vflags_*
* functions. However someone forgot to call clear_IF(regs)
* in the opposite case.
* After the command sequence CLI PUSHF STI POPF you should
- * end up with interrups disabled, but you ended up with
+ * end up with interrupts disabled, but you ended up with
* interrupts enabled.
* ( I was testing my own changes, but the only bug I
* could find was in a function I had not changed. )
* [KD]
*/
-static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
+static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs * regs)
{
- set_flags(VEFLAGS, eflags, current->thread.v86mask);
- set_flags(regs->pt.eflags, eflags, SAFE_MASK);
- if (eflags & IF_MASK)
+ set_flags(VEFLAGS, flags, current->thread.v86mask);
+ set_flags(regs->pt.flags, flags, SAFE_MASK);
+ if (flags & IF_MASK)
set_IF(regs);
else
clear_IF(regs);
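
The sequence from the [KD] comment can be replayed in a few lines of plain C;
the point is that POPF must be able to clear the virtual IF, not only set it.
A loose simulator sketch, with a bool standing in for the VIF bit in VEFLAGS:

	#include <assert.h>
	#include <stdbool.h>

	#define IF_MASK 0x0200U

	static bool vif;	/* stands in for the virtual interrupt flag */

	static unsigned pushf(void)  { return vif ? IF_MASK : 0; }
	static void popf(unsigned f) { vif = (f & IF_MASK) != 0; }

	int main(void)
	{
		vif = false;			/* CLI */
		unsigned saved = pushf();	/* PUSHF: IF pushed clear */
		vif = true;			/* STI */
		popf(saved);			/* POPF: must clear VIF again */
		assert(!vif);			/* interrupts end up disabled */
		return 0;
	}
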
@@ -405,7 +404,7 @@ static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs
static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
{
set_flags(VFLAGS, flags, current->thread.v86mask);
- set_flags(regs->pt.eflags, flags, SAFE_MASK);
+ set_flags(regs->pt.flags, flags, SAFE_MASK);
if (flags & IF_MASK)
set_IF(regs);
else
@@ -414,7 +413,7 @@ static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_reg
static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
{
- unsigned long flags = regs->pt.eflags & RETURN_MASK;
+ unsigned long flags = regs->pt.flags & RETURN_MASK;
if (VEFLAGS & VIF_MASK)
flags |= IF_MASK;
@@ -518,7 +517,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
unsigned long __user *intr_ptr;
unsigned long segoffs;
- if (regs->pt.xcs == BIOSSEG)
+ if (regs->pt.cs == BIOSSEG)
goto cannot_handle;
if (is_revectored(i, &KVM86->int_revectored))
goto cannot_handle;
@@ -530,9 +529,9 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
if ((segoffs >> 16) == BIOSSEG)
goto cannot_handle;
pushw(ssp, sp, get_vflags(regs), cannot_handle);
- pushw(ssp, sp, regs->pt.xcs, cannot_handle);
+ pushw(ssp, sp, regs->pt.cs, cannot_handle);
pushw(ssp, sp, IP(regs), cannot_handle);
- regs->pt.xcs = segoffs >> 16;
+ regs->pt.cs = segoffs >> 16;
SP(regs) -= 6;
IP(regs) = segoffs & 0xffff;
clear_TF(regs);
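
do_int() above emulates a real-mode INT i: it fetches the 4-byte handler
vector from the interrupt table at linear address i * 4, pushes FLAGS, CS and
IP as three 16-bit words (hence SP(regs) -= 6), and redirects cs:ip to the
handler. A sketch of the vector decoding, with 'mem' standing in for the
guest's flat address space:

	#include <stdint.h>
	#include <string.h>

	static void decode_ivt_entry(const uint8_t *mem, int i,
				     uint16_t *seg, uint16_t *off)
	{
		uint32_t segoffs;

		memcpy(&segoffs, mem + (i << 2), sizeof(segoffs));
		*seg = segoffs >> 16;		/* handler CS */
		*off = segoffs & 0xffff;	/* handler IP */
	}
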
@@ -549,7 +548,7 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno
if (VMPI.is_vm86pus) {
if ( (trapno==3) || (trapno==1) )
return_to_32bit(regs, VM86_TRAP + (trapno << 8));
- do_int(regs, trapno, (unsigned char __user *) (regs->pt.xss << 4), SP(regs));
+ do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
return 0;
}
if (trapno !=1)
@@ -585,10 +584,10 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
handle_vm86_trap(regs, 0, 1); \
return; } while (0)
- orig_flags = *(unsigned short *)&regs->pt.eflags;
+ orig_flags = *(unsigned short *)&regs->pt.flags;
- csp = (unsigned char __user *) (regs->pt.xcs << 4);
- ssp = (unsigned char __user *) (regs->pt.xss << 4);
+ csp = (unsigned char __user *) (regs->pt.cs << 4);
+ ssp = (unsigned char __user *) (regs->pt.ss << 4);
sp = SP(regs);
ip = IP(regs);
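
The cs << 4 / ss << 4 computations above are plain real-mode address
translation, linear = segment * 16 + offset, so csp/ssp point at the faulting
instruction and the emulated stack in the guest's flat view. As a one-liner:

	#include <stdint.h>

	/* 20-bit wraparound of the 8086 is ignored in this sketch */
	static inline uint32_t real_mode_linear(uint16_t seg, uint16_t off)
	{
		return ((uint32_t)seg << 4) + off;
	}

	/* e.g. real_mode_linear(0xF000, 0xFFF0) == 0xFFFF0, the reset vector */
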
@@ -675,7 +674,7 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
SP(regs) += 6;
}
IP(regs) = newip;
- regs->pt.xcs = newcs;
+ regs->pt.cs = newcs;
CHECK_IF_IN_TRAP;
if (data32) {
set_vflags_long(newflags, regs);