author     Linus Torvalds <torvalds@linux-foundation.org>  2022-03-23 18:03:08 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-03-23 18:03:08 -0700
commit     194dfe88d62ed12d0cf30f6f20734c2d0d111533 (patch)
tree       f057597d411df53a152ac41ae8bd900aabb94994 /arch/ia64/kernel
parent     Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm (diff)
parent     nds32: Remove the architecture (diff)
download   linux-dev-194dfe88d62ed12d0cf30f6f20734c2d0d111533.tar.xz
           linux-dev-194dfe88d62ed12d0cf30f6f20734c2d0d111533.zip
Merge tag 'asm-generic-5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic
Pull asm-generic updates from Arnd Bergmann:
 "There are three sets of updates for 5.18 in the asm-generic tree:

  - The set_fs()/get_fs() infrastructure gets removed for good. This
    was already gone from all major architectures, but now we can
    finally remove it everywhere, which loses some particularly tricky
    and error-prone code. There is a small merge conflict against a
    parisc cleanup, the solution is to use their new version.

  - The nds32 architecture ends its tenure in the Linux kernel. The
    hardware is still used and the code is in reasonable shape, but the
    mainline port is not actively maintained any more, as all remaining
    users are thought to run vendor kernels that would never be updated
    to a future release.

  - A series from Masahiro Yamada cleans up some of the uapi header
    files to pass the compile-time checks"

* tag 'asm-generic-5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic: (27 commits)
  nds32: Remove the architecture
  uaccess: remove CONFIG_SET_FS
  ia64: remove CONFIG_SET_FS support
  sh: remove CONFIG_SET_FS support
  sparc64: remove CONFIG_SET_FS support
  lib/test_lockup: fix kernel pointer check for separate address spaces
  uaccess: generalize access_ok()
  uaccess: fix type mismatch warnings from access_ok()
  arm64: simplify access_ok()
  m68k: fix access_ok for coldfire
  MIPS: use simpler access_ok()
  MIPS: Handle address errors for accesses above CPU max virtual user address
  uaccess: add generic __{get,put}_kernel_nofault
  nios2: drop access_ok() check from __put_user()
  x86: use more conventional access_ok() definition
  x86: remove __range_not_ok()
  sparc64: add __{get,put}_kernel_nofault()
  nds32: fix access_ok() checks in get/put_user
  uaccess: fix nios2 and microblaze get_user_8()
  sparc64: fix building assembly files
  ...
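The ia64 diff below is a representative example of this conversion: instead of temporarily lifting the user address limit with set_fs(KERNEL_DS), the unaligned-access handler passes an explicit kernel_mode flag and picks the matching accessor. As a minimal before/after sketch (old_read() and new_read() are illustrative names, not from the patch; the uaccess calls, set_fs()/get_fs(), copy_from_user() and copy_from_kernel_nofault(), are the real kernel APIs):

	#include <linux/uaccess.h>

	/* Old pattern (before this series): widen the address limit so
	 * copy_from_user() may also read kernel memory, then restore it. */
	static int old_read(void *val, unsigned long addr, int len)
	{
		mm_segment_t old_fs = get_fs();
		int err;

		set_fs(KERNEL_DS);
		err = copy_from_user(val, (const void __user *)addr, len);
		set_fs(old_fs);
		return err;
	}

	/* New pattern: no global address-limit state; the caller states
	 * explicitly whether the access is on behalf of the kernel. */
	static int new_read(void *val, unsigned long addr, int len, bool kernel_mode)
	{
		if (kernel_mode)
			return copy_from_kernel_nofault(val, (void *)addr, len);

		return copy_from_user(val, (const void __user *)addr, len);
	}

The emulate_load()/emulate_store() helpers added in the diff follow this new pattern.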
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/unaligned.c | 60
1 file changed, 39 insertions(+), 21 deletions(-)
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 6c1a8951dfbb..0acb5a0cd7ab 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -749,9 +749,25 @@ emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsi
}
}
+static int emulate_store(unsigned long ifa, void *val, int len, bool kernel_mode)
+{
+ if (kernel_mode)
+ return copy_to_kernel_nofault((void *)ifa, val, len);
+
+ return copy_to_user((void __user *)ifa, val, len);
+}
+
+static int emulate_load(void *val, unsigned long ifa, int len, bool kernel_mode)
+{
+ if (kernel_mode)
+ return copy_from_kernel_nofault(val, (void *)ifa, len);
+
+ return copy_from_user(val, (void __user *)ifa, len);
+}
static int
-emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
+ bool kernel_mode)
{
unsigned int len = 1 << ld.x6_sz;
unsigned long val = 0;
@@ -774,7 +790,7 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
return -1;
}
/* this assumes little-endian byte-order: */
- if (copy_from_user(&val, (void __user *) ifa, len))
+ if (emulate_load(&val, ifa, len, kernel_mode))
return -1;
setreg(ld.r1, val, 0, regs);
@@ -872,7 +888,8 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
}
static int
-emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
+ bool kernel_mode)
{
unsigned long r2;
unsigned int len = 1 << ld.x6_sz;
@@ -901,7 +918,7 @@ emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
}
/* this assumes little-endian byte-order: */
- if (copy_to_user((void __user *) ifa, &r2, len))
+ if (emulate_store(ifa, &r2, len, kernel_mode))
return -1;
/*
@@ -1021,7 +1038,7 @@ float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
}
static int
-emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs, bool kernel_mode)
{
struct ia64_fpreg fpr_init[2];
struct ia64_fpreg fpr_final[2];
@@ -1050,8 +1067,8 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs
* This assumes little-endian byte-order. Note that there is no "ldfpe"
* instruction:
*/
- if (copy_from_user(&fpr_init[0], (void __user *) ifa, len)
- || copy_from_user(&fpr_init[1], (void __user *) (ifa + len), len))
+ if (emulate_load(&fpr_init[0], ifa, len, kernel_mode)
+ || emulate_load(&fpr_init[1], (ifa + len), len, kernel_mode))
return -1;
DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz);
@@ -1126,7 +1143,8 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs
static int
-emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
+ bool kernel_mode)
{
struct ia64_fpreg fpr_init;
struct ia64_fpreg fpr_final;
@@ -1152,7 +1170,7 @@ emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
* See comments in ldX for descriptions on how the various loads are handled.
*/
if (ld.x6_op != 0x2) {
- if (copy_from_user(&fpr_init, (void __user *) ifa, len))
+ if (emulate_load(&fpr_init, ifa, len, kernel_mode))
return -1;
DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
@@ -1202,7 +1220,8 @@ emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
static int
-emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
+ bool kernel_mode)
{
struct ia64_fpreg fpr_init;
struct ia64_fpreg fpr_final;
@@ -1244,7 +1263,7 @@ emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
DDUMP("fpr_init =", &fpr_init, len);
DDUMP("fpr_final =", &fpr_final, len);
- if (copy_to_user((void __user *) ifa, &fpr_final, len))
+ if (emulate_store(ifa, &fpr_final, len, kernel_mode))
return -1;
/*
@@ -1295,7 +1314,6 @@ void
ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
{
struct ia64_psr *ipsr = ia64_psr(regs);
- mm_segment_t old_fs = get_fs();
unsigned long bundle[2];
unsigned long opcode;
const struct exception_table_entry *eh = NULL;
@@ -1304,6 +1322,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
load_store_t insn;
} u;
int ret = -1;
+ bool kernel_mode = false;
if (ia64_psr(regs)->be) {
/* we don't support big-endian accesses */
@@ -1367,13 +1386,13 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
if (unaligned_dump_stack)
dump_stack();
}
- set_fs(KERNEL_DS);
+ kernel_mode = true;
}
DPRINT("iip=%lx ifa=%lx isr=%lx (ei=%d, sp=%d)\n",
regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it);
- if (__copy_from_user(bundle, (void __user *) regs->cr_iip, 16))
+ if (emulate_load(bundle, regs->cr_iip, 16, kernel_mode))
goto failure;
/*
@@ -1467,7 +1486,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
case LDCCLR_IMM_OP:
case LDCNC_IMM_OP:
case LDCCLRACQ_IMM_OP:
- ret = emulate_load_int(ifa, u.insn, regs);
+ ret = emulate_load_int(ifa, u.insn, regs, kernel_mode);
break;
case ST_OP:
@@ -1478,7 +1497,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
fallthrough;
case ST_IMM_OP:
case STREL_IMM_OP:
- ret = emulate_store_int(ifa, u.insn, regs);
+ ret = emulate_store_int(ifa, u.insn, regs, kernel_mode);
break;
case LDF_OP:
@@ -1486,21 +1505,21 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
case LDFCCLR_OP:
case LDFCNC_OP:
if (u.insn.x)
- ret = emulate_load_floatpair(ifa, u.insn, regs);
+ ret = emulate_load_floatpair(ifa, u.insn, regs, kernel_mode);
else
- ret = emulate_load_float(ifa, u.insn, regs);
+ ret = emulate_load_float(ifa, u.insn, regs, kernel_mode);
break;
case LDF_IMM_OP:
case LDFA_IMM_OP:
case LDFCCLR_IMM_OP:
case LDFCNC_IMM_OP:
- ret = emulate_load_float(ifa, u.insn, regs);
+ ret = emulate_load_float(ifa, u.insn, regs, kernel_mode);
break;
case STF_OP:
case STF_IMM_OP:
- ret = emulate_store_float(ifa, u.insn, regs);
+ ret = emulate_store_float(ifa, u.insn, regs, kernel_mode);
break;
default:
@@ -1521,7 +1540,6 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
DPRINT("ipsr->ri=%d iip=%lx\n", ipsr->ri, regs->cr_iip);
done:
- set_fs(old_fs); /* restore original address limit */
return;
failure: