author     Dmitry Torokhov <dmitry.torokhov@gmail.com>  2018-04-04 16:11:49 -0700
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>  2018-04-04 16:11:49 -0700
commit     664b0bae0b87f69bc9deb098f5e0158b9cf18e04 (patch)
tree       d5841492b396ff483723b9339c7c11dc33b67688 /arch/s390/mm
parent     Input: ALPS - fix TrackStick detection on Thinkpad L570 and Latitude 7370 (diff)
parent     Input: i8042 - enable MUX on Sony VAIO VGN-CS series to fix touchpad (diff)
Merge branch 'next' into for-linus
Prepare input updates for 4.17 merge window.
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/cmm.c        9
-rw-r--r--  arch/s390/mm/fault.c    110
-rw-r--r--  arch/s390/mm/gmap.c      54
-rw-r--r--  arch/s390/mm/init.c      12
-rw-r--r--  arch/s390/mm/mmap.c      16
-rw-r--r--  arch/s390/mm/pgalloc.c   20
-rw-r--r--  arch/s390/mm/pgtable.c    1
-rw-r--r--  arch/s390/mm/vmem.c      22
8 files changed, 121 insertions(+), 123 deletions(-)
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 829c63dbc81a..6cf024eb2085 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Collaborative memory management interface.
*
@@ -56,10 +57,10 @@ static DEFINE_SPINLOCK(cmm_lock);
static struct task_struct *cmm_thread_ptr;
static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait);
-static DEFINE_TIMER(cmm_timer, NULL, 0, 0);
-static void cmm_timer_fn(unsigned long);
+static void cmm_timer_fn(struct timer_list *);
static void cmm_set_timer(void);
+static DEFINE_TIMER(cmm_timer, cmm_timer_fn);
static long cmm_alloc_pages(long nr, long *counter,
struct cmm_page_array **list)
@@ -194,13 +195,11 @@ static void cmm_set_timer(void)
if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
return;
}
- cmm_timer.function = cmm_timer_fn;
- cmm_timer.data = 0;
cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
add_timer(&cmm_timer);
}
-static void cmm_timer_fn(unsigned long ignored)
+static void cmm_timer_fn(struct timer_list *unused)
{
long nr;
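
The cmm.c hunk above is the tree-wide timer API conversion: DEFINE_TIMER() now takes only the timer name and callback, the callback receives the struct timer_list pointer itself, and the old .function/.data assignments go away. A minimal standalone sketch of the same pattern, using hypothetical names (example_timer, example_timer_fn) rather than the kernel's own code:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static void example_timer_fn(struct timer_list *t);
static DEFINE_TIMER(example_timer, example_timer_fn);

/* The callback gets the timer itself; from_timer() would recover an enclosing struct. */
static void example_timer_fn(struct timer_list *t)
{
        pr_info("example timer fired\n");
}

static void example_arm_timer(void)
{
        /* Only expires needs to be set; function and data are fixed at definition time. */
        example_timer.expires = jiffies + 5 * HZ;
        add_timer(&example_timer);
}
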
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 242b78c0a9ec..93faeca52284 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -50,6 +50,13 @@
#define VM_FAULT_SIGNAL 0x080000
#define VM_FAULT_PFAULT 0x100000
+enum fault_type {
+ KERNEL_FAULT,
+ USER_FAULT,
+ VDSO_FAULT,
+ GMAP_FAULT,
+};
+
static unsigned long store_indication __read_mostly;
static int __init fault_init(void)
@@ -99,27 +106,34 @@ void bust_spinlocks(int yes)
}
/*
- * Returns the address space associated with the fault.
- * Returns 0 for kernel space and 1 for user space.
+ * Find out which address space caused the exception.
+ * Access register mode is impossible, ignore space == 3.
*/
-static inline int user_space_fault(struct pt_regs *regs)
+static inline enum fault_type get_fault_type(struct pt_regs *regs)
{
unsigned long trans_exc_code;
- /*
- * The lowest two bits of the translation exception
- * identification indicate which paging table was used.
- */
trans_exc_code = regs->int_parm_long & 3;
- if (trans_exc_code == 3) /* home space -> kernel */
- return 0;
- if (user_mode(regs))
- return 1;
- if (trans_exc_code == 2) /* secondary space -> set_fs */
- return current->thread.mm_segment.ar4;
- if (current->flags & PF_VCPU)
- return 1;
- return 0;
+ if (likely(trans_exc_code == 0)) {
+ /* primary space exception */
+ if (IS_ENABLED(CONFIG_PGSTE) &&
+ test_pt_regs_flag(regs, PIF_GUEST_FAULT))
+ return GMAP_FAULT;
+ if (current->thread.mm_segment == USER_DS)
+ return USER_FAULT;
+ return KERNEL_FAULT;
+ }
+ if (trans_exc_code == 2) {
+ /* secondary space exception */
+ if (current->thread.mm_segment & 1) {
+ if (current->thread.mm_segment == USER_DS_SACF)
+ return USER_FAULT;
+ return KERNEL_FAULT;
+ }
+ return VDSO_FAULT;
+ }
+ /* home space exception -> access via kernel ASCE */
+ return KERNEL_FAULT;
}
static int bad_address(void *p)
@@ -204,20 +218,23 @@ static void dump_fault_info(struct pt_regs *regs)
break;
}
pr_cont("mode while using ");
- if (!user_space_fault(regs)) {
- asce = S390_lowcore.kernel_asce;
- pr_cont("kernel ");
- }
-#ifdef CONFIG_PGSTE
- else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
- struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
- asce = gmap->asce;
- pr_cont("gmap ");
- }
-#endif
- else {
+ switch (get_fault_type(regs)) {
+ case USER_FAULT:
asce = S390_lowcore.user_asce;
pr_cont("user ");
+ break;
+ case VDSO_FAULT:
+ asce = S390_lowcore.vdso_asce;
+ pr_cont("vdso ");
+ break;
+ case GMAP_FAULT:
+ asce = ((struct gmap *) S390_lowcore.gmap)->asce;
+ pr_cont("gmap ");
+ break;
+ case KERNEL_FAULT:
+ asce = S390_lowcore.kernel_asce;
+ pr_cont("kernel ");
+ break;
}
pr_cont("ASCE.\n");
dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
@@ -273,7 +290,7 @@ static noinline void do_no_context(struct pt_regs *regs)
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- if (!user_space_fault(regs))
+ if (get_fault_type(regs) == KERNEL_FAULT)
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" in virtual kernel address space\n");
else
@@ -395,12 +412,11 @@ static noinline void do_fault_error(struct pt_regs *regs, int access, int fault)
*/
static inline int do_exception(struct pt_regs *regs, int access)
{
-#ifdef CONFIG_PGSTE
struct gmap *gmap;
-#endif
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
+ enum fault_type type;
unsigned long trans_exc_code;
unsigned long address;
unsigned int flags;
@@ -425,8 +441,19 @@ static inline int do_exception(struct pt_regs *regs, int access)
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
- if (unlikely(!user_space_fault(regs) || faulthandler_disabled() || !mm))
+ type = get_fault_type(regs);
+ switch (type) {
+ case KERNEL_FAULT:
+ goto out;
+ case VDSO_FAULT:
+ fault = VM_FAULT_BADMAP;
goto out;
+ case USER_FAULT:
+ case GMAP_FAULT:
+ if (faulthandler_disabled() || !mm)
+ goto out;
+ break;
+ }
address = trans_exc_code & __FAIL_ADDR_MASK;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
@@ -437,10 +464,9 @@ static inline int do_exception(struct pt_regs *regs, int access)
flags |= FAULT_FLAG_WRITE;
down_read(&mm->mmap_sem);
-#ifdef CONFIG_PGSTE
- gmap = (current->flags & PF_VCPU) ?
- (struct gmap *) S390_lowcore.gmap : NULL;
- if (gmap) {
+ gmap = NULL;
+ if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
+ gmap = (struct gmap *) S390_lowcore.gmap;
current->thread.gmap_addr = address;
current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
current->thread.gmap_int_code = regs->int_code & 0xffff;
@@ -452,7 +478,6 @@ static inline int do_exception(struct pt_regs *regs, int access)
if (gmap->pfault_enabled)
flags |= FAULT_FLAG_RETRY_NOWAIT;
}
-#endif
retry:
fault = VM_FAULT_BADMAP;
@@ -507,15 +532,14 @@ retry:
regs, address);
}
if (fault & VM_FAULT_RETRY) {
-#ifdef CONFIG_PGSTE
- if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
+ (flags & FAULT_FLAG_RETRY_NOWAIT)) {
/* FAULT_FLAG_RETRY_NOWAIT has been set,
* mmap_sem has not been released */
current->thread.gmap_pfault = 1;
fault = VM_FAULT_PFAULT;
goto out_up;
}
-#endif
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
* of starvation. */
flags &= ~(FAULT_FLAG_ALLOW_RETRY |
@@ -525,8 +549,7 @@ retry:
goto retry;
}
}
-#ifdef CONFIG_PGSTE
- if (gmap) {
+ if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
address = __gmap_link(gmap, current->thread.gmap_addr,
address);
if (address == -EFAULT) {
@@ -538,7 +561,6 @@ retry:
goto out_up;
}
}
-#endif
fault = 0;
out_up:
up_read(&mm->mmap_sem);
@@ -706,7 +728,7 @@ static void pfault_interrupt(struct ext_code ext_code,
return;
inc_irq_stat(IRQEXT_PFL);
/* Get the token (= pid of the affected task). */
- pid = param64 & LPP_PFAULT_PID_MASK;
+ pid = param64 & LPP_PID_MASK;
rcu_read_lock();
tsk = find_task_by_pid_ns(pid, &init_pid_ns);
if (tsk)
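
The fault.c changes replace the #ifdef CONFIG_PGSTE blocks with IS_ENABLED(CONFIG_PGSTE) tests and route everything through the new fault_type enum. With IS_ENABLED() the guarded code is compiled and type-checked in every configuration and the optimizer drops the branch as dead code when the option is off; the only requirement is that the symbols used inside remain declared. A small illustration of the idiom, with a hypothetical helper (handle_guest_case), not taken from the patch:

#include <linux/kconfig.h>

static void handle_guest_case(void)
{
        /* hypothetical CONFIG_PGSTE-specific work */
}

static void handle_fault_sketch(void)
{
        /* Compile-time constant condition: no preprocessor block needed. */
        if (IS_ENABLED(CONFIG_PGSTE))
                handle_guest_case();
}
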
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 2f66290c9b92..2c55a2b9d6c6 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* KVM guest address space mapping code
*
@@ -814,27 +815,17 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
* @ptl: pointer to the spinlock pointer
*
* Returns a pointer to the locked pte for a guest address, or NULL
- *
- * Note: Can also be called for shadow gmaps.
*/
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
spinlock_t **ptl)
{
unsigned long *table;
- if (gmap_is_shadow(gmap))
- spin_lock(&gmap->guest_table_lock);
+ BUG_ON(gmap_is_shadow(gmap));
/* Walk the gmap page table, lock and get pte pointer */
table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
- if (!table || *table & _SEGMENT_ENTRY_INVALID) {
- if (gmap_is_shadow(gmap))
- spin_unlock(&gmap->guest_table_lock);
+ if (!table || *table & _SEGMENT_ENTRY_INVALID)
return NULL;
- }
- if (gmap_is_shadow(gmap)) {
- *ptl = &gmap->guest_table_lock;
- return pte_offset_map((pmd_t *) table, gaddr);
- }
return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}
@@ -888,8 +879,6 @@ static void gmap_pte_op_end(spinlock_t *ptl)
* -EFAULT if gaddr is invalid (or mapping for shadows is missing).
*
* Called with sg->mm->mmap_sem in read.
- *
- * Note: Can also be called for shadow gmaps.
*/
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
unsigned long len, int prot, unsigned long bits)
@@ -899,6 +888,7 @@ static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
pte_t *ptep;
int rc;
+ BUG_ON(gmap_is_shadow(gmap));
while (len) {
rc = -EAGAIN;
ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
@@ -959,7 +949,8 @@ EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
* @val: pointer to the unsigned long value to return
*
* Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
- * if reading using the virtual address failed.
+ * if reading using the virtual address failed. -EINVAL if called on a gmap
+ * shadow.
*
* Called with gmap->mm->mmap_sem in read.
*/
@@ -970,6 +961,9 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
pte_t *ptep, pte;
int rc;
+ if (gmap_is_shadow(gmap))
+ return -EINVAL;
+
while (1) {
rc = -EAGAIN;
ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
@@ -1027,18 +1021,17 @@ static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
}
/**
- * gmap_protect_rmap - modify access rights to memory and create an rmap
+ * gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap
* @sg: pointer to the shadow guest address space structure
* @raddr: rmap address in the shadow gmap
* @paddr: address in the parent guest address space
* @len: length of the memory area to protect
- * @prot: indicates access rights: none, read-only or read-write
*
* Returns 0 if successfully protected and the rmap was created, -ENOMEM
* if out of memory and -EFAULT if paddr is invalid.
*/
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
- unsigned long paddr, unsigned long len, int prot)
+ unsigned long paddr, unsigned long len)
{
struct gmap *parent;
struct gmap_rmap *rmap;
@@ -1066,7 +1059,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
ptep = gmap_pte_op_walk(parent, paddr, &ptl);
if (ptep) {
spin_lock(&sg->guest_table_lock);
- rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
+ rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
PGSTE_VSIE_BIT);
if (!rc)
gmap_insert_rmap(sg, vmaddr, rmap);
@@ -1076,7 +1069,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
radix_tree_preload_end();
if (rc) {
kfree(rmap);
- rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
+ rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
if (rc)
return rc;
continue;
@@ -1187,12 +1180,11 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
unsigned long *sgt)
{
- unsigned long asce, *pgt;
+ unsigned long *pgt;
struct page *page;
int i;
BUG_ON(!gmap_is_shadow(sg));
- asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
continue;
@@ -1245,12 +1237,11 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
unsigned long *r3t)
{
- unsigned long asce, *sgt;
+ unsigned long *sgt;
struct page *page;
int i;
BUG_ON(!gmap_is_shadow(sg));
- asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
continue;
@@ -1303,12 +1294,11 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
unsigned long *r2t)
{
- unsigned long asce, *r3t;
+ unsigned long *r3t;
struct page *page;
int i;
BUG_ON(!gmap_is_shadow(sg));
- asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
continue;
@@ -1618,7 +1608,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
origin = r2t & _REGION_ENTRY_ORIGIN;
offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
- rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
+ rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
spin_lock(&sg->guest_table_lock);
if (!rc) {
table = gmap_table_walk(sg, saddr, 4);
@@ -1701,7 +1691,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
origin = r3t & _REGION_ENTRY_ORIGIN;
offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
- rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
+ rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
spin_lock(&sg->guest_table_lock);
if (!rc) {
table = gmap_table_walk(sg, saddr, 3);
@@ -1785,7 +1775,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
origin = sgt & _REGION_ENTRY_ORIGIN;
offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
- rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
+ rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
spin_lock(&sg->guest_table_lock);
if (!rc) {
table = gmap_table_walk(sg, saddr, 2);
@@ -1904,7 +1894,7 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
/* Make pgt read-only in parent gmap page table (not the pgste) */
raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
- rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
+ rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE);
spin_lock(&sg->guest_table_lock);
if (!rc) {
table = gmap_table_walk(sg, saddr, 1);
@@ -2007,7 +1997,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_page);
* Called with sg->parent->shadow_lock.
*/
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
- unsigned long gaddr, pte_t *pte)
+ unsigned long gaddr)
{
struct gmap_rmap *rmap, *rnext, *head;
unsigned long start, end, bits, raddr;
@@ -2092,7 +2082,7 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
spin_lock(&gmap->shadow_lock);
list_for_each_entry_safe(sg, next,
&gmap->children, list)
- gmap_shadow_notify(sg, vmaddr, gaddr, pte);
+ gmap_shadow_notify(sg, vmaddr, gaddr);
spin_unlock(&gmap->shadow_lock);
}
if (bits & PGSTE_IN_BIT)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 41ba9bd53e48..3fa3e5323612 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -95,6 +95,7 @@ void __init paging_init(void)
}
init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
S390_lowcore.kernel_asce = init_mm.context.asce;
+ S390_lowcore.user_asce = S390_lowcore.kernel_asce;
crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
vmem_map_init();
@@ -145,8 +146,8 @@ void __init mem_init(void)
void free_initmem(void)
{
- __set_memory((unsigned long) _sinittext,
- (_einittext - _sinittext) >> PAGE_SHIFT,
+ __set_memory((unsigned long)_sinittext,
+ (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RW | SET_MEMORY_NX);
free_initmem_default(POISON_FREE_INITMEM);
}
@@ -221,7 +222,8 @@ device_initcall(s390_cma_mem_init);
#endif /* CONFIG_CMA */
-int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+ bool want_memblock)
{
unsigned long start_pfn = PFN_DOWN(start);
unsigned long size_pages = PFN_DOWN(size);
@@ -231,14 +233,14 @@ int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
if (rc)
return rc;
- rc = __add_pages(nid, start_pfn, size_pages, want_memblock);
+ rc = __add_pages(nid, start_pfn, size_pages, altmap, want_memblock);
if (rc)
vmem_remove_mapping(start, size);
return rc;
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(u64 start, u64 size)
+int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
/*
* There is no hardware or firmware interface which could trigger a
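
The init.c hunks follow a cross-architecture interface change: arch_add_memory(), __add_pages() and arch_remove_memory() now carry a struct vmem_altmap pointer so ZONE_DEVICE users can back the struct pages of a hotplugged range from the device's own memory. A rough caller sketch, under the assumption that no alternative map is used (NULL altmap) and that CONFIG_MEMORY_HOTPLUG is enabled; the function name is made up for illustration:

#include <linux/memory_hotplug.h>
#include <linux/types.h>

static int example_hotplug_range(int nid, u64 start, u64 size)
{
        /* NULL: no device-provided altmap; true: ordinary hotplug with memory block registration. */
        return arch_add_memory(nid, start, size, NULL, true);
}
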
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 5bea139517a2..831bdcf407bb 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* flexible mmap layout support
*
* Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
* All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
* Started by Ingo Molnar <mingo@elte.hu>
*/
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index cc2faffa7d6e..cb364153c43c 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -71,10 +71,8 @@ static void __crst_table_upgrade(void *arg)
{
struct mm_struct *mm = arg;
- if (current->active_mm == mm) {
- clear_user_asce();
+ if (current->active_mm == mm)
set_user_asce(mm);
- }
__tlb_flush_local();
}
@@ -85,8 +83,6 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
- if (end >= TASK_SIZE_MAX)
- return -ENOMEM;
rc = 0;
notify = 0;
while (mm->context.asce_limit < end) {
@@ -159,13 +155,13 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
struct page *page;
- unsigned long *table;
+ u64 *table;
page = alloc_page(GFP_KERNEL);
if (page) {
- table = (unsigned long *) page_to_phys(page);
- clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
- clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+ table = (u64 *)page_to_phys(page);
+ memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
+ memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
}
return page;
}
@@ -222,12 +218,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
if (mm_alloc_pgste(mm)) {
/* Return 4K page table with PGSTEs */
atomic_set(&page->_mapcount, 3);
- clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
- clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+ memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
+ memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
} else {
/* Return the first 2K fragment of the page */
atomic_set(&page->_mapcount, 1);
- clear_table(table, _PAGE_INVALID, PAGE_SIZE);
+ memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
spin_lock_bh(&mm->context.lock);
list_add(&page->lru, &mm->context.pgtable_list);
spin_unlock_bh(&mm->context.lock);
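
In pgalloc.c the s390-private clear_table() calls become generic memset64() calls: memset64(dst, val, count) stores count 64-bit words of val, so the length is now expressed in page-table entries instead of bytes (PTRS_PER_PTE entries of 8 bytes are exactly half of the 4K page, matching the old PAGE_SIZE/2). A standalone sketch of the equivalence, for illustration only (init_pte_page_sketch is a made-up name):

#include <linux/string.h>
#include <linux/types.h>
#include <asm/pgtable.h>

static void init_pte_page_sketch(u64 *table)
{
        /* First half: invalid PTEs; second half: zeroed PGSTE area. */
        memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
        memset64(table + PTRS_PER_PTE, 0ULL, PTRS_PER_PTE);
}
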
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index ae677f814bc0..4f2b65d01a70 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2011
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index f2ada0bc08e6..db55561c5981 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -60,7 +60,7 @@ pte_t __ref *vmem_pte_alloc(void)
pte = (pte_t *) memblock_alloc(size, size);
if (!pte)
return NULL;
- clear_table((unsigned long *) pte, _PAGE_INVALID, size);
+ memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
return pte;
}
@@ -211,7 +211,8 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
/*
* Add a backed mem_map array to the virtual mem_map array.
*/
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+ struct vmem_altmap *altmap)
{
unsigned long pgt_prot, sgt_prot;
unsigned long address = start;
@@ -296,7 +297,8 @@ out:
return ret;
}
-void vmemmap_free(unsigned long start, unsigned long end)
+void vmemmap_free(unsigned long start, unsigned long end,
+ struct vmem_altmap *altmap)
{
}
@@ -403,17 +405,17 @@ void __init vmem_map_init(void)
for_each_memblock(memory, reg)
vmem_add_mem(reg->base, reg->size);
- __set_memory((unsigned long) _stext,
- (_etext - _stext) >> PAGE_SHIFT,
+ __set_memory((unsigned long)_stext,
+ (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
- __set_memory((unsigned long) _etext,
- (_eshared - _etext) >> PAGE_SHIFT,
+ __set_memory((unsigned long)_etext,
+ (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
SET_MEMORY_RO);
- __set_memory((unsigned long) _sinittext,
- (_einittext - _sinittext) >> PAGE_SHIFT,
+ __set_memory((unsigned long)_sinittext,
+ (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
pr_info("Write protected kernel read-only data: %luk\n",
- (_eshared - _stext) >> 10);
+ (unsigned long)(__end_rodata - _stext) >> 10);
}
/*