Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/archrandom.h   |  5
-rw-r--r--  arch/s390/include/asm/ccwdev.h       | 14
-rw-r--r--  arch/s390/include/asm/cio.h          |  3
-rw-r--r--  arch/s390/include/asm/delay.h        | 11
-rw-r--r--  arch/s390/include/asm/ftrace.h       | 31
-rw-r--r--  arch/s390/include/asm/futex.h        |  6
-rw-r--r--  arch/s390/include/asm/kasan.h        | 37
-rw-r--r--  arch/s390/include/asm/livepatch.h    |  5
-rw-r--r--  arch/s390/include/asm/lowcore.h      |  4
-rw-r--r--  arch/s390/include/asm/mmu_context.h  | 46
-rw-r--r--  arch/s390/include/asm/pgtable.h      | 13
-rw-r--r--  arch/s390/include/asm/processor.h    | 20
-rw-r--r--  arch/s390/include/asm/ptrace.h       |  1
-rw-r--r--  arch/s390/include/asm/sclp.h         |  7
-rw-r--r--  arch/s390/include/asm/seccomp.h      |  9
-rw-r--r--  arch/s390/include/asm/setup.h        |  6
-rw-r--r--  arch/s390/include/asm/thread_info.h  |  4
-rw-r--r--  arch/s390/include/asm/timex.h        |  7
-rw-r--r--  arch/s390/include/asm/uaccess.h      | 22
-rw-r--r--  arch/s390/include/asm/vdso.h         | 25
-rw-r--r--  arch/s390/include/asm/vtime.h        |  1
-rw-r--r--  arch/s390/include/uapi/asm/signal.h  | 24
22 files changed, 111 insertions, 190 deletions
diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h
index de61ce562052..5dc712fde3c7 100644
--- a/arch/s390/include/asm/archrandom.h
+++ b/arch/s390/include/asm/archrandom.h
@@ -2,7 +2,7 @@
/*
* Kernel interface for the s390 arch_random_* functions
*
- * Copyright IBM Corp. 2017
+ * Copyright IBM Corp. 2017, 2020
*
* Author: Harald Freudenberger <freude@de.ibm.com>
*
@@ -19,10 +19,13 @@
DECLARE_STATIC_KEY_FALSE(s390_arch_random_available);
extern atomic64_t s390_arch_random_counter;
+bool s390_arch_get_random_long(unsigned long *v);
bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
static inline bool __must_check arch_get_random_long(unsigned long *v)
{
+ if (static_branch_likely(&s390_arch_random_available))
+ return s390_arch_get_random_long(v);
return false;
}
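
Note on the new fast path (not part of the patch): the static branch keeps arch_get_random_long() a single fall-through when the PRNO facility is absent, and callers must still honor the __must_check contract. A minimal caller sketch, with jiffies as a purely hypothetical fallback source (needs <asm/archrandom.h> and <linux/jiffies.h>):

/*
 * Sketch only: arch_get_random_long() may return false, so check the
 * result and fall back to another source; jiffies is just a placeholder.
 */
static unsigned long get_seed_or_fallback(void)
{
	unsigned long seed;

	if (arch_get_random_long(&seed))
		return seed;
	return jiffies;
}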
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index c0be5fe1ddba..778247bb1d61 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -104,6 +104,8 @@ struct ccw_device {
was successfully verified. */
#define PE_PATHGROUP_ESTABLISHED 0x4 /* A pathgroup was reset and had
to be established again. */
+#define PE_PATH_FCES_EVENT 0x8 /* The FCES Status of a path has
+ * changed. */
/*
* Possible CIO actions triggered by the unit check handler.
@@ -115,7 +117,7 @@ enum uc_todo {
};
/**
- * struct ccw driver - device driver for channel attached devices
+ * struct ccw_driver - device driver for channel attached devices
* @ids: ids supported by this driver
* @probe: function called on probe
* @remove: function called on remove
@@ -124,11 +126,6 @@ enum uc_todo {
* @notify: notify driver of device state changes
* @path_event: notify driver of channel path events
* @shutdown: called at device shutdown
- * @prepare: prepare for pm state transition
- * @complete: undo work done in @prepare
- * @freeze: callback for freezing during hibernation snapshotting
- * @thaw: undo work done in @freeze
- * @restore: callback for restoring after hibernation
* @uc_handler: callback for unit check handler
* @driver: embedded device driver structure
* @int_class: interruption class to use for accounting interrupts
@@ -142,11 +139,6 @@ struct ccw_driver {
int (*notify) (struct ccw_device *, int);
void (*path_event) (struct ccw_device *, int *);
void (*shutdown) (struct ccw_device *);
- int (*prepare) (struct ccw_device *);
- void (*complete) (struct ccw_device *);
- int (*freeze)(struct ccw_device *);
- int (*thaw) (struct ccw_device *);
- int (*restore)(struct ccw_device *);
enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *);
struct device_driver driver;
enum interruption_class int_class;
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 5c58756d6476..ac02df906cae 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -329,7 +329,7 @@ struct ccw_dev_id {
};
/**
- * ccw_device_id_is_equal() - compare two ccw_dev_ids
+ * ccw_dev_id_is_equal() - compare two ccw_dev_ids
* @dev_id1: a ccw_dev_id
* @dev_id2: another ccw_dev_id
* Returns:
@@ -373,5 +373,6 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta);
int chsc_sstpi(void *page, void *result, size_t size);
int chsc_stzi(void *page, void *result, size_t size);
int chsc_sgib(u32 origin);
+int chsc_scud(u16 cu, u64 *esm, u8 *esm_valid);
#endif
diff --git a/arch/s390/include/asm/delay.h b/arch/s390/include/asm/delay.h
index 898323fd93d2..21a8fe18fe66 100644
--- a/arch/s390/include/asm/delay.h
+++ b/arch/s390/include/asm/delay.h
@@ -13,13 +13,12 @@
#ifndef _S390_DELAY_H
#define _S390_DELAY_H
-void __ndelay(unsigned long long nsecs);
-void __udelay(unsigned long long usecs);
-void udelay_simple(unsigned long long usecs);
+void __ndelay(unsigned long nsecs);
+void __udelay(unsigned long usecs);
void __delay(unsigned long loops);
-#define ndelay(n) __ndelay((unsigned long long) (n))
-#define udelay(n) __udelay((unsigned long long) (n))
-#define mdelay(n) __udelay((unsigned long long) (n) * 1000)
+#define ndelay(n) __ndelay((unsigned long)(n))
+#define udelay(n) __udelay((unsigned long)(n))
+#define mdelay(n) __udelay((unsigned long)(n) * 1000)
#endif /* defined(_S390_DELAY_H) */
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 68d362f8d6c1..695c61989f97 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -2,16 +2,9 @@
#ifndef _ASM_S390_FTRACE_H
#define _ASM_S390_FTRACE_H
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#define ARCH_SUPPORTS_FTRACE_OPS 1
-
-#if defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)
#define MCOUNT_INSN_SIZE 6
-#else
-#define MCOUNT_INSN_SIZE 24
-#define MCOUNT_RETURN_FIXUP 18
-#endif
-
-#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#ifndef __ASSEMBLY__
@@ -22,7 +15,6 @@
#define ftrace_return_address(n) __builtin_return_address(n)
#endif
-void _mcount(void);
void ftrace_caller(void);
extern char ftrace_graph_caller_end;
@@ -30,12 +22,20 @@ extern unsigned long ftrace_plt;
struct dyn_arch_ftrace { };
-#define MCOUNT_ADDR ((unsigned long)_mcount)
+#define MCOUNT_ADDR 0
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#define KPROBE_ON_FTRACE_NOP 0
#define KPROBE_ON_FTRACE_CALL 1
+struct module;
+struct dyn_ftrace;
+/*
+ * Either -mhotpatch or -mnop-mcount is used - no explicit init is required
+ */
+static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) { return 0; }
+#define ftrace_init_nop ftrace_init_nop
+
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
@@ -49,28 +49,17 @@ struct ftrace_insn {
static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_FUNCTION_TRACER
-#if defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)
/* brcl 0,0 */
insn->opc = 0xc004;
insn->disp = 0;
-#else
- /* jg .+24 */
- insn->opc = 0xc0f4;
- insn->disp = MCOUNT_INSN_SIZE / 2;
-#endif
#endif
}
static inline int is_ftrace_nop(struct ftrace_insn *insn)
{
#ifdef CONFIG_FUNCTION_TRACER
-#if defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)
if (insn->disp == 0)
return 1;
-#else
- if (insn->disp == MCOUNT_INSN_SIZE / 2)
- return 1;
-#endif
#endif
return 0;
}
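
With only the -mhotpatch/-mnop-mcount variant left, the ftrace NOP is always the 6-byte "brcl 0,0", so the displacement check collapses to a zero test. A small self-check sketch (not in the patch), assuming CONFIG_FUNCTION_TRACER is enabled:

/* Sketch: a freshly generated NOP must satisfy is_ftrace_nop(). */
static void __init check_ftrace_nop_encoding(void)
{
	struct ftrace_insn insn = {};

	ftrace_generate_nop_insn(&insn);	/* opc = 0xc004, disp = 0 */
	WARN_ON(!is_ftrace_nop(&insn));
}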
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 26f9144562c9..c22debfcebf1 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -26,9 +26,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
u32 __user *uaddr)
{
int oldval = 0, newval, ret;
- mm_segment_t old_fs;
- old_fs = enable_sacf_uaccess();
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op("lr %2,%5\n",
@@ -53,7 +51,6 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
default:
ret = -ENOSYS;
}
- disable_sacf_uaccess(old_fs);
if (!ret)
*oval = oldval;
@@ -64,10 +61,8 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
- mm_segment_t old_fs;
int ret;
- old_fs = enable_sacf_uaccess();
asm volatile(
" sacf 256\n"
"0: cs %1,%4,0(%5)\n"
@@ -77,7 +72,6 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
: "cc", "memory");
- disable_sacf_uaccess(old_fs);
*uval = oldval;
return ret;
}
diff --git a/arch/s390/include/asm/kasan.h b/arch/s390/include/asm/kasan.h
index e9bf486de136..76f351bd6645 100644
--- a/arch/s390/include/asm/kasan.h
+++ b/arch/s390/include/asm/kasan.h
@@ -2,28 +2,51 @@
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
+#include <asm/pgtable.h>
+
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SCALE_SHIFT 3
-#ifdef CONFIG_KASAN_S390_4_LEVEL_PAGING
#define KASAN_SHADOW_SIZE \
(_AC(1, UL) << (_REGION1_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
-#else
-#define KASAN_SHADOW_SIZE \
- (_AC(1, UL) << (_REGION2_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
-#endif
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#define KASAN_SHADOW_START KASAN_SHADOW_OFFSET
#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
extern void kasan_early_init(void);
-extern void kasan_copy_shadow(pgd_t *dst);
+extern void kasan_copy_shadow_mapping(void);
extern void kasan_free_early_identity(void);
extern unsigned long kasan_vmax;
+
+/*
+ * Estimate kasan memory requirements, which it will reserve
+ * at the very end of available physical memory. To estimate
+ * that, we take into account that kasan would require
+ * 1/8 of available physical memory (for shadow memory) +
+ * creating page tables for the whole memory + shadow memory
+ * region (1 + 1/8). To keep page tables estimates simple take
+ * the double of combined ptes size.
+ *
+ * physmem parameter has to be already adjusted if not entire physical memory
+ * would be used (e.g. due to effect of "mem=" option).
+ */
+static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem)
+{
+ unsigned long kasan_needs;
+ unsigned long pages;
+ /* for shadow memory */
+ kasan_needs = round_up(physmem / 8, PAGE_SIZE);
+ /* for paging structures */
+ pages = DIV_ROUND_UP(physmem + kasan_needs, PAGE_SIZE);
+ kasan_needs += DIV_ROUND_UP(pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;
+
+ return kasan_needs;
+}
#else
static inline void kasan_early_init(void) { }
-static inline void kasan_copy_shadow(pgd_t *dst) { }
+static inline void kasan_copy_shadow_mapping(void) { }
static inline void kasan_free_early_identity(void) { }
+static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem) { return 0; }
#endif
#endif
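
A rough worked example of kasan_estimate_memory_needs(), assuming 4 KB pages and s390's 256-entry, 2 KB page tables (_PAGE_ENTRIES = 256, _PAGE_TABLE_SIZE = 2048): for physmem = 8 GB the shadow part is 8 GB / 8 = 1 GB; pages = (8 GB + 1 GB) / 4 KB = 2359296; the page-table estimate is DIV_ROUND_UP(2359296, 256) * 2048 * 2 = 9216 * 4096 bytes = 36 MB. So roughly 1 GB + 36 MB would be reserved at the end of available physical memory.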
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
index 818612b784cd..d578a8c76676 100644
--- a/arch/s390/include/asm/livepatch.h
+++ b/arch/s390/include/asm/livepatch.h
@@ -11,10 +11,13 @@
#ifndef ASM_LIVEPATCH_H
#define ASM_LIVEPATCH_H
+#include <linux/ftrace.h>
#include <asm/ptrace.h>
-static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
{
+ struct pt_regs *regs = ftrace_get_regs(fregs);
+
regs->psw.addr = ip;
}
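
For context (an illustration, not taken from this patch): klp_arch_set_pc() is called from the livepatch core's ftrace handler, which now receives a struct ftrace_regs. A sketch of that calling shape, with new_func_addr standing in for the address of the replacement function:

/*
 * Illustrative handler shape only; the real caller is the generic
 * livepatch core. new_func_addr is a hypothetical replacement address.
 */
static void notrace my_klp_handler(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *ops,
				   struct ftrace_regs *fregs)
{
	unsigned long new_func_addr = 0;	/* filled in by the patching core */

	if (new_func_addr)
		klp_arch_set_pc(fregs, new_func_addr);	/* psw.addr on s390 */
}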
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 612ed3c6d581..69ce9191eaf1 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -116,7 +116,7 @@ struct lowcore {
/* Address space pointer. */
__u64 kernel_asce; /* 0x0380 */
__u64 user_asce; /* 0x0388 */
- __u64 vdso_asce; /* 0x0390 */
+ __u8 pad_0x0390[0x0398-0x0390]; /* 0x0390 */
/*
* The lpp and current_pid fields form a
@@ -134,7 +134,7 @@ struct lowcore {
__u32 spinlock_index; /* 0x03b0 */
__u32 fpu_flags; /* 0x03b4 */
__u64 percpu_offset; /* 0x03b8 */
- __u64 vdso_per_cpu_data; /* 0x03c0 */
+ __u8 pad_0x03c0[0x03c8-0x03c0]; /* 0x03c0 */
__u64 machine_flags; /* 0x03c8 */
__u64 gmap; /* 0x03d0 */
__u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index c9f3d8a52756..e7cffc7b5c2f 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -15,6 +15,7 @@
#include <asm/ctl_reg.h>
#include <asm-generic/mm_hooks.h>
+#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
@@ -69,41 +70,18 @@ static inline int init_new_context(struct task_struct *tsk,
return 0;
}
-#define destroy_context(mm) do { } while (0)
-
-static inline void set_user_asce(struct mm_struct *mm)
-{
- S390_lowcore.user_asce = mm->context.asce;
- __ctl_load(S390_lowcore.user_asce, 1, 1);
- clear_cpu_flag(CIF_ASCE_PRIMARY);
-}
-
-static inline void clear_user_asce(void)
-{
- S390_lowcore.user_asce = S390_lowcore.kernel_asce;
- __ctl_load(S390_lowcore.kernel_asce, 1, 1);
- set_cpu_flag(CIF_ASCE_PRIMARY);
-}
-
-mm_segment_t enable_sacf_uaccess(void);
-void disable_sacf_uaccess(mm_segment_t old_fs);
-
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
int cpu = smp_processor_id();
- S390_lowcore.user_asce = next->context.asce;
+ if (next == &init_mm)
+ S390_lowcore.user_asce = s390_invalid_asce;
+ else
+ S390_lowcore.user_asce = next->context.asce;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
- /* Clear previous user-ASCE from CR1 and CR7 */
- if (!test_cpu_flag(CIF_ASCE_PRIMARY)) {
- __ctl_load(S390_lowcore.kernel_asce, 1, 1);
- set_cpu_flag(CIF_ASCE_PRIMARY);
- }
- if (test_cpu_flag(CIF_ASCE_SECONDARY)) {
- __ctl_load(S390_lowcore.vdso_asce, 7, 7);
- clear_cpu_flag(CIF_ASCE_SECONDARY);
- }
+ /* Clear previous user-ASCE from CR7 */
+ __ctl_load(s390_invalid_asce, 7, 7);
if (prev != next)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
@@ -122,18 +100,18 @@ static inline void finish_arch_post_lock_switch(void)
__tlb_flush_mm_lazy(mm);
preempt_enable();
}
- set_fs(current->thread.mm_segment);
+ __ctl_load(S390_lowcore.user_asce, 7, 7);
}
-#define enter_lazy_tlb(mm,tsk) do { } while (0)
-#define deactivate_mm(tsk,mm) do { } while (0)
-
+#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
switch_mm(prev, next, current);
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
- set_user_asce(next);
+ __ctl_load(S390_lowcore.user_asce, 7, 7);
}
+#include <asm-generic/mmu_context.h>
+
#endif /* __S390_MMU_CONTEXT_H */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index b5dbae78969b..794746a32806 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -23,6 +23,7 @@
extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
+extern unsigned long s390_invalid_asce;
enum {
PG_DIRECT_MAP_4K = 0,
@@ -79,15 +80,15 @@ extern unsigned long zero_page_mask;
/*
* The vmalloc and module area will always be on the topmost area of the
- * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
- * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
- * modules will reside. That makes sure that inter module branches always
- * happen without trampolines and in addition the placement within a 2GB frame
- * is branch prediction unit friendly.
+ * kernel mapping. 512GB are reserved for vmalloc by default.
+ * At the top of the vmalloc area a 2GB area is reserved where modules
+ * will reside. That makes sure that inter module branches always
+ * happen without trampolines and in addition the placement within a
+ * 2GB frame is branch prediction unit friendly.
*/
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
-#define VMALLOC_DEFAULT_SIZE ((128UL << 30) - MODULES_LEN)
+#define VMALLOC_DEFAULT_SIZE ((512UL << 30) - MODULES_LEN)
extern struct page *vmemmap;
extern unsigned long vmemmap_size;
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 962da04234af..2058a435add4 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -14,20 +14,14 @@
#include <linux/bits.h>
-#define CIF_ASCE_PRIMARY 0 /* primary asce needs fixup / uaccess */
-#define CIF_ASCE_SECONDARY 1 /* secondary asce needs fixup / uaccess */
#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
#define CIF_FPU 3 /* restore FPU registers */
-#define CIF_IGNORE_IRQ 4 /* ignore interrupt (for udelay) */
#define CIF_ENABLED_WAIT 5 /* in enabled wait state */
#define CIF_MCCK_GUEST 6 /* machine check happening in guest */
#define CIF_DEDICATED_CPU 7 /* this CPU is dedicated */
-#define _CIF_ASCE_PRIMARY BIT(CIF_ASCE_PRIMARY)
-#define _CIF_ASCE_SECONDARY BIT(CIF_ASCE_SECONDARY)
#define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY)
#define _CIF_FPU BIT(CIF_FPU)
-#define _CIF_IGNORE_IRQ BIT(CIF_IGNORE_IRQ)
#define _CIF_ENABLED_WAIT BIT(CIF_ENABLED_WAIT)
#define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST)
#define _CIF_DEDICATED_CPU BIT(CIF_DEDICATED_CPU)
@@ -102,8 +96,6 @@ extern void __bpon(void);
#define HAVE_ARCH_PICK_MMAP_LAYOUT
-typedef unsigned int mm_segment_t;
-
/*
* Thread structure
*/
@@ -116,7 +108,6 @@ struct thread_struct {
unsigned long hardirq_timer; /* task cputime in hardirq context */
unsigned long softirq_timer; /* task cputime in softirq context */
unsigned long sys_call_table; /* system call table address */
- mm_segment_t mm_segment;
unsigned long gmap_addr; /* address of last gmap fault. */
unsigned int gmap_write_flag; /* gmap fault write indication */
unsigned int gmap_int_code; /* int code of last gmap fault */
@@ -300,11 +291,6 @@ static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
}
/*
- * Function to stop a processor until the next interrupt occurs
- */
-void enabled_wait(void);
-
-/*
* Function to drop a processor into disabled wait state
*/
static __always_inline void __noreturn disabled_wait(void)
@@ -318,14 +304,10 @@ static __always_inline void __noreturn disabled_wait(void)
}
/*
- * Basic Machine Check/Program Check Handler.
+ * Basic Program Check Handler.
*/
-
extern void s390_base_pgm_handler(void);
-extern void s390_base_ext_handler(void);
-
extern void (*s390_base_pgm_handler_fn)(void);
-extern void (*s390_base_ext_handler_fn)(void);
#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 16b3e4396312..73ca7f7cac33 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -87,6 +87,7 @@ struct pt_regs
unsigned int int_parm;
unsigned long int_parm_long;
unsigned long flags;
+ unsigned long cr1;
};
/*
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index a7bdd128d85b..5763769a39b6 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -12,7 +12,12 @@
#include <asm/cpu.h>
#define SCLP_CHP_INFO_MASK_SIZE 32
-#define SCLP_MAX_CORES 256
+#define EARLY_SCCB_SIZE PAGE_SIZE
+#define SCLP_MAX_CORES 512
+/* 144 + 16 * SCLP_MAX_CORES + 2 * (SCLP_MAX_CORES - 1) */
+#define EXT_SCCB_READ_SCP (3 * PAGE_SIZE)
+/* 24 + 16 * SCLP_MAX_CORES */
+#define EXT_SCCB_READ_CPU (3 * PAGE_SIZE)
struct sclp_chp_info {
u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
diff --git a/arch/s390/include/asm/seccomp.h b/arch/s390/include/asm/seccomp.h
index 795bbe0d7ca6..71d46f0ba97b 100644
--- a/arch/s390/include/asm/seccomp.h
+++ b/arch/s390/include/asm/seccomp.h
@@ -16,4 +16,13 @@
#include <asm-generic/seccomp.h>
+#define SECCOMP_ARCH_NATIVE AUDIT_ARCH_S390X
+#define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+#define SECCOMP_ARCH_NATIVE_NAME "s390x"
+#ifdef CONFIG_COMPAT
+# define SECCOMP_ARCH_COMPAT AUDIT_ARCH_S390
+# define SECCOMP_ARCH_COMPAT_NR NR_syscalls
+# define SECCOMP_ARCH_COMPAT_NAME "s390"
+#endif
+
#endif /* _ASM_S390_SECCOMP_H */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index bdb242a1544e..3e388fa208d4 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -16,8 +16,6 @@
#define EARLY_SCCB_OFFSET 0x11000
#define HEAD_END 0x12000
-#define EARLY_SCCB_SIZE PAGE_SIZE
-
/*
* Machine features detected in early.c
*/
@@ -88,10 +86,8 @@ extern unsigned int zlib_dfltcc_support;
#define ZLIB_DFLTCC_FULL_DEBUG 4
extern int noexec_disabled;
-extern int memory_end_set;
-extern unsigned long memory_end;
+extern unsigned long ident_map_size;
extern unsigned long vmalloc_size;
-extern unsigned long max_physmem_end;
/* The Write Back bit position in the physaddr is given by the SLPC PCI */
extern unsigned long mio_wb_bit_mask;
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 13a04fcf7762..3c5b1f909b6d 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -18,7 +18,7 @@
#else
#define THREAD_SIZE_ORDER 2
#endif
-#define BOOT_STACK_ORDER 2
+#define BOOT_STACK_SIZE (PAGE_SIZE << 2)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#ifndef __ASSEMBLY__
@@ -65,6 +65,7 @@ void arch_setup_new_exec(void);
#define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */
#define TIF_PATCH_PENDING 5 /* pending live patching update */
#define TIF_PGSTE 6 /* New mm's will use 4K page tables */
+#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
@@ -82,6 +83,7 @@ void arch_setup_new_exec(void);
#define TIF_SYSCALL_TRACEPOINT 27 /* syscall tracepoint instrumentation */
#define _TIF_NOTIFY_RESUME BIT(TIF_NOTIFY_RESUME)
+#define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL)
#define _TIF_SIGPENDING BIT(TIF_SIGPENDING)
#define _TIF_NEED_RESCHED BIT(TIF_NEED_RESCHED)
#define _TIF_UPROBE BIT(TIF_UPROBE)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 289aaff4d365..c8e244ecdfde 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -49,6 +49,13 @@ static inline void set_clock_comparator(__u64 time)
asm volatile("sckc %0" : : "Q" (time));
}
+static inline void set_tod_programmable_field(u16 val)
+{
+ register unsigned long reg0 asm("0") = val;
+
+ asm volatile("sckpf" : : "d" (reg0));
+}
+
void clock_comparator_work(void);
void __init time_early_init(void);
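
The register asm("0") binding is needed because SCKPF takes its operand implicitly in general register 0. A plausible use, assumed here rather than shown in the patch, ties in with vdso_getcpu_init() from the same series: stamping each CPU's TOD programmable field with its CPU number so a store-clock-extended value exposes it.

/* Sketch: per-cpu init storing the CPU number in the TOD programmable field. */
static void init_tod_cpu_field(void)
{
	set_tod_programmable_field(raw_smp_processor_id());
}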
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index c868e7ee49b3..c6707885e7c2 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -18,23 +18,7 @@
#include <asm/extable.h>
#include <asm/facility.h>
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-
-#define KERNEL_DS (0)
-#define KERNEL_DS_SACF (1)
-#define USER_DS (2)
-#define USER_DS_SACF (3)
-
-#define get_fs() (current->thread.mm_segment)
-#define uaccess_kernel() ((get_fs() & 2) == KERNEL_DS)
-
-void set_fs(mm_segment_t fs);
+void debug_user_asce(void);
static inline int __range_ok(unsigned long addr, unsigned long size)
{
@@ -88,7 +72,7 @@ int __get_user_bad(void) __attribute__((noreturn));
static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
- unsigned long spec = 0x010000UL;
+ unsigned long spec = 0x810000UL;
int rc;
switch (size) {
@@ -121,7 +105,7 @@ static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned lon
static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
- unsigned long spec = 0x01UL;
+ unsigned long spec = 0x81UL;
int rc;
switch (size) {
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 29b44a930e71..f65590889054 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -12,32 +12,9 @@
#ifndef __ASSEMBLY__
-/*
- * Note about the vdso_data and vdso_per_cpu_data structures:
- *
- * NEVER USE THEM IN USERSPACE CODE DIRECTLY. The layout of the
- * structure is supposed to be known only to the function in the vdso
- * itself and may change without notice.
- */
-
-struct vdso_per_cpu_data {
- /*
- * Note: node_id and cpu_nr must be at adjacent memory locations.
- * VDSO userspace must read both values with a single instruction.
- */
- union {
- __u64 getcpu_val;
- struct {
- __u32 node_id;
- __u32 cpu_nr;
- };
- };
-};
-
extern struct vdso_data *vdso_data;
-int vdso_alloc_per_cpu(struct lowcore *lowcore);
-void vdso_free_per_cpu(struct lowcore *lowcore);
+void vdso_getcpu_init(void);
#endif /* __ASSEMBLY__ */
#endif /* __S390_VDSO_H__ */
diff --git a/arch/s390/include/asm/vtime.h b/arch/s390/include/asm/vtime.h
index 3622d4ebc73a..fac6a67988eb 100644
--- a/arch/s390/include/asm/vtime.h
+++ b/arch/s390/include/asm/vtime.h
@@ -2,7 +2,6 @@
#ifndef _S390_VTIME_H
#define _S390_VTIME_H
-#define __ARCH_HAS_VTIME_ACCOUNT
#define __ARCH_HAS_VTIME_TASK_SWITCH
#endif /* _S390_VTIME_H */
diff --git a/arch/s390/include/uapi/asm/signal.h b/arch/s390/include/uapi/asm/signal.h
index 9a14a611ed82..0189f326aac5 100644
--- a/arch/s390/include/uapi/asm/signal.h
+++ b/arch/s390/include/uapi/asm/signal.h
@@ -65,30 +65,6 @@ typedef unsigned long sigset_t;
#define SIGRTMIN 32
#define SIGRTMAX _NSIG
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002
-#define SA_SIGINFO 0x00000004
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
#define SA_RESTORER 0x04000000
#define MINSIGSTKSZ 2048