author    Linus Torvalds <torvalds@linux-foundation.org>  2017-11-13 11:47:01 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-11-13 11:47:01 -0800
commit    d60a540ac5f2fbab3e6fe592717b445bd7343a91 (patch)
tree      cdfe23e7c2cb753aba10e5edc7c38972eac25ed0 /arch/s390/include
parent    Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu (diff)
parent    MAINTAINERS: add virtio-ccw.h to virtio/s390 section (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Heiko Carstens:
 "Since Martin is on vacation you get the s390 pull request for the
  v4.15 merge window this time from me.

  Besides a lot of cleanups and bug fixes these are the most important
  changes:

   - a new regset for runtime instrumentation registers

   - hardware accelerated AES-GCM support for the aes_s390 module

   - support for the new CEX6S crypto cards

   - support for FORTIFY_SOURCE

   - addition of missing z13 and new z14 instructions to the in-kernel
     disassembler

   - generate opcode tables for the in-kernel disassembler out of a
     simple text file instead of having to manually maintain those
     tables

   - fast memset16, memset32 and memset64 implementations

   - removal of named saved segment support

   - hardware counter support for z14

   - queued spinlocks and queued rwlocks implementations for s390

   - use the stack_depth tracking feature for s390 BPF JIT

   - a new s390_sthyi system call which emulates the sthyi (store
     hypervisor information) instruction

   - removal of the old KVM virtio transport

   - an s390 specific CPU alternatives implementation which is used in
     the new spinlock code"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (88 commits)
  MAINTAINERS: add virtio-ccw.h to virtio/s390 section
  s390/noexec: execute kexec datamover without DAT
  s390: fix transactional execution control register handling
  s390/bpf: take advantage of stack_depth tracking
  s390: simplify transactional execution elf hwcap handling
  s390/zcrypt: Rework struct ap_qact_ap_info.
  s390/virtio: remove unused header file kvm_virtio.h
  s390: avoid undefined behaviour
  s390/disassembler: generate opcode tables from text file
  s390/disassembler: remove insn_to_mnemonic()
  s390/dasd: avoid calling do_gettimeofday()
  s390: vfio-ccw: Do not attempt to free no-op, test and tic cda.
  s390: remove named saved segment support
  s390/archrandom: Reconsider s390 arch random implementation
  s390/pci: do not require AIS facility
  s390/qdio: sanitize put_indicator
  s390/qdio: use atomic_cmpxchg
  s390/nmi: avoid using long-displacement facility
  s390: pass endianness info to sparse
  s390/decompressor: remove informational messages
  ...
Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/Kbuild  1
-rw-r--r--  arch/s390/include/asm/alternative.h  163
-rw-r--r--  arch/s390/include/asm/archrandom.h  26
-rw-r--r--  arch/s390/include/asm/atomic_ops.h  32
-rw-r--r--  arch/s390/include/asm/ccwgroup.h  2
-rw-r--r--  arch/s390/include/asm/cpacf.h  52
-rw-r--r--  arch/s390/include/asm/ctl_reg.h  32
-rw-r--r--  arch/s390/include/asm/debug.h  190
-rw-r--r--  arch/s390/include/asm/dis.h  28
-rw-r--r--  arch/s390/include/asm/ipl.h  3
-rw-r--r--  arch/s390/include/asm/kprobes.h  2
-rw-r--r--  arch/s390/include/asm/kvm_host.h  1
-rw-r--r--  arch/s390/include/asm/lowcore.h  5
-rw-r--r--  arch/s390/include/asm/nmi.h  19
-rw-r--r--  arch/s390/include/asm/pci_debug.h  6
-rw-r--r--  arch/s390/include/asm/pci_insn.h  2
-rw-r--r--  arch/s390/include/asm/pgalloc.h  18
-rw-r--r--  arch/s390/include/asm/processor.h  8
-rw-r--r--  arch/s390/include/asm/runtime_instr.h  86
-rw-r--r--  arch/s390/include/asm/rwsem.h  211
-rw-r--r--  arch/s390/include/asm/sections.h  2
-rw-r--r--  arch/s390/include/asm/setup.h  3
-rw-r--r--  arch/s390/include/asm/smp.h  5
-rw-r--r--  arch/s390/include/asm/spinlock.h  167
-rw-r--r--  arch/s390/include/asm/spinlock_types.h  4
-rw-r--r--  arch/s390/include/asm/string.h  46
-rw-r--r--  arch/s390/include/asm/switch_to.h  2
-rw-r--r--  arch/s390/include/asm/sysinfo.h  4
-rw-r--r--  arch/s390/include/asm/topology.h  2
-rw-r--r--  arch/s390/include/asm/vdso.h  1
-rw-r--r--  arch/s390/include/uapi/asm/kvm_virtio.h  65
-rw-r--r--  arch/s390/include/uapi/asm/sthyi.h  6
-rw-r--r--  arch/s390/include/uapi/asm/unistd.h  3
33 files changed, 537 insertions, 660 deletions
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 6e2c9f7e47fa..41c211a4d8b1 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -15,6 +15,7 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += preempt.h
+generic-y += rwsem.h
generic-y += trace_clock.h
generic-y += unaligned.h
generic-y += word-at-a-time.h
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
new file mode 100644
index 000000000000..6c268f6a51d3
--- /dev/null
+++ b/arch/s390/include/asm/alternative.h
@@ -0,0 +1,163 @@
+#ifndef _ASM_S390_ALTERNATIVE_H
+#define _ASM_S390_ALTERNATIVE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+struct alt_instr {
+ s32 instr_offset; /* original instruction */
+ s32 repl_offset; /* offset to replacement instruction */
+ u16 facility; /* facility bit set for replacement */
+ u8 instrlen; /* length of original instruction */
+ u8 replacementlen; /* length of new instruction */
+} __packed;
+
+#ifdef CONFIG_ALTERNATIVES
+extern void apply_alternative_instructions(void);
+extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+#else
+static inline void apply_alternative_instructions(void) {};
+static inline void apply_alternatives(struct alt_instr *start,
+ struct alt_instr *end) {};
+#endif
+/*
+ * |661: |662: |6620 |663:
+ * +-----------+---------------------+
+ * | oldinstr | oldinstr_padding |
+ * | +----------+----------+
+ * | | | |
+ * | | >6 bytes |6/4/2 nops|
+ * | |6 bytes jg----------->
+ * +-----------+---------------------+
+ * ^^ static padding ^^
+ *
+ * .altinstr_replacement section
+ * +---------------------+-----------+
+ * |6641: |6651:
+ * | alternative instr 1 |
+ * +-----------+---------+- - - - - -+
+ * |6642: |6652: |
+ * | alternative instr 2 | padding
+ * +---------------------+- - - - - -+
+ * ^ runtime ^
+ *
+ * .altinstructions section
+ * +---------------------------------+
+ * | alt_instr entries for each |
+ * | alternative instr |
+ * +---------------------------------+
+ */
+
+#define b_altinstr(num) "664"#num
+#define e_altinstr(num) "665"#num
+
+#define e_oldinstr_pad_end "663"
+#define oldinstr_len "662b-661b"
+#define oldinstr_total_len e_oldinstr_pad_end"b-661b"
+#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b"
+#define oldinstr_pad_len(num) \
+ "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
+ "((" altinstr_len(num) ")-(" oldinstr_len "))"
+
+#define INSTR_LEN_SANITY_CHECK(len) \
+ ".if " len " > 254\n" \
+ "\t.error \"cpu alternatives does not support instructions " \
+ "blocks > 254 bytes\"\n" \
+ ".endif\n" \
+ ".if (" len ") %% 2\n" \
+ "\t.error \"cpu alternatives instructions length is odd\"\n" \
+ ".endif\n"
+
+#define OLDINSTR_PADDING(oldinstr, num) \
+ ".if " oldinstr_pad_len(num) " > 6\n" \
+ "\tjg " e_oldinstr_pad_end "f\n" \
+ "6620:\n" \
+ "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
+ ".else\n" \
+ "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n" \
+ "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n" \
+ "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n" \
+ ".endif\n"
+
+#define OLDINSTR(oldinstr, num) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ OLDINSTR_PADDING(oldinstr, num) \
+ e_oldinstr_pad_end ":\n" \
+ INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define OLDINSTR_2(oldinstr, num1, num2) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \
+ OLDINSTR_PADDING(oldinstr, num2) \
+ ".else\n" \
+ OLDINSTR_PADDING(oldinstr, num1) \
+ ".endif\n" \
+ e_oldinstr_pad_end ":\n" \
+ INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define ALTINSTR_ENTRY(facility, num) \
+ "\t.long 661b - .\n" /* old instruction */ \
+ "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
+ "\t.word " __stringify(facility) "\n" /* facility bit */ \
+ "\t.byte " oldinstr_total_len "\n" /* source len */ \
+ "\t.byte " altinstr_len(num) "\n" /* alt instruction len */
+
+#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \
+ b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \
+ INSTR_LEN_SANITY_CHECK(altinstr_len(num))
+
+#ifdef CONFIG_ALTERNATIVES
+/* alternative assembly primitive: */
+#define ALTERNATIVE(oldinstr, altinstr, facility) \
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(altinstr, 1) \
+ ".popsection\n" \
+ OLDINSTR(oldinstr, 1) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(facility, 1) \
+ ".popsection\n"
+
+#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(altinstr1, 1) \
+ ALTINSTR_REPLACEMENT(altinstr2, 2) \
+ ".popsection\n" \
+ OLDINSTR_2(oldinstr, 1, 2) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(facility1, 1) \
+ ALTINSTR_ENTRY(facility2, 2) \
+ ".popsection\n"
+#else
+/* Alternative instructions are disabled, let's put just oldinstr in */
+#define ALTERNATIVE(oldinstr, altinstr, facility) \
+ oldinstr "\n"
+
+#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
+ oldinstr "\n"
+#endif
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows to use optimized instructions even on generic binary
+ * kernels.
+ *
+ * oldinstr is padded with jump and nops at compile time if altinstr is
+ * longer. altinstr is padded with jump and nops at run-time during patching.
+ *
+ * For non barrier like inlines please define new variants
+ * without volatile and memory clobber.
+ */
+#define alternative(oldinstr, altinstr, facility) \
+ asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
+
+#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
+ asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
+ altinstr2, facility2) ::: "memory")
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_ALTERNATIVE_H */
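
The block comment above describes how oldinstr is padded at build time and patched at boot depending on facility bits. A minimal usage sketch, not part of this patch (the wrapper function is hypothetical; the facility number 49 and the NIAI opcode mirror the spinlock.h hunk further down in this diff):

#include <asm/alternative.h>

/* Hypothetical helper: execute NIAI 7 only when facility bit 49 is
 * installed; otherwise the (empty) original sequence stays in place. */
static inline void niai_hint(void)
{
	alternative("", ".long 0xb2fa0070", 49);	/* NIAI 7 */
}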
diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h
index e9f7d7a57f99..09aed1095336 100644
--- a/arch/s390/include/asm/archrandom.h
+++ b/arch/s390/include/asm/archrandom.h
@@ -28,42 +28,42 @@ static void s390_arch_random_generate(u8 *buf, unsigned int nbytes)
static inline bool arch_has_random(void)
{
- if (static_branch_likely(&s390_arch_random_available))
- return true;
return false;
}
static inline bool arch_has_random_seed(void)
{
- return arch_has_random();
+ if (static_branch_likely(&s390_arch_random_available))
+ return true;
+ return false;
}
static inline bool arch_get_random_long(unsigned long *v)
{
- if (static_branch_likely(&s390_arch_random_available)) {
- s390_arch_random_generate((u8 *)v, sizeof(*v));
- return true;
- }
return false;
}
static inline bool arch_get_random_int(unsigned int *v)
{
- if (static_branch_likely(&s390_arch_random_available)) {
- s390_arch_random_generate((u8 *)v, sizeof(*v));
- return true;
- }
return false;
}
static inline bool arch_get_random_seed_long(unsigned long *v)
{
- return arch_get_random_long(v);
+ if (static_branch_likely(&s390_arch_random_available)) {
+ s390_arch_random_generate((u8 *)v, sizeof(*v));
+ return true;
+ }
+ return false;
}
static inline bool arch_get_random_seed_int(unsigned int *v)
{
- return arch_get_random_int(v);
+ if (static_branch_likely(&s390_arch_random_available)) {
+ s390_arch_random_generate((u8 *)v, sizeof(*v));
+ return true;
+ }
+ return false;
}
#endif /* CONFIG_ARCH_RANDOM */
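
This rework changes the semantics rather than just the style: arch_get_random_long() and arch_get_random_int() no longer deliver data, while the _seed_ variants now feed directly from the CPACF TRNG. A hedged sketch of a caller under the new behaviour (the helper function is hypothetical):

#include <asm/archrandom.h>

/* Hypothetical helper: after this patch only the _seed_ variants return
 * hardware entropy; arch_get_random_long() always returns false. */
static unsigned long hw_seed_or_zero(void)
{
	unsigned long seed = 0;

	if (arch_get_random_seed_long(&seed))
		return seed;	/* drawn from the CPACF TRNG */
	return 0;		/* TRNG not available on this machine */
}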
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index f479e4c0b87e..d3f09526ee19 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -40,19 +40,24 @@ __ATOMIC_OPS(__atomic64_xor, long, "laxg")
#undef __ATOMIC_OPS
#undef __ATOMIC_OP
-static inline void __atomic_add_const(int val, int *ptr)
-{
- asm volatile(
- " asi %[ptr],%[val]\n"
- : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+#define __ATOMIC_CONST_OP(op_name, op_type, op_string, op_barrier) \
+static inline void op_name(op_type val, op_type *ptr) \
+{ \
+ asm volatile( \
+ op_string " %[ptr],%[val]\n" \
+ op_barrier \
+ : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc", "memory");\
}
-static inline void __atomic64_add_const(long val, long *ptr)
-{
- asm volatile(
- " agsi %[ptr],%[val]\n"
- : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
-}
+#define __ATOMIC_CONST_OPS(op_name, op_type, op_string) \
+ __ATOMIC_CONST_OP(op_name, op_type, op_string, "\n") \
+ __ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+
+__ATOMIC_CONST_OPS(__atomic_add_const, int, "asi")
+__ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
+
+#undef __ATOMIC_CONST_OPS
+#undef __ATOMIC_CONST_OP
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
@@ -108,6 +113,11 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
#undef __ATOMIC64_OPS
+#define __atomic_add_const(val, ptr) __atomic_add(val, ptr)
+#define __atomic_add_const_barrier(val, ptr) __atomic_add(val, ptr)
+#define __atomic64_add_const(val, ptr) __atomic64_add(val, ptr)
+#define __atomic64_add_const_barrier(val, ptr) __atomic64_add(val, ptr)
+
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
static inline int __atomic_cmpxchg(int *ptr, int old, int new)
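
The new __ATOMIC_CONST_OP macro generates the add-immediate helpers in two flavours, a plain one and a *_barrier one that appends "bcr 14,0" for serialization; on pre-z196 machines both names simply map to the existing __atomic_add(). A small illustrative use (the function is hypothetical):

#include <asm/atomic_ops.h>

/* Hypothetical counter bump: compiles to "asi" plus a serialization
 * barrier on z196 and newer, and to the __atomic_add() fallback on
 * older machines. */
static inline void bump_counter(int *counter)
{
	__atomic_add_const_barrier(1, counter);
}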
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index b00777ce93b4..99aa817dad32 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -42,6 +42,7 @@ struct ccwgroup_device {
* @thaw: undo work done in @freeze
* @restore: callback for restoring after hibernation
* @driver: embedded driver structure
+ * @ccw_driver: supported ccw_driver (optional)
*/
struct ccwgroup_driver {
int (*setup) (struct ccwgroup_device *);
@@ -56,6 +57,7 @@ struct ccwgroup_driver {
int (*restore)(struct ccwgroup_device *);
struct device_driver driver;
+ struct ccw_driver *ccw_driver;
};
extern int ccwgroup_driver_register (struct ccwgroup_driver *cdriver);
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index 056670ebba67..3cc52e37b4b2 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -2,7 +2,7 @@
/*
* CP Assist for Cryptographic Functions (CPACF)
*
- * Copyright IBM Corp. 2003, 2016
+ * Copyright IBM Corp. 2003, 2017
* Author(s): Thomas Spatzier
* Jan Glauber
* Harald Freudenberger (freude@de.ibm.com)
@@ -134,6 +134,22 @@
#define CPACF_PRNO_TRNG_Q_R2C_RATIO 0x70
#define CPACF_PRNO_TRNG 0x72
+/*
+ * Function codes for the KMA (CIPHER MESSAGE WITH AUTHENTICATION)
+ * instruction
+ */
+#define CPACF_KMA_QUERY 0x00
+#define CPACF_KMA_GCM_AES_128 0x12
+#define CPACF_KMA_GCM_AES_192 0x13
+#define CPACF_KMA_GCM_AES_256 0x14
+
+/*
+ * Flags for the KMA (CIPHER MESSAGE WITH AUTHENTICATION) instruction
+ */
+#define CPACF_KMA_LPC 0x100 /* Last-Plaintext/Ciphertext */
+#define CPACF_KMA_LAAD 0x200 /* Last-AAD */
+#define CPACF_KMA_HS 0x400 /* Hash-subkey Supplied */
+
typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
/**
@@ -179,6 +195,8 @@ static inline int __cpacf_check_opcode(unsigned int opcode)
return test_facility(77); /* check for MSA4 */
case CPACF_PRNO:
return test_facility(57); /* check for MSA5 */
+ case CPACF_KMA:
+ return test_facility(146); /* check for MSA8 */
default:
BUG();
}
@@ -470,4 +488,36 @@ static inline void cpacf_pckmo(long func, void *param)
: "cc", "memory");
}
+/**
+ * cpacf_kma() - executes the KMA (CIPHER MESSAGE WITH AUTHENTICATION)
+ * instruction
+ * @func: the function code passed to KMA; see CPACF_KMA_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ * @aad: address of additional authenticated data memory area
+ * @aad_len: length of aad operand in bytes
+ */
+static inline void cpacf_kma(unsigned long func, void *param, u8 *dest,
+ const u8 *src, unsigned long src_len,
+ const u8 *aad, unsigned long aad_len)
+{
+ register unsigned long r0 asm("0") = (unsigned long) func;
+ register unsigned long r1 asm("1") = (unsigned long) param;
+ register unsigned long r2 asm("2") = (unsigned long) src;
+ register unsigned long r3 asm("3") = (unsigned long) src_len;
+ register unsigned long r4 asm("4") = (unsigned long) aad;
+ register unsigned long r5 asm("5") = (unsigned long) aad_len;
+ register unsigned long r6 asm("6") = (unsigned long) dest;
+
+ asm volatile(
+ "0: .insn rrf,%[opc] << 16,%[dst],%[src],%[aad],0\n"
+ " brc 1,0b\n" /* handle partial completion */
+ : [dst] "+a" (r6), [src] "+a" (r2), [slen] "+d" (r3),
+ [aad] "+a" (r4), [alen] "+d" (r5)
+ : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMA)
+ : "cc", "memory");
+}
+
#endif /* _ASM_S390_CPACF_H */
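
cpacf_kma() is the low-level wrapper behind the hardware accelerated AES-GCM support mentioned in the merge message. A hedged sketch of how a driver might probe for the new function codes before use (the helper is hypothetical; cpacf_query() and cpacf_test_func() are the existing query helpers in this header):

#include <asm/cpacf.h>

/* Hypothetical probe: check for MSA8/KMA and the AES-128 GCM function
 * code before registering a GCM implementation. */
static int have_kma_gcm_aes_128(void)
{
	cpacf_mask_t mask;

	if (!cpacf_query(CPACF_KMA, &mask))
		return 0;
	return cpacf_test_func(&mask, CPACF_KMA_GCM_AES_128);
}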
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 93e0d72f6c94..99c93d0346f9 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -8,6 +8,18 @@
#ifndef __ASM_CTL_REG_H
#define __ASM_CTL_REG_H
+#include <linux/const.h>
+
+#define CR2_GUARDED_STORAGE _BITUL(63 - 59)
+
+#define CR14_CHANNEL_REPORT_SUBMASK _BITUL(63 - 35)
+#define CR14_RECOVERY_SUBMASK _BITUL(63 - 36)
+#define CR14_DEGRADATION_SUBMASK _BITUL(63 - 37)
+#define CR14_EXTERNAL_DAMAGE_SUBMASK _BITUL(63 - 38)
+#define CR14_WARNING_SUBMASK _BITUL(63 - 39)
+
+#ifndef __ASSEMBLY__
+
#include <linux/bug.h>
#define __ctl_load(array, low, high) do { \
@@ -55,7 +67,11 @@ void smp_ctl_clear_bit(int cr, int bit);
union ctlreg0 {
unsigned long val;
struct {
- unsigned long : 32;
+ unsigned long : 8;
+ unsigned long tcx : 1; /* Transactional-Execution control */
+ unsigned long pifo : 1; /* Transactional-Execution Program-
+ Interruption-Filtering Override */
+ unsigned long : 22;
unsigned long : 3;
unsigned long lap : 1; /* Low-address-protection control */
unsigned long : 4;
@@ -71,6 +87,19 @@ union ctlreg0 {
};
};
+union ctlreg2 {
+ unsigned long val;
+ struct {
+ unsigned long : 33;
+ unsigned long ducto : 25;
+ unsigned long : 1;
+ unsigned long gse : 1;
+ unsigned long : 1;
+ unsigned long tds : 1;
+ unsigned long tdc : 2;
+ };
+};
+
#ifdef CONFIG_SMP
# define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
# define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
@@ -79,4 +108,5 @@ union ctlreg0 {
# define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
#endif
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_CTL_REG_H */
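
Besides making the header usable from assembler code, this adds named CR14 subclass-mask bits and bit layouts for control registers 0 and 2. As a rough illustration (hypothetical helper, relying on the existing __ctl_store() macro), the new ctlreg2 layout can be used to test the guarded-storage-enable bit:

#include <asm/ctl_reg.h>

/* Hypothetical check: is guarded storage enabled in CR2? The gse bit
 * corresponds to CR2_GUARDED_STORAGE (architecture bit 59). */
static inline int guarded_storage_enabled(void)
{
	union ctlreg2 cr2;

	__ctl_store(cr2.val, 2, 2);	/* read control register 2 */
	return cr2.gse;
}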
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index a4ed25dd3278..c305d39f5016 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -14,71 +14,71 @@
#include <linux/refcount.h>
#include <uapi/asm/debug.h>
-#define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */
-#define DEBUG_OFF_LEVEL -1 /* level where debug is switched off */
-#define DEBUG_FLUSH_ALL -1 /* parameter to flush all areas */
-#define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */
-#define DEBUG_MAX_NAME_LEN 64 /* max length for a debugfs file name */
-#define DEBUG_DEFAULT_LEVEL 3 /* initial debug level */
+#define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */
+#define DEBUG_OFF_LEVEL -1 /* level where debug is switched off */
+#define DEBUG_FLUSH_ALL -1 /* parameter to flush all areas */
+#define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */
+#define DEBUG_MAX_NAME_LEN 64 /* max length for a debugfs file name */
+#define DEBUG_DEFAULT_LEVEL 3 /* initial debug level */
#define DEBUG_DIR_ROOT "s390dbf" /* name of debug root directory in proc fs */
-#define DEBUG_DATA(entry) (char*)(entry + 1) /* data is stored behind */
- /* the entry information */
+#define DEBUG_DATA(entry) (char *)(entry + 1) /* data is stored behind */
+ /* the entry information */
typedef struct __debug_entry debug_entry_t;
struct debug_view;
-typedef struct debug_info {
- struct debug_info* next;
- struct debug_info* prev;
+typedef struct debug_info {
+ struct debug_info *next;
+ struct debug_info *prev;
refcount_t ref_count;
- spinlock_t lock;
+ spinlock_t lock;
int level;
int nr_areas;
int pages_per_area;
int buf_size;
- int entry_size;
- debug_entry_t*** areas;
+ int entry_size;
+ debug_entry_t ***areas;
int active_area;
int *active_pages;
int *active_entries;
- struct dentry* debugfs_root_entry;
- struct dentry* debugfs_entries[DEBUG_MAX_VIEWS];
- struct debug_view* views[DEBUG_MAX_VIEWS];
+ struct dentry *debugfs_root_entry;
+ struct dentry *debugfs_entries[DEBUG_MAX_VIEWS];
+ struct debug_view *views[DEBUG_MAX_VIEWS];
char name[DEBUG_MAX_NAME_LEN];
umode_t mode;
} debug_info_t;
-typedef int (debug_header_proc_t) (debug_info_t* id,
- struct debug_view* view,
+typedef int (debug_header_proc_t) (debug_info_t *id,
+ struct debug_view *view,
int area,
- debug_entry_t* entry,
- char* out_buf);
-
-typedef int (debug_format_proc_t) (debug_info_t* id,
- struct debug_view* view, char* out_buf,
- const char* in_buf);
-typedef int (debug_prolog_proc_t) (debug_info_t* id,
- struct debug_view* view,
- char* out_buf);
-typedef int (debug_input_proc_t) (debug_info_t* id,
- struct debug_view* view,
- struct file* file,
+ debug_entry_t *entry,
+ char *out_buf);
+
+typedef int (debug_format_proc_t) (debug_info_t *id,
+ struct debug_view *view, char *out_buf,
+ const char *in_buf);
+typedef int (debug_prolog_proc_t) (debug_info_t *id,
+ struct debug_view *view,
+ char *out_buf);
+typedef int (debug_input_proc_t) (debug_info_t *id,
+ struct debug_view *view,
+ struct file *file,
const char __user *user_buf,
- size_t in_buf_size, loff_t* offset);
+ size_t in_buf_size, loff_t *offset);
+
+int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
+ int area, debug_entry_t *entry, char *out_buf);
-int debug_dflt_header_fn(debug_info_t* id, struct debug_view* view,
- int area, debug_entry_t* entry, char* out_buf);
-
struct debug_view {
char name[DEBUG_MAX_NAME_LEN];
- debug_prolog_proc_t* prolog_proc;
- debug_header_proc_t* header_proc;
- debug_format_proc_t* format_proc;
- debug_input_proc_t* input_proc;
- void* private_data;
+ debug_prolog_proc_t *prolog_proc;
+ debug_header_proc_t *header_proc;
+ debug_format_proc_t *format_proc;
+ debug_input_proc_t *input_proc;
+ void *private_data;
};
extern struct debug_view debug_hex_ascii_view;
@@ -87,65 +87,67 @@ extern struct debug_view debug_sprintf_view;
/* do NOT use the _common functions */
-debug_entry_t* debug_event_common(debug_info_t* id, int level,
- const void* data, int length);
+debug_entry_t *debug_event_common(debug_info_t *id, int level,
+ const void *data, int length);
-debug_entry_t* debug_exception_common(debug_info_t* id, int level,
- const void* data, int length);
+debug_entry_t *debug_exception_common(debug_info_t *id, int level,
+ const void *data, int length);
/* Debug Feature API: */
debug_info_t *debug_register(const char *name, int pages, int nr_areas,
- int buf_size);
+ int buf_size);
debug_info_t *debug_register_mode(const char *name, int pages, int nr_areas,
int buf_size, umode_t mode, uid_t uid,
gid_t gid);
-void debug_unregister(debug_info_t* id);
+void debug_unregister(debug_info_t *id);
-void debug_set_level(debug_info_t* id, int new_level);
+void debug_set_level(debug_info_t *id, int new_level);
void debug_set_critical(void);
void debug_stop_all(void);
-static inline bool debug_level_enabled(debug_info_t* id, int level)
+static inline bool debug_level_enabled(debug_info_t *id, int level)
{
return level <= id->level;
}
-static inline debug_entry_t*
-debug_event(debug_info_t* id, int level, void* data, int length)
+static inline debug_entry_t *debug_event(debug_info_t *id, int level,
+ void *data, int length)
{
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_event_common(id,level,data,length);
+ return debug_event_common(id, level, data, length);
}
-static inline debug_entry_t*
-debug_int_event(debug_info_t* id, int level, unsigned int tag)
+static inline debug_entry_t *debug_int_event(debug_info_t *id, int level,
+ unsigned int tag)
{
- unsigned int t=tag;
+ unsigned int t = tag;
+
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_event_common(id,level,&t,sizeof(unsigned int));
+ return debug_event_common(id, level, &t, sizeof(unsigned int));
}
-static inline debug_entry_t *
-debug_long_event (debug_info_t* id, int level, unsigned long tag)
+static inline debug_entry_t *debug_long_event(debug_info_t *id, int level,
+ unsigned long tag)
{
- unsigned long t=tag;
+ unsigned long t = tag;
+
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_event_common(id,level,&t,sizeof(unsigned long));
+ return debug_event_common(id, level, &t, sizeof(unsigned long));
}
-static inline debug_entry_t*
-debug_text_event(debug_info_t* id, int level, const char* txt)
+static inline debug_entry_t *debug_text_event(debug_info_t *id, int level,
+ const char *txt)
{
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_event_common(id,level,txt,strlen(txt));
+ return debug_event_common(id, level, txt, strlen(txt));
}
/*
@@ -161,6 +163,7 @@ __debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
debug_entry_t *__ret; \
debug_info_t *__id = _id; \
int __level = _level; \
+ \
if ((!__id) || (__level > __id->level)) \
__ret = NULL; \
else \
@@ -169,38 +172,40 @@ __debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
__ret; \
})
-static inline debug_entry_t*
-debug_exception(debug_info_t* id, int level, void* data, int length)
+static inline debug_entry_t *debug_exception(debug_info_t *id, int level,
+ void *data, int length)
{
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_exception_common(id,level,data,length);
+ return debug_exception_common(id, level, data, length);
}
-static inline debug_entry_t*
-debug_int_exception(debug_info_t* id, int level, unsigned int tag)
+static inline debug_entry_t *debug_int_exception(debug_info_t *id, int level,
+ unsigned int tag)
{
- unsigned int t=tag;
+ unsigned int t = tag;
+
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_exception_common(id,level,&t,sizeof(unsigned int));
+ return debug_exception_common(id, level, &t, sizeof(unsigned int));
}
-static inline debug_entry_t *
-debug_long_exception (debug_info_t* id, int level, unsigned long tag)
+static inline debug_entry_t *debug_long_exception (debug_info_t *id, int level,
+ unsigned long tag)
{
- unsigned long t=tag;
+ unsigned long t = tag;
+
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_exception_common(id,level,&t,sizeof(unsigned long));
+ return debug_exception_common(id, level, &t, sizeof(unsigned long));
}
-static inline debug_entry_t*
-debug_text_exception(debug_info_t* id, int level, const char* txt)
+static inline debug_entry_t *debug_text_exception(debug_info_t *id, int level,
+ const char *txt)
{
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_exception_common(id,level,txt,strlen(txt));
+ return debug_exception_common(id, level, txt, strlen(txt));
}
/*
@@ -216,6 +221,7 @@ __debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
debug_entry_t *__ret; \
debug_info_t *__id = _id; \
int __level = _level; \
+ \
if ((!__id) || (__level > __id->level)) \
__ret = NULL; \
else \
@@ -224,13 +230,13 @@ __debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
__ret; \
})
-int debug_register_view(debug_info_t* id, struct debug_view* view);
-int debug_unregister_view(debug_info_t* id, struct debug_view* view);
+int debug_register_view(debug_info_t *id, struct debug_view *view);
+int debug_unregister_view(debug_info_t *id, struct debug_view *view);
/*
define the debug levels:
- 0 No debugging output to console or syslog
- - 1 Log internal errors to syslog, ignore check conditions
+ - 1 Log internal errors to syslog, ignore check conditions
- 2 Log internal errors and check conditions to syslog
- 3 Log internal errors to console, log check conditions to syslog
- 4 Log internal errors and check conditions to console
@@ -248,17 +254,17 @@ int debug_unregister_view(debug_info_t* id, struct debug_view* view);
#define INTERNAL_DEBMSG(x,y...) "D" __FILE__ "%d: " x, __LINE__, y
#if DEBUG_LEVEL > 0
-#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_INFO(x...) printk ( KERN_INFO PRINTK_HEADER x )
-#define PRINT_WARN(x...) printk ( KERN_WARNING PRINTK_HEADER x )
-#define PRINT_ERR(x...) printk ( KERN_ERR PRINTK_HEADER x )
-#define PRINT_FATAL(x...) panic ( PRINTK_HEADER x )
+#define PRINT_DEBUG(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_INFO(x...) printk(KERN_INFO PRINTK_HEADER x)
+#define PRINT_WARN(x...) printk(KERN_WARNING PRINTK_HEADER x)
+#define PRINT_ERR(x...) printk(KERN_ERR PRINTK_HEADER x)
+#define PRINT_FATAL(x...) panic(PRINTK_HEADER x)
#else
-#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_INFO(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_WARN(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_ERR(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_FATAL(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#endif /* DASD_DEBUG */
-
-#endif /* DEBUG_H */
+#define PRINT_DEBUG(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_INFO(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_WARN(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_ERR(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_FATAL(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#endif /* DASD_DEBUG */
+
+#endif /* DEBUG_H */
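
The changes above are a coding-style cleanup; the debug feature API itself is unchanged. For orientation, a brief hedged sketch of typical usage (the driver name, sizes and surrounding functions are illustrative only):

#include <linux/errno.h>
#include <asm/debug.h>

static debug_info_t *my_dbf;	/* hypothetical driver debug area */

static int my_dbf_init(void)
{
	/* 4 pages per area, 1 area, 4-byte data entries */
	my_dbf = debug_register("mydrv", 4, 1, 4);
	if (!my_dbf)
		return -ENOMEM;
	debug_register_view(my_dbf, &debug_hex_ascii_view);
	debug_set_level(my_dbf, DEBUG_DEFAULT_LEVEL);
	debug_int_event(my_dbf, 2, 0x1234);	/* log a level-2 event */
	return 0;
}

static void my_dbf_exit(void)
{
	debug_unregister(my_dbf);
}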
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
index 78d1b2d725b9..b0480c60a8e1 100644
--- a/arch/s390/include/asm/dis.h
+++ b/arch/s390/include/asm/dis.h
@@ -9,32 +9,7 @@
#ifndef __ASM_S390_DIS_H__
#define __ASM_S390_DIS_H__
-/* Type of operand */
-#define OPERAND_GPR 0x1 /* Operand printed as %rx */
-#define OPERAND_FPR 0x2 /* Operand printed as %fx */
-#define OPERAND_AR 0x4 /* Operand printed as %ax */
-#define OPERAND_CR 0x8 /* Operand printed as %cx */
-#define OPERAND_VR 0x10 /* Operand printed as %vx */
-#define OPERAND_DISP 0x20 /* Operand printed as displacement */
-#define OPERAND_BASE 0x40 /* Operand printed as base register */
-#define OPERAND_INDEX 0x80 /* Operand printed as index register */
-#define OPERAND_PCREL 0x100 /* Operand printed as pc-relative symbol */
-#define OPERAND_SIGNED 0x200 /* Operand printed as signed value */
-#define OPERAND_LENGTH 0x400 /* Operand printed as length (+1) */
-
-
-struct s390_operand {
- int bits; /* The number of bits in the operand. */
- int shift; /* The number of bits to shift. */
- int flags; /* One bit syntax flags. */
-};
-
-struct s390_insn {
- const char name[5];
- unsigned char opfrag;
- unsigned char format;
-};
-
+#include <generated/dis.h>
static inline int insn_length(unsigned char code)
{
@@ -45,7 +20,6 @@ struct pt_regs;
void show_code(struct pt_regs *regs);
void print_fn_code(unsigned char *code, unsigned long len);
-int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
struct s390_insn *find_insn(unsigned char *code);
static inline int is_known_insn(unsigned char *code)
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 5a8d92758a58..186c7b5f5511 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -13,6 +13,8 @@
#include <asm/cio.h>
#include <asm/setup.h>
+#define NSS_NAME_SIZE 8
+
#define IPL_PARMBLOCK_ORIGIN 0x2000
#define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
@@ -106,7 +108,6 @@ extern size_t append_ipl_scpdata(char *, size_t);
enum {
IPL_DEVNO_VALID = 1,
IPL_PARMBLOCK_VALID = 2,
- IPL_NSS_VALID = 4,
};
enum ipl_type {
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 28792ef82c83..921391f2341e 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -63,8 +63,6 @@ typedef u16 kprobe_opcode_t;
#define kretprobe_blacklist_size 0
-#define KPROBE_SWAP_INST 0x10
-
/* Architecture specific copy of original instruction */
struct arch_specific_insn {
/* copy of original instruction */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 51375e766e90..fd006a272024 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -736,7 +736,6 @@ struct kvm_arch{
wait_queue_head_t ipte_wq;
int ipte_lock_count;
struct mutex ipte_mutex;
- struct ratelimit_state sthyi_limit;
spinlock_t start_stop_lock;
struct sie_page2 *sie_page2;
struct kvm_s390_cpu_model model;
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 917f7344cab6..9eb36a1592c7 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -134,8 +134,9 @@ struct lowcore {
__u8 pad_0x03b4[0x03b8-0x03b4]; /* 0x03b4 */
__u64 gmap; /* 0x03b8 */
__u32 spinlock_lockval; /* 0x03c0 */
- __u32 fpu_flags; /* 0x03c4 */
- __u8 pad_0x03c8[0x0400-0x03c8]; /* 0x03c8 */
+ __u32 spinlock_index; /* 0x03c4 */
+ __u32 fpu_flags; /* 0x03c8 */
+ __u8 pad_0x03cc[0x0400-0x03cc]; /* 0x03cc */
/* Per cpu primary space access list */
__u32 paste[16]; /* 0x0400 */
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index c8a7beadd3d4..1e5dc4537bf2 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -26,12 +26,9 @@
#define MCCK_CODE_CPU_TIMER_VALID _BITUL(63 - 46)
#define MCCK_CODE_PSW_MWP_VALID _BITUL(63 - 20)
#define MCCK_CODE_PSW_IA_VALID _BITUL(63 - 23)
-
-#define MCCK_CR14_CR_PENDING_SUB_MASK (1 << 28)
-#define MCCK_CR14_RECOVERY_SUB_MASK (1 << 27)
-#define MCCK_CR14_DEGRAD_SUB_MASK (1 << 26)
-#define MCCK_CR14_EXT_DAMAGE_SUB_MASK (1 << 25)
-#define MCCK_CR14_WARN_SUB_MASK (1 << 24)
+#define MCCK_CODE_CR_VALID _BITUL(63 - 29)
+#define MCCK_CODE_GS_VALID _BITUL(63 - 36)
+#define MCCK_CODE_FC_VALID _BITUL(63 - 43)
#ifndef __ASSEMBLY__
@@ -87,6 +84,8 @@ union mci {
#define MCESA_ORIGIN_MASK (~0x3ffUL)
#define MCESA_LC_MASK (0xfUL)
+#define MCESA_MIN_SIZE (1024)
+#define MCESA_MAX_SIZE (2048)
struct mcesa {
u8 vector_save_area[1024];
@@ -95,8 +94,12 @@ struct mcesa {
struct pt_regs;
-extern void s390_handle_mcck(void);
-extern void s390_do_machine_check(struct pt_regs *regs);
+void nmi_alloc_boot_cpu(struct lowcore *lc);
+int nmi_alloc_per_cpu(struct lowcore *lc);
+void nmi_free_per_cpu(struct lowcore *lc);
+
+void s390_handle_mcck(void);
+void s390_do_machine_check(struct pt_regs *regs);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_NMI_H */
diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h
index 6c2c38060f8b..5dfe47588277 100644
--- a/arch/s390/include/asm/pci_debug.h
+++ b/arch/s390/include/asm/pci_debug.h
@@ -19,11 +19,7 @@ extern debug_info_t *pci_debug_err_id;
static inline void zpci_err_hex(void *addr, int len)
{
- while (len > 0) {
- debug_event(pci_debug_err_id, 0, (void *) addr, len);
- len -= pci_debug_err_id->buf_size;
- addr += pci_debug_err_id->buf_size;
- }
+ debug_event(pci_debug_err_id, 0, addr, len);
}
#endif
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 419e83fa4721..ba22a6ea51a1 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -82,6 +82,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
int zpci_load(u64 *data, u64 req, u64 offset);
int zpci_store(u64 data, u64 req, u64 offset);
int zpci_store_block(const u64 *data, u64 req, u64 offset);
-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
#endif
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index bbe99cb8219d..c7b4333d1de0 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -13,6 +13,7 @@
#define _S390_PGALLOC_H
#include <linux/threads.h>
+#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/mm.h>
@@ -28,24 +29,9 @@ void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;
-static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
-{
- struct addrtype { char _[256]; };
- int i;
-
- for (i = 0; i < n; i += 256) {
- *s = val;
- asm volatile(
- "mvc 8(248,%[s]),0(%[s])\n"
- : "+m" (*(struct addrtype *) s)
- : [s] "a" (s));
- s += 256 / sizeof(long);
- }
-}
-
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
- clear_table(crst, entry, _CRST_TABLE_SIZE);
+ memset64((u64 *)crst, entry, _CRST_ENTRIES);
}
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 9cf92abe23c3..f25bfe888933 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -22,6 +22,7 @@
#define CIF_IGNORE_IRQ 5 /* ignore interrupt (for udelay) */
#define CIF_ENABLED_WAIT 6 /* in enabled wait state */
#define CIF_MCCK_GUEST 7 /* machine check happening in guest */
+#define CIF_DEDICATED_CPU 8 /* this CPU is dedicated */
#define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING)
#define _CIF_ASCE_PRIMARY _BITUL(CIF_ASCE_PRIMARY)
@@ -31,6 +32,7 @@
#define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ)
#define _CIF_ENABLED_WAIT _BITUL(CIF_ENABLED_WAIT)
#define _CIF_MCCK_GUEST _BITUL(CIF_MCCK_GUEST)
+#define _CIF_DEDICATED_CPU _BITUL(CIF_DEDICATED_CPU)
#ifndef __ASSEMBLY__
@@ -219,10 +221,10 @@ void show_registers(struct pt_regs *regs);
void show_cacheinfo(struct seq_file *m);
/* Free all resources held by a thread. */
-extern void release_thread(struct task_struct *);
+static inline void release_thread(struct task_struct *tsk) { }
-/* Free guarded storage control block for current */
-void exit_thread_gs(void);
+/* Free guarded storage control block */
+void guarded_storage_release(struct task_struct *tsk);
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
index ea8896ba5afc..6b1540337ed6 100644
--- a/arch/s390/include/asm/runtime_instr.h
+++ b/arch/s390/include/asm/runtime_instr.h
@@ -6,55 +6,55 @@
#define S390_RUNTIME_INSTR_STOP 0x2
struct runtime_instr_cb {
- __u64 buf_current;
- __u64 buf_origin;
- __u64 buf_limit;
+ __u64 rca;
+ __u64 roa;
+ __u64 rla;
- __u32 valid : 1;
- __u32 pstate : 1;
- __u32 pstate_set_buf : 1;
- __u32 home_space : 1;
- __u32 altered : 1;
- __u32 : 3;
- __u32 pstate_sample : 1;
- __u32 sstate_sample : 1;
- __u32 pstate_collect : 1;
- __u32 sstate_collect : 1;
- __u32 : 1;
- __u32 halted_int : 1;
- __u32 int_requested : 1;
- __u32 buffer_full_int : 1;
+ __u32 v : 1;
+ __u32 s : 1;
+ __u32 k : 1;
+ __u32 h : 1;
+ __u32 a : 1;
+ __u32 reserved1 : 3;
+ __u32 ps : 1;
+ __u32 qs : 1;
+ __u32 pc : 1;
+ __u32 qc : 1;
+ __u32 reserved2 : 1;
+ __u32 g : 1;
+ __u32 u : 1;
+ __u32 l : 1;
__u32 key : 4;
- __u32 : 9;
+ __u32 reserved3 : 8;
+ __u32 t : 1;
__u32 rgs : 3;
- __u32 mode : 4;
- __u32 next : 1;
+ __u32 m : 4;
+ __u32 n : 1;
__u32 mae : 1;
- __u32 : 2;
- __u32 call_type_br : 1;
- __u32 return_type_br : 1;
- __u32 other_type_br : 1;
- __u32 bc_other_type : 1;
- __u32 emit : 1;
- __u32 tx_abort : 1;
- __u32 : 2;
- __u32 bp_xn : 1;
- __u32 bp_xt : 1;
- __u32 bp_ti : 1;
- __u32 bp_ni : 1;
- __u32 suppr_y : 1;
- __u32 suppr_z : 1;
+ __u32 reserved4 : 2;
+ __u32 c : 1;
+ __u32 r : 1;
+ __u32 b : 1;
+ __u32 j : 1;
+ __u32 e : 1;
+ __u32 x : 1;
+ __u32 reserved5 : 2;
+ __u32 bpxn : 1;
+ __u32 bpxt : 1;
+ __u32 bpti : 1;
+ __u32 bpni : 1;
+ __u32 reserved6 : 2;
- __u32 dc_miss_extra : 1;
- __u32 lat_lev_ignore : 1;
- __u32 ic_lat_lev : 4;
- __u32 dc_lat_lev : 4;
+ __u32 d : 1;
+ __u32 f : 1;
+ __u32 ic : 4;
+ __u32 dc : 4;
- __u64 reserved1;
- __u64 scaling_factor;
+ __u64 reserved7;
+ __u64 sf;
__u64 rsic;
- __u64 reserved2;
+ __u64 reserved8;
} __packed __aligned(8);
extern struct runtime_instr_cb runtime_instr_empty_cb;
@@ -86,6 +86,8 @@ static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
load_runtime_instr_cb(&runtime_instr_empty_cb);
}
-void exit_thread_runtime_instr(void);
+struct task_struct;
+
+void runtime_instr_release(struct task_struct *tsk);
#endif /* _RUNTIME_INSTR_H */
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
deleted file mode 100644
index f731b7b518bd..000000000000
--- a/arch/s390/include/asm/rwsem.h
+++ /dev/null
@@ -1,211 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _S390_RWSEM_H
-#define _S390_RWSEM_H
-
-/*
- * S390 version
- * Copyright IBM Corp. 2002
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
- */
-
-/*
- *
- * The MSW of the count is the negated number of active writers and waiting
- * lockers, and the LSW is the total number of active locks
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
- * uncontended lock. This can be determined because XADD returns the old value.
- * Readers increment by 1 and see a positive value when uncontended, negative
- * if there are writers (and maybe) readers waiting (in which case it goes to
- * sleep).
- *
- * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
- * be extended to 65534 by manually checking the whole MSW rather than relying
- * on the S flag.
- *
- * The value of ACTIVE_BIAS supports up to 65535 active processes.
- *
- * This should be totally fair - if anything is waiting, a process that wants a
- * lock will go to the back of the queue. When the currently active lock is
- * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consecutive readers at the
- * front, then they'll all be woken up, but no other readers will be.
- */
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
-#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
-#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
-#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
-#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
- signed long old, new;
-
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " aghi %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
- : "cc", "memory");
- if (old < 0)
- rwsem_down_read_failed(sem);
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
- signed long old, new;
-
- asm volatile(
- " lg %0,%2\n"
- "0: ltgr %1,%0\n"
- " jm 1f\n"
- " aghi %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b\n"
- "1:"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
- : "cc", "memory");
- return old >= 0 ? 1 : 0;
-}
-
-/*
- * lock for writing
- */
-static inline long ___down_write(struct rw_semaphore *sem)
-{
- signed long old, new, tmp;
-
- tmp = RWSEM_ACTIVE_WRITE_BIAS;
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " ag %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "m" (tmp)
- : "cc", "memory");
-
- return old;
-}
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
- if (___down_write(sem))
- rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
- if (___down_write(sem))
- if (IS_ERR(rwsem_down_write_failed_killable(sem)))
- return -EINTR;
-
- return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
- signed long old;
-
- asm volatile(
- " lg %0,%1\n"
- "0: ltgr %0,%0\n"
- " jnz 1f\n"
- " csg %0,%3,%1\n"
- " jl 0b\n"
- "1:"
- : "=&d" (old), "=Q" (sem->count)
- : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
- : "cc", "memory");
- return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
- signed long old, new;
-
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " aghi %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
- : "cc", "memory");
- if (new < 0)
- if ((new & RWSEM_ACTIVE_MASK) == 0)
- rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
- signed long old, new, tmp;
-
- tmp = -RWSEM_ACTIVE_WRITE_BIAS;
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " ag %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "m" (tmp)
- : "cc", "memory");
- if (new < 0)
- if ((new & RWSEM_ACTIVE_MASK) == 0)
- rwsem_wake(sem);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
- signed long old, new, tmp;
-
- tmp = -RWSEM_WAITING_BIAS;
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " ag %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "m" (tmp)
- : "cc", "memory");
- if (new > 1)
- rwsem_downgrade_wake(sem);
-}
-
-#endif /* _S390_RWSEM_H */
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index 0ac3e8166e85..54f81f8ed662 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -4,6 +4,6 @@
#include <asm-generic/sections.h>
-extern char _eshared[], _ehead[];
+extern char _ehead[];
#endif
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index f2c2b7cd9099..8bc87dcb10eb 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -98,9 +98,6 @@ extern char vmpoff_cmd[];
#define SET_CONSOLE_VT220 do { console_mode = 4; } while (0)
#define SET_CONSOLE_HVC do { console_mode = 5; } while (0)
-#define NSS_NAME_SIZE 8
-extern char kernel_nss_name[];
-
#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index babe83ed416c..3907ead27ffa 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -28,6 +28,7 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void smp_call_online_cpu(void (*func)(void *), void *);
extern void smp_call_ipl_cpu(void (*func)(void *), void *);
+extern void smp_emergency_stop(void);
extern int smp_find_processor_id(u16 address);
extern int smp_store_status(int cpu);
@@ -53,6 +54,10 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data)
func(data);
}
+static inline void smp_emergency_stop(void)
+{
+}
+
static inline int smp_find_processor_id(u16 address) { return 0; }
static inline int smp_store_status(int cpu) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index f3f5e0155b10..31f95a636aed 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -14,6 +14,7 @@
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
+#include <asm/alternative.h>
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
@@ -36,20 +37,15 @@ bool arch_vcpu_is_preempted(int cpu);
* (the type definitions are in asm/spinlock_types.h)
*/
-void arch_lock_relax(int cpu);
+void arch_spin_relax(arch_spinlock_t *lock);
void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
-void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
-
-static inline void arch_spin_relax(arch_spinlock_t *lock)
-{
- arch_lock_relax(lock->lock);
-}
+void arch_spin_lock_setup(int cpu);
static inline u32 arch_spin_lockval(int cpu)
{
- return ~cpu;
+ return cpu + 1;
}
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
@@ -65,8 +61,7 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lp)
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
barrier();
- return likely(arch_spin_value_unlocked(*lp) &&
- __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
+ return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}
static inline void arch_spin_lock(arch_spinlock_t *lp)
@@ -79,7 +74,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
unsigned long flags)
{
if (!arch_spin_trylock_once(lp))
- arch_spin_lock_wait_flags(lp, flags);
+ arch_spin_lock_wait(lp);
}
static inline int arch_spin_trylock(arch_spinlock_t *lp)
@@ -93,11 +88,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
typecheck(int, lp->lock);
asm volatile(
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
- " .long 0xb2fa0070\n" /* NIAI 7 */
-#endif
- " st %1,%0\n"
- : "=Q" (lp->lock) : "d" (0) : "cc", "memory");
+ ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */
+ " sth %1,%0\n"
+ : "=Q" (((unsigned short *) &lp->lock)[1])
+ : "d" (0) : "cc", "memory");
}
/*
@@ -115,164 +109,63 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
+#define arch_read_can_lock(x) (((x)->cnts & 0xffff0000) == 0)
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define arch_write_can_lock(x) ((x)->lock == 0)
-
-extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
-extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
+#define arch_write_can_lock(x) ((x)->cnts == 0)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+#define arch_read_relax(rw) barrier()
+#define arch_write_relax(rw) barrier()
-static inline int arch_read_trylock_once(arch_rwlock_t *rw)
-{
- int old = ACCESS_ONCE(rw->lock);
- return likely(old >= 0 &&
- __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
-}
-
-static inline int arch_write_trylock_once(arch_rwlock_t *rw)
-{
- int old = ACCESS_ONCE(rw->lock);
- return likely(old == 0 &&
- __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
-}
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __RAW_OP_OR "lao"
-#define __RAW_OP_AND "lan"
-#define __RAW_OP_ADD "laa"
-
-#define __RAW_LOCK(ptr, op_val, op_string) \
-({ \
- int old_val; \
- \
- typecheck(int *, ptr); \
- asm volatile( \
- op_string " %0,%2,%1\n" \
- "bcr 14,0\n" \
- : "=d" (old_val), "+Q" (*ptr) \
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-#define __RAW_UNLOCK(ptr, op_val, op_string) \
-({ \
- int old_val; \
- \
- typecheck(int *, ptr); \
- asm volatile( \
- op_string " %0,%2,%1\n" \
- : "=d" (old_val), "+Q" (*ptr) \
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-extern void _raw_read_lock_wait(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);
+void arch_read_lock_wait(arch_rwlock_t *lp);
+void arch_write_lock_wait(arch_rwlock_t *lp);
static inline void arch_read_lock(arch_rwlock_t *rw)
{
int old;
- old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
- if (old < 0)
- _raw_read_lock_wait(rw);
+ old = __atomic_add(1, &rw->cnts);
+ if (old & 0xffff0000)
+ arch_read_lock_wait(rw);
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
+ __atomic_add_const_barrier(-1, &rw->cnts);
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
- int old;
-
- old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
- if (old != 0)
- _raw_write_lock_wait(rw, old);
- rw->owner = SPINLOCK_LOCKVAL;
+ if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
+ arch_write_lock_wait(rw);
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
- rw->owner = 0;
- __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
+ __atomic_add_barrier(-0x30000, &rw->cnts);
}
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-extern void _raw_read_lock_wait(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait(arch_rwlock_t *lp);
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- if (!arch_read_trylock_once(rw))
- _raw_read_lock_wait(rw);
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
int old;
- do {
- old = ACCESS_ONCE(rw->lock);
- } while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- if (!arch_write_trylock_once(rw))
- _raw_write_lock_wait(rw);
- rw->owner = SPINLOCK_LOCKVAL;
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- typecheck(int, rw->lock);
-
- rw->owner = 0;
- asm volatile(
- "st %1,%0\n"
- : "+Q" (rw->lock)
- : "d" (0)
- : "cc", "memory");
-}
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
- if (!arch_read_trylock_once(rw))
- return _raw_read_trylock_retry(rw);
- return 1;
+ old = READ_ONCE(rw->cnts);
+ return (!(old & 0xffff0000) &&
+ __atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
- return 0;
- rw->owner = SPINLOCK_LOCKVAL;
- return 1;
-}
-
-static inline void arch_read_relax(arch_rwlock_t *rw)
-{
- arch_lock_relax(rw->owner);
-}
+ int old;
-static inline void arch_write_relax(arch_rwlock_t *rw)
-{
- arch_lock_relax(rw->owner);
+ old = READ_ONCE(rw->cnts);
+ return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
}
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index 1861a0c5dd47..cfed272e4fd5 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
typedef struct {
- int lock;
- int owner;
+ int cnts;
+ arch_spinlock_t wait;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 27ce494198f5..50f26fc9acb2 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -18,6 +18,9 @@
#define __HAVE_ARCH_MEMMOVE /* gcc builtin & arch function */
#define __HAVE_ARCH_MEMSCAN /* inline & arch function */
#define __HAVE_ARCH_MEMSET /* gcc builtin & arch function */
+#define __HAVE_ARCH_MEMSET16 /* arch function */
+#define __HAVE_ARCH_MEMSET32 /* arch function */
+#define __HAVE_ARCH_MEMSET64 /* arch function */
#define __HAVE_ARCH_STRCAT /* inline & arch function */
#define __HAVE_ARCH_STRCMP /* arch function */
#define __HAVE_ARCH_STRCPY /* inline & arch function */
@@ -31,17 +34,17 @@
#define __HAVE_ARCH_STRSTR /* arch function */
/* Prototypes for non-inlined arch strings functions. */
-extern int memcmp(const void *, const void *, size_t);
-extern void *memcpy(void *, const void *, size_t);
-extern void *memset(void *, int, size_t);
-extern void *memmove(void *, const void *, size_t);
-extern int strcmp(const char *,const char *);
-extern size_t strlcat(char *, const char *, size_t);
-extern size_t strlcpy(char *, const char *, size_t);
-extern char *strncat(char *, const char *, size_t);
-extern char *strncpy(char *, const char *, size_t);
-extern char *strrchr(const char *, int);
-extern char *strstr(const char *, const char *);
+int memcmp(const void *s1, const void *s2, size_t n);
+void *memcpy(void *dest, const void *src, size_t n);
+void *memset(void *s, int c, size_t n);
+void *memmove(void *dest, const void *src, size_t n);
+int strcmp(const char *s1, const char *s2);
+size_t strlcat(char *dest, const char *src, size_t n);
+size_t strlcpy(char *dest, const char *src, size_t size);
+char *strncat(char *dest, const char *src, size_t n);
+char *strncpy(char *dest, const char *src, size_t n);
+char *strrchr(const char *s, int c);
+char *strstr(const char *s1, const char *s2);
#undef __HAVE_ARCH_STRCHR
#undef __HAVE_ARCH_STRNCHR
@@ -50,7 +53,26 @@ extern char *strstr(const char *, const char *);
#undef __HAVE_ARCH_STRSEP
#undef __HAVE_ARCH_STRSPN
-#if !defined(IN_ARCH_STRING_C)
+void *__memset16(uint16_t *s, uint16_t v, size_t count);
+void *__memset32(uint32_t *s, uint32_t v, size_t count);
+void *__memset64(uint64_t *s, uint64_t v, size_t count);
+
+static inline void *memset16(uint16_t *s, uint16_t v, size_t count)
+{
+ return __memset16(s, v, count * sizeof(v));
+}
+
+static inline void *memset32(uint32_t *s, uint32_t v, size_t count)
+{
+ return __memset32(s, v, count * sizeof(v));
+}
+
+static inline void *memset64(uint64_t *s, uint64_t v, size_t count)
+{
+ return __memset64(s, v, count * sizeof(v));
+}
+
+#if !defined(IN_ARCH_STRING_C) && (!defined(CONFIG_FORTIFY_SOURCE) || defined(__NO_FORTIFY))
static inline void *memchr(const void * s, int c, size_t n)
{
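
The new memset16/32/64 wrappers take an element count and multiply it by the element size before calling the assembler back ends. Illustrative use (the arrays and fill values are made up):

#include <linux/kernel.h>
#include <asm/string.h>

/* Hypothetical example: the count argument is in elements, not bytes. */
static void fill_tables(void)
{
	u16 halfwords[8];
	u64 quadwords[16];

	memset16(halfwords, 0x0700, ARRAY_SIZE(halfwords));	/* 16 bytes */
	memset64(quadwords, 0x0123456789abcdefULL, ARRAY_SIZE(quadwords));
}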
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index c21fe1d57c00..ec7b476c1ac5 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -37,8 +37,8 @@ static inline void restore_access_regs(unsigned int *acrs)
save_ri_cb(prev->thread.ri_cb); \
save_gs_cb(prev->thread.gs_cb); \
} \
+ update_cr_regs(next); \
if (next->mm) { \
- update_cr_regs(next); \
set_cpu_flag(CIF_FPU); \
restore_access_regs(&next->thread.acrs[0]); \
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 2b498e58b914..a702cb9d4269 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -156,7 +156,8 @@ static inline unsigned char topology_mnest_limit(void)
struct topology_core {
unsigned char nl;
unsigned char reserved0[3];
- unsigned char :6;
+ unsigned char :5;
+ unsigned char d:1;
unsigned char pp:2;
unsigned char reserved1;
unsigned short origin;
@@ -198,4 +199,5 @@ struct service_level {
int register_service_level(struct service_level *);
int unregister_service_level(struct service_level *);
+int sthyi_fill(void *dst, u64 *rc);
#endif /* __ASM_S390_SYSINFO_H */
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 55de4eb73604..1807229b292f 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -17,6 +17,7 @@ struct cpu_topology_s390 {
unsigned short book_id;
unsigned short drawer_id;
unsigned short node_id;
+ unsigned short dedicated : 1;
cpumask_t thread_mask;
cpumask_t core_mask;
cpumask_t book_mask;
@@ -35,6 +36,7 @@ extern cpumask_t cpus_with_topology;
#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
#define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id)
#define topology_drawer_cpumask(cpu) (&cpu_topology[cpu].drawer_mask)
+#define topology_cpu_dedicated(cpu) (cpu_topology[cpu].dedicated)
#define mc_capable() 1
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index bb2ce72300b0..ae6261ef97d5 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -47,6 +47,7 @@ struct vdso_per_cpu_data {
extern struct vdso_data *vdso_data;
+void vdso_alloc_boot_cpu(struct lowcore *lowcore);
int vdso_alloc_per_cpu(struct lowcore *lowcore);
void vdso_free_per_cpu(struct lowcore *lowcore);
diff --git a/arch/s390/include/uapi/asm/kvm_virtio.h b/arch/s390/include/uapi/asm/kvm_virtio.h
deleted file mode 100644
index 73283677a132..000000000000
--- a/arch/s390/include/uapi/asm/kvm_virtio.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * definition for virtio for kvm on s390
- *
- * Copyright IBM Corp. 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
- * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- */
-
-#ifndef __KVM_S390_VIRTIO_H
-#define __KVM_S390_VIRTIO_H
-
-#include <linux/types.h>
-
-struct kvm_device_desc {
- /* The device type: console, network, disk etc. Type 0 terminates. */
- __u8 type;
- /* The number of virtqueues (first in config array) */
- __u8 num_vq;
- /*
- * The number of bytes of feature bits. Multiply by 2: one for host
- * features and one for guest acknowledgements.
- */
- __u8 feature_len;
- /* The number of bytes of the config array after virtqueues. */
- __u8 config_len;
- /* A status byte, written by the Guest. */
- __u8 status;
- __u8 config[0];
-};
-
-/*
- * This is how we expect the device configuration field for a virtqueue
- * to be laid out in config space.
- */
-struct kvm_vqconfig {
- /* The token returned with an interrupt. Set by the guest */
- __u64 token;
- /* The address of the virtio ring */
- __u64 address;
- /* The number of entries in the virtio_ring */
- __u16 num;
-
-};
-
-#define KVM_S390_VIRTIO_NOTIFY 0
-#define KVM_S390_VIRTIO_RESET 1
-#define KVM_S390_VIRTIO_SET_STATUS 2
-
-/* The alignment to use between consumer and producer parts of vring.
- * This is pagesize for historical reasons. */
-#define KVM_S390_VIRTIO_RING_ALIGN 4096
-
-
-/* These values are supposed to be in ext_params on an interrupt */
-#define VIRTIO_PARAM_MASK 0xff
-#define VIRTIO_PARAM_VRING_INTERRUPT 0x0
-#define VIRTIO_PARAM_CONFIG_CHANGED 0x1
-#define VIRTIO_PARAM_DEV_ADD 0x2
-
-#endif
diff --git a/arch/s390/include/uapi/asm/sthyi.h b/arch/s390/include/uapi/asm/sthyi.h
new file mode 100644
index 000000000000..ec113db4eb7e
--- /dev/null
+++ b/arch/s390/include/uapi/asm/sthyi.h
@@ -0,0 +1,6 @@
+#ifndef _UAPI_ASM_STHYI_H
+#define _UAPI_ASM_STHYI_H
+
+#define STHYI_FC_CP_IFL_CAP 0
+
+#endif /* _UAPI_ASM_STHYI_H */
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index b52bce8ee941..725120939051 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -316,7 +316,8 @@
#define __NR_pwritev2 377
#define __NR_s390_guarded_storage 378
#define __NR_statx 379
-#define NR_syscalls 380
+#define __NR_s390_sthyi 380
+#define NR_syscalls 381
/*
* There are some system calls that are not present on 64 bit, some