Diffstat (limited to 'arch/s390/include/asm')
79 files changed, 1529 insertions, 1127 deletions
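A recurring theme in the hunks below is the rework of the s390 ALTERNATIVE machinery: the old nop-padding and length-check macros (alt_len_check, alt_pad, OLDINSTR_PADDING, ...) are removed, and each descriptor now ends with a pair of .org directives that make the assembler reject any alternative whose replacement differs in size from the original. The following stand-alone sketch shows that idiom in isolation; it assumes GNU as, and the labels and nopr instructions are illustrative placeholders rather than code taken from the patch.

	# If the two blocks differ in length, one of the two .org expressions
	# moves the location counter backwards, which the assembler rejects;
	# when the lengths match, both directives are no-ops.
		.text
	orig_start:
		nopr	%r7			# "original" sequence, 2 bytes
	orig_end:
	alt_start:
		nopr	%r7			# "replacement" sequence, 2 bytes
	alt_end:
		.org . - (orig_end - orig_start) + (alt_end - alt_start)
		.org . - (alt_end - alt_start) + (orig_end - orig_start)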
diff --git a/arch/s390/include/asm/abs_lowcore.h b/arch/s390/include/asm/abs_lowcore.h new file mode 100644 index 000000000000..4c61b14ee928 --- /dev/null +++ b/arch/s390/include/asm/abs_lowcore.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_ABS_LOWCORE_H +#define _ASM_S390_ABS_LOWCORE_H + +#include <asm/lowcore.h> + +#define ABS_LOWCORE_MAP_SIZE (NR_CPUS * sizeof(struct lowcore)) + +extern unsigned long __abs_lowcore; +extern bool abs_lowcore_mapped; + +struct lowcore *get_abs_lowcore(unsigned long *flags); +void put_abs_lowcore(struct lowcore *lc, unsigned long flags); +int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc); +void abs_lowcore_unmap(int cpu); + +#endif /* _ASM_S390_ABS_LOWCORE_H */ diff --git a/arch/s390/include/asm/airq.h b/arch/s390/include/asm/airq.h index 01936fdfaddb..e82e5626e139 100644 --- a/arch/s390/include/asm/airq.h +++ b/arch/s390/include/asm/airq.h @@ -12,10 +12,11 @@ #include <linux/bit_spinlock.h> #include <linux/dma-mapping.h> +#include <asm/tpi.h> struct airq_struct { struct hlist_node list; /* Handler queueing. */ - void (*handler)(struct airq_struct *airq, bool floating); + void (*handler)(struct airq_struct *airq, struct tpi_info *tpi_info); u8 *lsi_ptr; /* Local-Summary-Indicator pointer */ u8 lsi_mask; /* Local-Summary-Indicator mask */ u8 isc; /* Interrupt-subclass */ @@ -46,8 +47,10 @@ struct airq_iv { #define AIRQ_IV_PTR 4 /* Allocate the ptr array */ #define AIRQ_IV_DATA 8 /* Allocate the data array */ #define AIRQ_IV_CACHELINE 16 /* Cacheline alignment for the vector */ +#define AIRQ_IV_GUESTVEC 32 /* Vector is a pinned guest page */ -struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags); +struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags, + unsigned long *vec); void airq_iv_release(struct airq_iv *iv); unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num); void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num); diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h index 955d620db23e..7db046596b93 100644 --- a/arch/s390/include/asm/alternative-asm.h +++ b/arch/s390/include/asm/alternative-asm.h @@ -5,19 +5,6 @@ #ifdef __ASSEMBLY__ /* - * Check the length of an instruction sequence. The length may not be larger - * than 254 bytes and it has to be divisible by 2. - */ -.macro alt_len_check start,end - .if ( \end - \start ) > 254 - .error "cpu alternatives does not support instructions blocks > 254 bytes\n" - .endif - .if ( \end - \start ) % 2 - .error "cpu alternatives instructions length is odd\n" - .endif -.endm - -/* * Issue one struct alt_instr descriptor entry (need to put it into * the section .altinstructions, see below). This entry contains * enough information for the alternatives patching code to patch an @@ -28,60 +15,29 @@ .long \alt_start - . .word \feature .byte \orig_end - \orig_start - .byte \alt_end - \alt_start -.endm - -/* - * Fill up @bytes with nops. The macro emits 6-byte nop instructions - * for the bulk of the area, possibly followed by a 4-byte and/or - * a 2-byte nop if the size of the area is not divisible by 6. - */ -.macro alt_pad_fill bytes - .fill ( \bytes ) / 6, 6, 0xc0040000 - .fill ( \bytes ) % 6 / 4, 4, 0x47000000 - .fill ( \bytes ) % 6 % 4 / 2, 2, 0x0700 -.endm - -/* - * Fill up @bytes with nops. If the number of bytes is larger - * than 6, emit a jg instruction to branch over all nops, then - * fill an area of size (@bytes - 6) with nop instructions. 
- */ -.macro alt_pad bytes - .if ( \bytes > 0 ) - .if ( \bytes > 6 ) - jg . + \bytes - alt_pad_fill \bytes - 6 - .else - alt_pad_fill \bytes - .endif - .endif + .org . - ( \orig_end - \orig_start ) + ( \alt_end - \alt_start ) + .org . - ( \alt_end - \alt_start ) + ( \orig_end - \orig_start ) .endm /* * Define an alternative between two instructions. If @feature is * present, early code in apply_alternatives() replaces @oldinstr with - * @newinstr. ".skip" directive takes care of proper instruction padding - * in case @newinstr is longer than @oldinstr. + * @newinstr. */ .macro ALTERNATIVE oldinstr, newinstr, feature .pushsection .altinstr_replacement,"ax" 770: \newinstr 771: .popsection 772: \oldinstr -773: alt_len_check 770b, 771b - alt_len_check 772b, 773b - alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) ) -774: .pushsection .altinstructions,"a" - alt_entry 772b, 774b, 770b, 771b, \feature +773: .pushsection .altinstructions,"a" + alt_entry 772b, 773b, 770b, 771b, \feature .popsection .endm /* * Define an alternative between two instructions. If @feature is * present, early code in apply_alternatives() replaces @oldinstr with - * @newinstr. ".skip" directive takes care of proper instruction padding - * in case @newinstr is longer than @oldinstr. + * @newinstr. */ .macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2 .pushsection .altinstr_replacement,"ax" @@ -89,17 +45,9 @@ 771: \newinstr2 772: .popsection 773: \oldinstr -774: alt_len_check 770b, 771b - alt_len_check 771b, 772b - alt_len_check 773b, 774b - .if ( 771b - 770b > 772b - 771b ) - alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) ) - .else - alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) ) - .endif -775: .pushsection .altinstructions,"a" - alt_entry 773b, 775b, 770b, 771b,\feature1 - alt_entry 773b, 775b, 771b, 772b,\feature2 +774: .pushsection .altinstructions,"a" + alt_entry 773b, 774b, 770b, 771b,\feature1 + alt_entry 773b, 774b, 771b, 772b,\feature2 .popsection .endm diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h index d3880ca764ee..904dd049f954 100644 --- a/arch/s390/include/asm/alternative.h +++ b/arch/s390/include/asm/alternative.h @@ -13,32 +13,25 @@ struct alt_instr { s32 repl_offset; /* offset to replacement instruction */ u16 facility; /* facility bit set for replacement */ u8 instrlen; /* length of original instruction */ - u8 replacementlen; /* length of new instruction */ } __packed; void apply_alternative_instructions(void); void apply_alternatives(struct alt_instr *start, struct alt_instr *end); /* - * |661: |662: |6620 |663: - * +-----------+---------------------+ - * | oldinstr | oldinstr_padding | - * | +----------+----------+ - * | | | | - * | | >6 bytes |6/4/2 nops| - * | |6 bytes jg-----------> - * +-----------+---------------------+ - * ^^ static padding ^^ + * +---------------------------------+ + * |661: |662: + * | oldinstr | + * +---------------------------------+ * * .altinstr_replacement section - * +---------------------+-----------+ + * +---------------------------------+ * |6641: |6651: * | alternative instr 1 | - * +-----------+---------+- - - - - -+ - * |6642: |6652: | - * | alternative instr 2 | padding - * +---------------------+- - - - - -+ - * ^ runtime ^ + * +---------------------------------+ + * |6642: |6652: + * | alternative instr 2 | + * +---------------------------------+ * * .altinstructions section * +---------------------------------+ @@ -47,70 +40,31 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end); * 
+---------------------------------+ */ -#define b_altinstr(num) "664"#num -#define e_altinstr(num) "665"#num - -#define e_oldinstr_pad_end "663" +#define b_altinstr(num) "664"#num +#define e_altinstr(num) "665"#num #define oldinstr_len "662b-661b" -#define oldinstr_total_len e_oldinstr_pad_end"b-661b" #define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b" -#define oldinstr_pad_len(num) \ - "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \ - "((" altinstr_len(num) ")-(" oldinstr_len "))" - -#define INSTR_LEN_SANITY_CHECK(len) \ - ".if " len " > 254\n" \ - "\t.error \"cpu alternatives does not support instructions " \ - "blocks > 254 bytes\"\n" \ - ".endif\n" \ - ".if (" len ") %% 2\n" \ - "\t.error \"cpu alternatives instructions length is odd\"\n" \ - ".endif\n" - -#define OLDINSTR_PADDING(oldinstr, num) \ - ".if " oldinstr_pad_len(num) " > 6\n" \ - "\tjg " e_oldinstr_pad_end "f\n" \ - "6620:\n" \ - "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \ - ".else\n" \ - "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n" \ - "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n" \ - "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n" \ - ".endif\n" - -#define OLDINSTR(oldinstr, num) \ - "661:\n\t" oldinstr "\n662:\n" \ - OLDINSTR_PADDING(oldinstr, num) \ - e_oldinstr_pad_end ":\n" \ - INSTR_LEN_SANITY_CHECK(oldinstr_len) - -#define OLDINSTR_2(oldinstr, num1, num2) \ - "661:\n\t" oldinstr "\n662:\n" \ - ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \ - OLDINSTR_PADDING(oldinstr, num2) \ - ".else\n" \ - OLDINSTR_PADDING(oldinstr, num1) \ - ".endif\n" \ - e_oldinstr_pad_end ":\n" \ - INSTR_LEN_SANITY_CHECK(oldinstr_len) + +#define OLDINSTR(oldinstr) \ + "661:\n\t" oldinstr "\n662:\n" #define ALTINSTR_ENTRY(facility, num) \ "\t.long 661b - .\n" /* old instruction */ \ "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \ "\t.word " __stringify(facility) "\n" /* facility bit */ \ - "\t.byte " oldinstr_total_len "\n" /* source len */ \ - "\t.byte " altinstr_len(num) "\n" /* alt instruction len */ + "\t.byte " oldinstr_len "\n" /* instruction len */ \ + "\t.org . - (" oldinstr_len ") + (" altinstr_len(num) ")\n" \ + "\t.org . - (" altinstr_len(num) ") + (" oldinstr_len ")\n" #define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \ - b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \ - INSTR_LEN_SANITY_CHECK(altinstr_len(num)) + b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" /* alternative assembly primitive: */ #define ALTERNATIVE(oldinstr, altinstr, facility) \ ".pushsection .altinstr_replacement, \"ax\"\n" \ ALTINSTR_REPLACEMENT(altinstr, 1) \ ".popsection\n" \ - OLDINSTR(oldinstr, 1) \ + OLDINSTR(oldinstr) \ ".pushsection .altinstructions,\"a\"\n" \ ALTINSTR_ENTRY(facility, 1) \ ".popsection\n" @@ -120,7 +74,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end); ALTINSTR_REPLACEMENT(altinstr1, 1) \ ALTINSTR_REPLACEMENT(altinstr2, 2) \ ".popsection\n" \ - OLDINSTR_2(oldinstr, 1, 2) \ + OLDINSTR(oldinstr) \ ".pushsection .altinstructions,\"a\"\n" \ ALTINSTR_ENTRY(facility1, 1) \ ALTINSTR_ENTRY(facility2, 2) \ diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h index 3afbee21dc1f..f508f5025e38 100644 --- a/arch/s390/include/asm/ap.h +++ b/arch/s390/include/asm/ap.h @@ -12,6 +12,9 @@ #ifndef _ASM_S390_AP_H_ #define _ASM_S390_AP_H_ +#include <linux/io.h> +#include <asm/asm-extable.h> + /** * The ap_qid_t identifier of an ap queue. 
* If the AP facilities test (APFT) facility is available, @@ -57,11 +60,11 @@ static inline bool ap_instructions_available(void) unsigned long reg1 = 0; asm volatile( - " lgr 0,%[reg0]\n" /* qid into gr0 */ - " lghi 1,0\n" /* 0 into gr1 */ - " lghi 2,0\n" /* 0 into gr2 */ - " .long 0xb2af0000\n" /* PQAP(TAPQ) */ - "0: la %[reg1],1\n" /* 1 into reg1 */ + " lgr 0,%[reg0]\n" /* qid into gr0 */ + " lghi 1,0\n" /* 0 into gr1 */ + " lghi 2,0\n" /* 0 into gr2 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(TAPQ) */ + "0: la %[reg1],1\n" /* 1 into reg1 */ "1:\n" EX_TABLE(0b, 1b) : [reg1] "+&d" (reg1) @@ -83,11 +86,11 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info) unsigned long reg2; asm volatile( - " lgr 0,%[qid]\n" /* qid into gr0 */ - " lghi 2,0\n" /* 0 into gr2 */ - " .long 0xb2af0000\n" /* PQAP(TAPQ) */ - " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ - " lgr %[reg2],2\n" /* gr2 into reg2 */ + " lgr 0,%[qid]\n" /* qid into gr0 */ + " lghi 2,0\n" /* 0 into gr2 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(TAPQ) */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + " lgr %[reg2],2\n" /* gr2 into reg2 */ : [reg1] "=&d" (reg1), [reg2] "=&d" (reg2) : [qid] "d" (qid) : "cc", "0", "1", "2"); @@ -125,9 +128,9 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid) struct ap_queue_status reg1; asm volatile( - " lgr 0,%[reg0]\n" /* qid arg into gr0 */ - " .long 0xb2af0000\n" /* PQAP(RAPQ) */ - " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + " lgr 0,%[reg0]\n" /* qid arg into gr0 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(RAPQ) */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ : [reg1] "=&d" (reg1) : [reg0] "d" (reg0) : "cc", "0", "1"); @@ -146,9 +149,9 @@ static inline struct ap_queue_status ap_zapq(ap_qid_t qid) struct ap_queue_status reg1; asm volatile( - " lgr 0,%[reg0]\n" /* qid arg into gr0 */ - " .long 0xb2af0000\n" /* PQAP(ZAPQ) */ - " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + " lgr 0,%[reg0]\n" /* qid arg into gr0 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(ZAPQ) */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ : [reg1] "=&d" (reg1) : [reg0] "d" (reg0) : "cc", "0", "1"); @@ -187,10 +190,10 @@ static inline int ap_qci(struct ap_config_info *config) struct ap_config_info *reg2 = config; asm volatile( - " lgr 0,%[reg0]\n" /* QCI fc into gr0 */ - " lgr 2,%[reg2]\n" /* ptr to config into gr2 */ - " .long 0xb2af0000\n" /* PQAP(QCI) */ - "0: la %[reg1],0\n" /* good case, QCI fc available */ + " lgr 0,%[reg0]\n" /* QCI fc into gr0 */ + " lgr 2,%[reg2]\n" /* ptr to config into gr2 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(QCI) */ + "0: la %[reg1],0\n" /* good case, QCI fc available */ "1:\n" EX_TABLE(0b, 1b) : [reg1] "+&d" (reg1) @@ -224,13 +227,13 @@ struct ap_qirq_ctrl { * ap_aqic(): Control interruption for a specific AP. * @qid: The AP queue number * @qirqctrl: struct ap_qirq_ctrl (64 bit value) - * @ind: The notification indicator byte + * @pa_ind: Physical address of the notification indicator byte * * Returns AP queue status. 
*/ static inline struct ap_queue_status ap_aqic(ap_qid_t qid, struct ap_qirq_ctrl qirqctrl, - void *ind) + phys_addr_t pa_ind) { unsigned long reg0 = qid | (3UL << 24); /* fc 3UL is AQIC */ union { @@ -238,16 +241,16 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid, struct ap_qirq_ctrl qirqctrl; struct ap_queue_status status; } reg1; - void *reg2 = ind; + unsigned long reg2 = pa_ind; reg1.qirqctrl = qirqctrl; asm volatile( - " lgr 0,%[reg0]\n" /* qid param into gr0 */ - " lgr 1,%[reg1]\n" /* irq ctrl into gr1 */ - " lgr 2,%[reg2]\n" /* ni addr into gr2 */ - " .long 0xb2af0000\n" /* PQAP(AQIC) */ - " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + " lgr 0,%[reg0]\n" /* qid param into gr0 */ + " lgr 1,%[reg1]\n" /* irq ctrl into gr1 */ + " lgr 2,%[reg2]\n" /* ni addr into gr2 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(AQIC) */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ : [reg1] "+&d" (reg1) : [reg0] "d" (reg0), [reg2] "d" (reg2) : "cc", "0", "1", "2"); @@ -294,11 +297,11 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit, reg1.value = apinfo->val; asm volatile( - " lgr 0,%[reg0]\n" /* qid param into gr0 */ - " lgr 1,%[reg1]\n" /* qact in info into gr1 */ - " .long 0xb2af0000\n" /* PQAP(QACT) */ - " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ - " lgr %[reg2],2\n" /* qact out info into reg2 */ + " lgr 0,%[reg0]\n" /* qid param into gr0 */ + " lgr 1,%[reg1]\n" /* qact in info into gr1 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(QACT) */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + " lgr %[reg2],2\n" /* qact out info into reg2 */ : [reg1] "+&d" (reg1), [reg2] "=&d" (reg2) : [reg0] "d" (reg0) : "cc", "0", "1", "2"); diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h index 5dc712fde3c7..1594049893e0 100644 --- a/arch/s390/include/asm/archrandom.h +++ b/arch/s390/include/asm/archrandom.h @@ -2,7 +2,7 @@ /* * Kernel interface for the s390 arch_random_* functions * - * Copyright IBM Corp. 2017, 2020 + * Copyright IBM Corp. 
2017, 2022 * * Author: Harald Freudenberger <freude@de.ibm.com> * @@ -11,44 +11,28 @@ #ifndef _ASM_S390_ARCHRANDOM_H #define _ASM_S390_ARCHRANDOM_H -#ifdef CONFIG_ARCH_RANDOM - #include <linux/static_key.h> +#include <linux/preempt.h> #include <linux/atomic.h> +#include <asm/cpacf.h> DECLARE_STATIC_KEY_FALSE(s390_arch_random_available); extern atomic64_t s390_arch_random_counter; -bool s390_arch_get_random_long(unsigned long *v); -bool s390_arch_random_generate(u8 *buf, unsigned int nbytes); - -static inline bool __must_check arch_get_random_long(unsigned long *v) -{ - if (static_branch_likely(&s390_arch_random_available)) - return s390_arch_get_random_long(v); - return false; -} - -static inline bool __must_check arch_get_random_int(unsigned int *v) +static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs) { - return false; -} - -static inline bool __must_check arch_get_random_seed_long(unsigned long *v) -{ - if (static_branch_likely(&s390_arch_random_available)) { - return s390_arch_random_generate((u8 *)v, sizeof(*v)); - } - return false; + return 0; } -static inline bool __must_check arch_get_random_seed_int(unsigned int *v) +static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs) { - if (static_branch_likely(&s390_arch_random_available)) { - return s390_arch_random_generate((u8 *)v, sizeof(*v)); + if (static_branch_likely(&s390_arch_random_available) && + in_task()) { + cpacf_trng(NULL, 0, (u8 *)v, max_longs * sizeof(*v)); + atomic64_add(max_longs * sizeof(*v), &s390_arch_random_counter); + return max_longs; } - return false; + return 0; } -#endif /* CONFIG_ARCH_RANDOM */ #endif /* _ASM_S390_ARCHRANDOM_H */ diff --git a/arch/s390/include/asm/asm-extable.h b/arch/s390/include/asm/asm-extable.h new file mode 100644 index 000000000000..b74f1070ddb2 --- /dev/null +++ b/arch/s390/include/asm/asm-extable.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_EXTABLE_H +#define __ASM_EXTABLE_H + +#include <linux/stringify.h> +#include <linux/bits.h> +#include <asm/asm-const.h> + +#define EX_TYPE_NONE 0 +#define EX_TYPE_FIXUP 1 +#define EX_TYPE_BPF 2 +#define EX_TYPE_UA_STORE 3 +#define EX_TYPE_UA_LOAD_MEM 4 +#define EX_TYPE_UA_LOAD_REG 5 + +#define EX_DATA_REG_ERR_SHIFT 0 +#define EX_DATA_REG_ERR GENMASK(3, 0) + +#define EX_DATA_REG_ADDR_SHIFT 4 +#define EX_DATA_REG_ADDR GENMASK(7, 4) + +#define EX_DATA_LEN_SHIFT 8 +#define EX_DATA_LEN GENMASK(11, 8) + +#define __EX_TABLE(_section, _fault, _target, _type) \ + stringify_in_c(.section _section,"a";) \ + stringify_in_c(.align 4;) \ + stringify_in_c(.long (_fault) - .;) \ + stringify_in_c(.long (_target) - .;) \ + stringify_in_c(.short (_type);) \ + stringify_in_c(.short 0;) \ + stringify_in_c(.previous) + +#define __EX_TABLE_UA(_section, _fault, _target, _type, _regerr, _regaddr, _len)\ + stringify_in_c(.section _section,"a";) \ + stringify_in_c(.align 4;) \ + stringify_in_c(.long (_fault) - .;) \ + stringify_in_c(.long (_target) - .;) \ + stringify_in_c(.short (_type);) \ + stringify_in_c(.macro extable_reg regerr, regaddr;) \ + stringify_in_c(.set .Lfound, 0;) \ + stringify_in_c(.set .Lcurr, 0;) \ + stringify_in_c(.irp rs,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15;) \ + stringify_in_c( .ifc "\regerr", "%%r\rs";) \ + stringify_in_c( .set .Lfound, 1;) \ + stringify_in_c( .set .Lregerr, .Lcurr;) \ + stringify_in_c( .endif;) \ + stringify_in_c( .set .Lcurr, .Lcurr+1;) \ + stringify_in_c(.endr;) \ + stringify_in_c(.ifne (.Lfound != 1);) \ + stringify_in_c( 
.error "extable_reg: bad register argument1";) \ + stringify_in_c(.endif;) \ + stringify_in_c(.set .Lfound, 0;) \ + stringify_in_c(.set .Lcurr, 0;) \ + stringify_in_c(.irp rs,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15;) \ + stringify_in_c( .ifc "\regaddr", "%%r\rs";) \ + stringify_in_c( .set .Lfound, 1;) \ + stringify_in_c( .set .Lregaddr, .Lcurr;) \ + stringify_in_c( .endif;) \ + stringify_in_c( .set .Lcurr, .Lcurr+1;) \ + stringify_in_c(.endr;) \ + stringify_in_c(.ifne (.Lfound != 1);) \ + stringify_in_c( .error "extable_reg: bad register argument2";) \ + stringify_in_c(.endif;) \ + stringify_in_c(.short .Lregerr << EX_DATA_REG_ERR_SHIFT | \ + .Lregaddr << EX_DATA_REG_ADDR_SHIFT | \ + _len << EX_DATA_LEN_SHIFT;) \ + stringify_in_c(.endm;) \ + stringify_in_c(extable_reg _regerr,_regaddr;) \ + stringify_in_c(.purgem extable_reg;) \ + stringify_in_c(.previous) + +#define EX_TABLE(_fault, _target) \ + __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FIXUP) + +#define EX_TABLE_AMODE31(_fault, _target) \ + __EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP) + +#define EX_TABLE_UA_STORE(_fault, _target, _regerr) \ + __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0) + +#define EX_TABLE_UA_LOAD_MEM(_fault, _target, _regerr, _regmem, _len) \ + __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len) + +#define EX_TABLE_UA_LOAD_REG(_fault, _target, _regerr, _regzero) \ + __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0) + +#endif /* __ASM_EXTABLE_H */ diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index 2c057e1f3200..82de2a7c4160 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -26,14 +26,14 @@ static __always_inline void bcr_serialize(void) asm volatile(__ASM_BCR_SERIALIZE : : : "memory"); } -#define mb() bcr_serialize() -#define rmb() barrier() -#define wmb() barrier() -#define dma_rmb() mb() -#define dma_wmb() mb() -#define __smp_mb() mb() -#define __smp_rmb() rmb() -#define __smp_wmb() wmb() +#define __mb() bcr_serialize() +#define __rmb() barrier() +#define __wmb() barrier() +#define __dma_rmb() __mb() +#define __dma_wmb() __mb() +#define __smp_mb() __mb() +#define __smp_rmb() __rmb() +#define __smp_wmb() __wmb() #define __smp_store_release(p, v) \ do { \ diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 5a530c552c23..2de74fcd0578 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -113,76 +113,71 @@ static inline bool arch_test_and_change_bit(unsigned long nr, return old & mask; } -static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr) +static __always_inline void +arch___set_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned long *addr = __bitops_word(nr, ptr); + unsigned long *p = __bitops_word(nr, addr); unsigned long mask = __bitops_mask(nr); - *addr |= mask; + *p |= mask; } -static inline void arch___clear_bit(unsigned long nr, - volatile unsigned long *ptr) +static __always_inline void +arch___clear_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned long *addr = __bitops_word(nr, ptr); + unsigned long *p = __bitops_word(nr, addr); unsigned long mask = __bitops_mask(nr); - *addr &= ~mask; + *p &= ~mask; } -static inline void arch___change_bit(unsigned long nr, - volatile unsigned long *ptr) +static __always_inline void +arch___change_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned long 
*addr = __bitops_word(nr, ptr); + unsigned long *p = __bitops_word(nr, addr); unsigned long mask = __bitops_mask(nr); - *addr ^= mask; + *p ^= mask; } -static inline bool arch___test_and_set_bit(unsigned long nr, - volatile unsigned long *ptr) +static __always_inline bool +arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned long *addr = __bitops_word(nr, ptr); + unsigned long *p = __bitops_word(nr, addr); unsigned long mask = __bitops_mask(nr); unsigned long old; - old = *addr; - *addr |= mask; + old = *p; + *p |= mask; return old & mask; } -static inline bool arch___test_and_clear_bit(unsigned long nr, - volatile unsigned long *ptr) +static __always_inline bool +arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned long *addr = __bitops_word(nr, ptr); + unsigned long *p = __bitops_word(nr, addr); unsigned long mask = __bitops_mask(nr); unsigned long old; - old = *addr; - *addr &= ~mask; + old = *p; + *p &= ~mask; return old & mask; } -static inline bool arch___test_and_change_bit(unsigned long nr, - volatile unsigned long *ptr) +static __always_inline bool +arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned long *addr = __bitops_word(nr, ptr); + unsigned long *p = __bitops_word(nr, addr); unsigned long mask = __bitops_mask(nr); unsigned long old; - old = *addr; - *addr ^= mask; + old = *p; + *p ^= mask; return old & mask; } -static inline bool arch_test_bit(unsigned long nr, - const volatile unsigned long *ptr) -{ - const volatile unsigned long *addr = __bitops_word(nr, ptr); - unsigned long mask = __bitops_mask(nr); - - return *addr & mask; -} +#define arch_test_bit generic_test_bit +#define arch_test_bit_acquire generic_test_bit_acquire static inline bool arch_test_and_set_bit_lock(unsigned long nr, volatile unsigned long *ptr) @@ -256,8 +251,6 @@ static inline bool test_bit_inv(unsigned long nr, return test_bit(nr ^ (BITS_PER_LONG - 1), ptr); } -#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES - /** * __flogr - find leftmost one * @word - The word to search @@ -376,18 +369,7 @@ static inline int fls(unsigned int word) return fls64(word); } -#else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */ - -#include <asm-generic/bitops/__ffs.h> -#include <asm-generic/bitops/ffs.h> -#include <asm-generic/bitops/__fls.h> -#include <asm-generic/bitops/fls.h> -#include <asm-generic/bitops/fls64.h> - -#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */ - #include <asm-generic/bitops/ffz.h> -#include <asm-generic/bitops/find.h> #include <asm-generic/bitops/hweight.h> #include <asm-generic/bitops/sched.h> #include <asm-generic/bitops/le.h> diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h index 0b25f28351ed..aebe1e22c7be 100644 --- a/arch/s390/include/asm/bug.h +++ b/arch/s390/include/asm/bug.h @@ -15,7 +15,8 @@ "1: .asciz \""__FILE__"\"\n" \ ".previous\n" \ ".section __bug_table,\"awM\",@progbits,%2\n" \ - "2: .long 0b-2b,1b-2b\n" \ + "2: .long 0b-.\n" \ + " .long 1b-.\n" \ " .short %0,%1\n" \ " .org 2b+%2\n" \ ".previous\n" \ @@ -30,7 +31,7 @@ asm_inline volatile( \ "0: mc 0,0\n" \ ".section __bug_table,\"awM\",@progbits,%1\n" \ - "1: .long 0b-1b\n" \ + "1: .long 0b-.\n" \ " .short %0\n" \ " .org 1b+%1\n" \ ".previous\n" \ diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h index d4e90f2ba77e..bd1596810cc1 100644 --- a/arch/s390/include/asm/ccwdev.h +++ b/arch/s390/include/asm/ccwdev.h @@ -214,7 +214,6 @@ extern struct ccw_device *ccw_device_create_console(struct ccw_driver 
*); extern void ccw_device_destroy_console(struct ccw_device *); extern int ccw_device_enable_console(struct ccw_device *); extern void ccw_device_wait_idle(struct ccw_device *); -extern int ccw_device_force_console(struct ccw_device *); extern void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size); extern void ccw_device_dma_free(struct ccw_device *cdev, diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h index aa995d91cd1d..11d2fb3de4f5 100644 --- a/arch/s390/include/asm/ccwgroup.h +++ b/arch/s390/include/asm/ccwgroup.h @@ -25,7 +25,7 @@ struct ccwgroup_device { unsigned int count; struct device dev; struct work_struct ungroup_work; - struct ccw_device *cdev[0]; + struct ccw_device *cdev[]; }; /** diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h index ae4d2549cd67..bb48ea380c0d 100644 --- a/arch/s390/include/asm/chsc.h +++ b/arch/s390/include/asm/chsc.h @@ -63,7 +63,7 @@ struct chsc_pnso_area { struct chsc_header response; u32:32; struct chsc_pnso_naihdr naihdr; - struct chsc_pnso_naid_l2 entries[0]; + struct chsc_pnso_naid_l2 entries[]; } __packed __aligned(PAGE_SIZE); #endif /* _ASM_S390_CHSC_H */ diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index 1effac6a0152..1c4f585dd39b 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h @@ -369,7 +369,7 @@ void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev); struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages); /* Function from drivers/s390/cio/chsc.c */ -int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta); +int chsc_sstpc(void *page, unsigned int op, u16 ctrl, long *clock_delta); int chsc_sstpi(void *page, void *result, size_t size); int chsc_stzi(void *page, void *result, size_t size); int chsc_sgib(u32 origin); diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index cdc7ae72529d..a386070f1d56 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -8,10 +8,23 @@ #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/thread_info.h> +#include <asm/ptrace.h> #define compat_mode_t compat_mode_t typedef u16 compat_mode_t; +#define __compat_uid_t __compat_uid_t +typedef u16 __compat_uid_t; +typedef u16 __compat_gid_t; + +#define compat_dev_t compat_dev_t +typedef u16 compat_dev_t; + +#define compat_ipc_pid_t compat_ipc_pid_t +typedef u16 compat_ipc_pid_t; + +#define compat_statfs compat_statfs + #include <asm-generic/compat.h> #define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p( \ @@ -22,46 +35,16 @@ typedef u16 compat_mode_t; (__force t)(__TYPE_IS_PTR(t) ? 
((v) & 0x7fffffff) : (v)); \ }) -#define PSW32_MASK_PER 0x40000000UL -#define PSW32_MASK_DAT 0x04000000UL -#define PSW32_MASK_IO 0x02000000UL -#define PSW32_MASK_EXT 0x01000000UL -#define PSW32_MASK_KEY 0x00F00000UL -#define PSW32_MASK_BASE 0x00080000UL /* Always one */ -#define PSW32_MASK_MCHECK 0x00040000UL -#define PSW32_MASK_WAIT 0x00020000UL -#define PSW32_MASK_PSTATE 0x00010000UL -#define PSW32_MASK_ASC 0x0000C000UL -#define PSW32_MASK_CC 0x00003000UL -#define PSW32_MASK_PM 0x00000f00UL -#define PSW32_MASK_RI 0x00000080UL - #define PSW32_MASK_USER 0x0000FF00UL -#define PSW32_ADDR_AMODE 0x80000000UL -#define PSW32_ADDR_INSN 0x7FFFFFFFUL - -#define PSW32_DEFAULT_KEY (((u32) PAGE_DEFAULT_ACC) << 20) - -#define PSW32_ASC_PRIMARY 0x00000000UL -#define PSW32_ASC_ACCREG 0x00004000UL -#define PSW32_ASC_SECONDARY 0x00008000UL -#define PSW32_ASC_HOME 0x0000C000UL - #define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \ PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \ PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \ PSW32_ASC_PRIMARY) -#define COMPAT_USER_HZ 100 #define COMPAT_UTS_MACHINE "s390\0\0\0\0" -typedef u16 __compat_uid_t; -typedef u16 __compat_gid_t; -typedef u16 compat_dev_t; typedef u16 compat_nlink_t; -typedef u16 compat_ipc_pid_t; -typedef __kernel_fsid_t compat_fsid_t; typedef struct { u32 mask; @@ -102,26 +85,6 @@ struct compat_stat { u32 __unused5; }; -struct compat_flock { - short l_type; - short l_whence; - compat_off_t l_start; - compat_off_t l_len; - compat_pid_t l_pid; -}; - -#define F_GETLK64 12 -#define F_SETLK64 13 -#define F_SETLKW64 14 - -struct compat_flock64 { - short l_type; - short l_whence; - compat_loff_t l_start; - compat_loff_t l_len; - compat_pid_t l_pid; -}; - struct compat_statfs { u32 f_type; u32 f_bsize; @@ -152,10 +115,6 @@ struct compat_statfs64 { u32 f_spare[4]; }; -#define COMPAT_RLIM_INFINITY 0xffffffff - -#define COMPAT_OFF_T_MAX 0x7fffffff - /* * A pointer passed in from user mode. 
This should not * be used for syscall parameters, just declare them @@ -178,61 +137,4 @@ static inline int is_compat_task(void) #endif -struct compat_ipc64_perm { - compat_key_t key; - __compat_uid32_t uid; - __compat_gid32_t gid; - __compat_uid32_t cuid; - __compat_gid32_t cgid; - compat_mode_t mode; - unsigned short __pad1; - unsigned short seq; - unsigned short __pad2; - unsigned int __unused1; - unsigned int __unused2; -}; - -struct compat_semid64_ds { - struct compat_ipc64_perm sem_perm; - compat_ulong_t sem_otime; - compat_ulong_t sem_otime_high; - compat_ulong_t sem_ctime; - compat_ulong_t sem_ctime_high; - compat_ulong_t sem_nsems; - compat_ulong_t __unused1; - compat_ulong_t __unused2; -}; - -struct compat_msqid64_ds { - struct compat_ipc64_perm msg_perm; - compat_ulong_t msg_stime; - compat_ulong_t msg_stime_high; - compat_ulong_t msg_rtime; - compat_ulong_t msg_rtime_high; - compat_ulong_t msg_ctime; - compat_ulong_t msg_ctime_high; - compat_ulong_t msg_cbytes; - compat_ulong_t msg_qnum; - compat_ulong_t msg_qbytes; - compat_pid_t msg_lspid; - compat_pid_t msg_lrpid; - compat_ulong_t __unused1; - compat_ulong_t __unused2; -}; - -struct compat_shmid64_ds { - struct compat_ipc64_perm shm_perm; - compat_size_t shm_segsz; - compat_ulong_t shm_atime; - compat_ulong_t shm_atime_high; - compat_ulong_t shm_dtime; - compat_ulong_t shm_dtime_high; - compat_ulong_t shm_ctime; - compat_ulong_t shm_ctime_high; - compat_pid_t shm_cpid; - compat_pid_t shm_lpid; - compat_ulong_t shm_nattch; - compat_ulong_t __unused1; - compat_ulong_t __unused2; -}; #endif /* _ASM_S390X_COMPAT_H */ diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index 0d90cbeb89b4..feaba12dbecb 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -10,6 +10,7 @@ #define _ASM_S390_CPU_MF_H #include <linux/errno.h> +#include <asm/asm-extable.h> #include <asm/facility.h> asm(".include \"asm/cpu_mf-insn.h\"\n"); @@ -109,7 +110,9 @@ struct hws_basic_entry { unsigned int AS:2; /* 29-30 PSW address-space control */ unsigned int I:1; /* 31 entry valid or invalid */ unsigned int CL:2; /* 32-33 Configuration Level */ - unsigned int:14; + unsigned int H:1; /* 34 Host Indicator */ + unsigned int LS:1; /* 35 Limited Sampling */ + unsigned int:12; unsigned int prim_asn:16; /* primary ASN */ unsigned long long ia; /* Instruction Address */ unsigned long long gpp; /* Guest Program Parameter */ @@ -157,7 +160,7 @@ struct hws_trailer_entry { /* Load program parameter */ static inline void lpp(void *pp) { - asm volatile(".insn s,0xb2800000,0(%0)\n":: "a" (pp) : "memory"); + asm volatile("lpp 0(%0)\n" :: "a" (pp) : "memory"); } /* Query counter information */ @@ -166,7 +169,7 @@ static inline int qctri(struct cpumf_ctr_info *info) int rc = -EINVAL; asm volatile ( - "0: .insn s,0xb28e0000,%1\n" + "0: qctri %1\n" "1: lhi %0,0\n" "2:\n" EX_TABLE(1b, 2b) @@ -180,7 +183,7 @@ static inline int lcctl(u64 ctl) int cc; asm volatile ( - " .insn s,0xb2840000,%1\n" + " lcctl %1\n" " ipm %0\n" " srl %0,28\n" : "=d" (cc) : "Q" (ctl) : "cc"); @@ -194,7 +197,7 @@ static inline int __ecctr(u64 ctr, u64 *content) int cc; asm volatile ( - " .insn rre,0xb2e40000,%0,%2\n" + " ecctr %0,%2\n" " ipm %1\n" " srl %1,28\n" : "=d" (_content), "=d" (cc) : "d" (ctr) : "cc"); @@ -244,7 +247,7 @@ static inline int qsi(struct hws_qsi_info_block *info) int cc = 1; asm volatile( - "0: .insn s,0xb2860000,%1\n" + "0: qsi %1\n" "1: lhi %0,0\n" "2:\n" EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) @@ -259,7 +262,7 @@ static inline int 
lsctl(struct hws_lsctl_request_block *req) cc = 1; asm volatile( - "0: .insn s,0xb2870000,0(%1)\n" + "0: lsctl 0(%1)\n" "1: ipm %0\n" " srl %0,28\n" "2:\n" diff --git a/arch/s390/include/asm/cpufeature.h b/arch/s390/include/asm/cpufeature.h index 14cfd48d598e..931204613753 100644 --- a/arch/s390/include/asm/cpufeature.h +++ b/arch/s390/include/asm/cpufeature.h @@ -2,28 +2,21 @@ /* * Module interface for CPU features * - * Copyright IBM Corp. 2015 + * Copyright IBM Corp. 2015, 2022 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> */ #ifndef __ASM_S390_CPUFEATURE_H #define __ASM_S390_CPUFEATURE_H -#include <asm/elf.h> +enum { + S390_CPU_FEATURE_MSA, + S390_CPU_FEATURE_VXRS, + S390_CPU_FEATURE_UV, + MAX_CPU_FEATURES +}; -/* Hardware features on Linux on z Systems are indicated by facility bits that - * are mapped to the so-called machine flags. Particular machine flags are - * then used to define ELF hardware capabilities; most notably hardware flags - * that are essential for user space / glibc. - * - * Restrict the set of exposed CPU features to ELF hardware capabilities for - * now. Additional machine flags can be indicated by values larger than - * MAX_ELF_HWCAP_FEATURES. - */ -#define MAX_ELF_HWCAP_FEATURES (8 * sizeof(elf_hwcap)) -#define MAX_CPU_FEATURES MAX_ELF_HWCAP_FEATURES - -#define cpu_feature(feat) ilog2(HWCAP_ ## feat) +#define cpu_feature(feature) (feature) int cpu_have_feature(unsigned int nr); diff --git a/arch/s390/include/asm/crw.h b/arch/s390/include/asm/crw.h index c6ebfd31f1db..97456d98fe76 100644 --- a/arch/s390/include/asm/crw.h +++ b/arch/s390/include/asm/crw.h @@ -5,7 +5,6 @@ * Author(s): Ingo Adlung <adlung@de.ibm.com>, * Martin Schwidefsky <schwidefsky@de.ibm.com>, * Cornelia Huck <cornelia.huck@de.ibm.com>, - * Heiko Carstens <heiko.carstens@de.ibm.com>, */ #ifndef _ASM_S390_CRW_H diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index 04dc65f8901d..adf7d8cdac7e 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h @@ -12,6 +12,8 @@ #define CR0_CLOCK_COMPARATOR_SIGN BIT(63 - 10) #define CR0_LOW_ADDRESS_PROTECTION BIT(63 - 35) +#define CR0_FETCH_PROTECTION_OVERRIDE BIT(63 - 38) +#define CR0_STORAGE_PROTECTION_OVERRIDE BIT(63 - 39) #define CR0_EMERGENCY_SIGNAL_SUBMASK BIT(63 - 49) #define CR0_EXTERNAL_CALL_SUBMASK BIT(63 - 50) #define CR0_CLOCK_COMPARATOR_SUBMASK BIT(63 - 52) @@ -72,8 +74,17 @@ static __always_inline void __ctl_clear_bit(unsigned int cr, unsigned int bit) __ctl_load(reg, cr, cr); } -void smp_ctl_set_bit(int cr, int bit); -void smp_ctl_clear_bit(int cr, int bit); +void smp_ctl_set_clear_bit(int cr, int bit, bool set); + +static inline void ctl_set_bit(int cr, int bit) +{ + smp_ctl_set_clear_bit(cr, bit, true); +} + +static inline void ctl_clear_bit(int cr, int bit) +{ + smp_ctl_set_clear_bit(cr, bit, false); +} union ctlreg0 { unsigned long val; @@ -82,7 +93,10 @@ union ctlreg0 { unsigned long tcx : 1; /* Transactional-Execution control */ unsigned long pifo : 1; /* Transactional-Execution Program- Interruption-Filtering Override */ - unsigned long : 22; + unsigned long : 3; + unsigned long ccc : 1; /* Cryptography counter control */ + unsigned long pec : 1; /* PAI extension control */ + unsigned long : 17; unsigned long : 3; unsigned long lap : 1; /* Low-address-protection control */ unsigned long : 4; @@ -128,8 +142,5 @@ union ctlreg15 { }; }; -#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit) -#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit) - #endif /* 
__ASSEMBLY__ */ #endif /* __ASM_CTL_REG_H */ diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h index b3a8cb4daed6..56e99c286d12 100644 --- a/arch/s390/include/asm/diag.h +++ b/arch/s390/include/asm/diag.h @@ -11,6 +11,7 @@ #include <linux/if_ether.h> #include <linux/percpu.h> +#include <asm/asm-extable.h> enum diag_stat_enum { DIAG_STAT_X008, @@ -47,8 +48,8 @@ static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn) { unsigned long start_addr, end_addr; - start_addr = start_pfn << PAGE_SHIFT; - end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT; + start_addr = pfn_to_phys(start_pfn); + end_addr = pfn_to_phys(start_pfn + num_pfn - 1); diag_stat_inc(DIAG_STAT_X010); asm volatile( diff --git a/arch/s390/include/asm/dma.h b/arch/s390/include/asm/dma.h index 6f26f35d4a71..dec1c4ce628c 100644 --- a/arch/s390/include/asm/dma.h +++ b/arch/s390/include/asm/dma.h @@ -11,10 +11,4 @@ */ #define MAX_DMA_ADDRESS 0x80000000 -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy (0) -#endif - #endif /* _ASM_S390_DMA_H */ diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h index 445fe4c8184a..06f795855af7 100644 --- a/arch/s390/include/asm/eadm.h +++ b/arch/s390/include/asm/eadm.h @@ -78,7 +78,7 @@ struct aob { struct aob_rq_header { struct scm_device *scmdev; - char data[0]; + char data[]; }; struct scm_device { diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h index 17aead80aadb..000de2b1e67a 100644 --- a/arch/s390/include/asm/entry-common.h +++ b/arch/s390/include/asm/entry-common.h @@ -5,24 +5,25 @@ #include <linux/sched.h> #include <linux/audit.h> #include <linux/randomize_kstack.h> -#include <linux/tracehook.h> #include <linux/processor.h> #include <linux/uaccess.h> #include <asm/timex.h> #include <asm/fpu/api.h> +#include <asm/pai.h> #define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_GUARDED_STORAGE | _TIF_PER_TRAP) void do_per_trap(struct pt_regs *regs); -#ifdef CONFIG_DEBUG_ENTRY -static __always_inline void arch_check_user_regs(struct pt_regs *regs) +static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) { - debug_user_asce(0); + if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) + debug_user_asce(0); + + pai_kernel_enter(regs); } -#define arch_check_user_regs arch_check_user_regs -#endif /* CONFIG_DEBUG_ENTRY */ +#define arch_enter_from_user_mode arch_enter_from_user_mode static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs, unsigned long ti_work) @@ -45,6 +46,8 @@ static __always_inline void arch_exit_to_user_mode(void) if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) debug_user_asce(1); + + pai_kernel_exit(current_pt_regs()); } #define arch_exit_to_user_mode arch_exit_to_user_mode @@ -59,7 +62,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, static inline bool on_thread_stack(void) { - return !(((unsigned long)(current->stack) ^ current_stack_pointer()) & ~(THREAD_SIZE - 1)); + return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1)); } #endif diff --git a/arch/s390/include/asm/extable.h b/arch/s390/include/asm/extable.h index 16dc57dd90b3..af6ba52743e9 100644 --- a/arch/s390/include/asm/extable.h +++ b/arch/s390/include/asm/extable.h @@ -25,7 +25,7 @@ struct exception_table_entry { int insn, fixup; - long handler; + short type, data; }; extern struct exception_table_entry *__start_amode31_ex_table; @@ -38,28 +38,6 @@ static inline unsigned long extable_fixup(const struct 
exception_table_entry *x) return (unsigned long)&x->fixup + x->fixup; } -typedef bool (*ex_handler_t)(const struct exception_table_entry *, - struct pt_regs *); - -static inline ex_handler_t -ex_fixup_handler(const struct exception_table_entry *x) -{ - if (likely(!x->handler)) - return NULL; - return (ex_handler_t)((unsigned long)&x->handler + x->handler); -} - -static inline bool ex_handle(const struct exception_table_entry *x, - struct pt_regs *regs) -{ - ex_handler_t handler = ex_fixup_handler(x); - - if (unlikely(handler)) - return handler(x, regs); - regs->psw.addr = extable_fixup(x); - return true; -} - #define ARCH_HAS_RELATIVE_EXTABLE static inline void swap_ex_entry_fixup(struct exception_table_entry *a, @@ -69,8 +47,26 @@ static inline void swap_ex_entry_fixup(struct exception_table_entry *a, { a->fixup = b->fixup + delta; b->fixup = tmp.fixup - delta; - a->handler = b->handler + delta; - b->handler = tmp.handler - delta; + a->type = b->type; + b->type = tmp.type; + a->data = b->data; + b->data = tmp.data; } +#define swap_ex_entry_fixup swap_ex_entry_fixup + +#ifdef CONFIG_BPF_JIT + +bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs); + +#else /* !CONFIG_BPF_JIT */ + +static inline bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs) +{ + return false; +} + +#endif /* CONFIG_BPF_JIT */ + +bool fixup_exception(struct pt_regs *regs); #endif diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h index cff0749e9657..b8a028a36173 100644 --- a/arch/s390/include/asm/fcx.h +++ b/arch/s390/include/asm/fcx.h @@ -214,7 +214,7 @@ struct dcw_intrg_data { u32 :32; u64 time; u64 prog_id; - u8 prog_data[0]; + u8 prog_data[]; } __attribute__ ((packed)); #define DCW_FLAGS_CC (1 << (7 - 1)) @@ -241,7 +241,7 @@ struct dcw { u32 :8; u32 cd_count:8; u32 count; - u8 cd[0]; + u8 cd[]; } __attribute__ ((packed)); #define TCCB_FORMAT_DEFAULT 0x7f diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h index a959b815a58b..b714ed0ef688 100644 --- a/arch/s390/include/asm/fpu/api.h +++ b/arch/s390/include/asm/fpu/api.h @@ -45,6 +45,7 @@ #define _ASM_S390_FPU_API_H #include <linux/preempt.h> +#include <asm/asm-extable.h> void save_fpu_regs(void); void load_fpu_regs(void); diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index 267f70f4393f..6f80ec9c04be 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -47,15 +47,17 @@ struct ftrace_regs { static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs) { - return &fregs->regs; + struct pt_regs *regs = &fregs->regs; + + if (test_pt_regs_flag(regs, PIF_FTRACE_FULL_REGS)) + return regs; + return NULL; } static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs, unsigned long ip) { - struct pt_regs *regs = arch_ftrace_get_regs(fregs); - - regs->psw.addr = ip; + fregs->regs.psw.addr = ip; } /* diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h index c22debfcebf1..eaeaeb3ff0be 100644 --- a/arch/s390/include/asm/futex.h +++ b/arch/s390/include/asm/futex.h @@ -4,6 +4,7 @@ #include <linux/uaccess.h> #include <linux/futex.h> +#include <asm/asm-extable.h> #include <asm/mmu_context.h> #include <asm/errno.h> @@ -16,7 +17,8 @@ "3: jl 1b\n" \ " lhi %0,0\n" \ "4: sacf 768\n" \ - EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ + EX_TABLE(0b,4b) EX_TABLE(1b,4b) \ + EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ 
"=m" (*uaddr) \ : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h index 40264f60b0da..5cc46e0dde62 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -147,5 +147,42 @@ int gmap_mprotect_notify(struct gmap *, unsigned long start, void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4], unsigned long gaddr, unsigned long vmaddr); int gmap_mark_unmergeable(void); -void s390_reset_acc(struct mm_struct *mm); +void s390_unlist_old_asce(struct gmap *gmap); +int s390_replace_asce(struct gmap *gmap); +void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns); +int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start, + unsigned long end, bool interruptible); + +/** + * s390_uv_destroy_range - Destroy a range of pages in the given mm. + * @mm: the mm on which to operate on + * @start: the start of the range + * @end: the end of the range + * + * This function will call cond_sched, so it should not generate stalls, but + * it will otherwise only return when it completed. + */ +static inline void s390_uv_destroy_range(struct mm_struct *mm, unsigned long start, + unsigned long end) +{ + (void)__s390_uv_destroy_range(mm, start, end, false); +} + +/** + * s390_uv_destroy_range_interruptible - Destroy a range of pages in the + * given mm, but stop when a fatal signal is received. + * @mm: the mm on which to operate on + * @start: the start of the range + * @end: the end of the range + * + * This function will call cond_sched, so it should not generate stalls. If + * a fatal signal is received, it will return with -EINTR immediately, + * without finishing destroying the whole range. Upon successful + * completion, 0 is returned. 
+ */ +static inline int s390_uv_destroy_range_interruptible(struct mm_struct *mm, unsigned long start, + unsigned long end) +{ + return __s390_uv_destroy_range(mm, start, end, true); +} #endif /* _ASM_S390_GMAP_H */ diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h index 60f9241e5e4a..ccdbccfde148 100644 --- a/arch/s390/include/asm/hugetlb.h +++ b/arch/s390/include/asm/hugetlb.h @@ -28,9 +28,11 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, static inline int prepare_hugepage_range(struct file *file, unsigned long addr, unsigned long len) { - if (len & ~HPAGE_MASK) + struct hstate *h = hstate_file(file); + + if (len & ~huge_page_mask(h)) return -EINVAL; - if (addr & ~HPAGE_MASK) + if (addr & ~huge_page_mask(h)) return -EINVAL; return 0; } @@ -45,15 +47,15 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long sz) { if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) - pte_val(*ptep) = _REGION3_ENTRY_EMPTY; + set_pte(ptep, __pte(_REGION3_ENTRY_EMPTY)); else - pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY; + set_pte(ptep, __pte(_SEGMENT_ENTRY_EMPTY)); } -static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep) +static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) { - huge_ptep_get_and_clear(vma->vm_mm, address, ptep); + return huge_ptep_get_and_clear(vma->vm_mm, address, ptep); } static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, @@ -85,6 +87,11 @@ static inline int huge_pte_none(pte_t pte) return pte_none(pte); } +static inline int huge_pte_none_mostly(pte_t pte) +{ + return huge_pte_none(pte); +} + static inline int huge_pte_write(pte_t pte) { return pte_write(pte); @@ -115,6 +122,21 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) return pte_modify(pte, newprot); } +static inline pte_t huge_pte_mkuffd_wp(pte_t pte) +{ + return pte; +} + +static inline pte_t huge_pte_clear_uffd_wp(pte_t pte) +{ + return pte; +} + +static inline int huge_pte_uffd_wp(pte_t pte) +{ + return 0; +} + static inline bool gigantic_page_runtime_supported(void) { return true; diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h index 6fb7aced104a..40eae2c08d61 100644 --- a/arch/s390/include/asm/idals.h +++ b/arch/s390/include/asm/idals.h @@ -108,7 +108,7 @@ clear_normalized_cda(struct ccw1 * ccw) struct idal_buffer { size_t size; size_t page_order; - void *data[0]; + void *data[]; }; /* diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index 3f8ee257f9aa..a405b6bb89fb 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h @@ -133,6 +133,8 @@ int ipl_report_add_certificate(struct ipl_report *report, void *key, * DIAG 308 support */ enum diag308_subcode { + DIAG308_CLEAR_RESET = 0, + DIAG308_LOAD_NORMAL_RESET = 1, DIAG308_REL_HSA = 2, DIAG308_LOAD_CLEAR = 3, DIAG308_LOAD_NORMAL_DUMP = 4, @@ -141,6 +143,10 @@ enum diag308_subcode { DIAG308_LOAD_NORMAL = 7, }; +enum diag308_subcode_flags { + DIAG308_FLAG_EI = 1UL << 16, +}; + enum diag308_rc { DIAG308_RC_OK = 0x0001, DIAG308_RC_NOCONFIG = 0x0102, diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h index 9f75d67b8c20..89902f754740 100644 --- a/arch/s390/include/asm/irq.h +++ b/arch/s390/include/asm/irq.h @@ -81,8 +81,13 @@ static __always_inline void inc_irq_stat(enum interruption_class irq) } struct ext_code { - unsigned short subcode; - unsigned 
short code; + union { + struct { + unsigned short subcode; + unsigned short code; + }; + unsigned int int_code; + }; }; typedef void (*ext_int_handler_t)(struct ext_code, unsigned int, unsigned long); diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h index 916cfcb36d8a..895f774bbcc5 100644 --- a/arch/s390/include/asm/jump_label.h +++ b/arch/s390/include/asm/jump_label.h @@ -10,7 +10,6 @@ #include <linux/stringify.h> #define JUMP_LABEL_NOP_SIZE 6 -#define JUMP_LABEL_NOP_OFFSET 2 #ifdef CONFIG_CC_IS_CLANG #define JUMP_LABEL_STATIC_KEY_CONSTRAINT "i" @@ -21,12 +20,12 @@ #endif /* - * We use a brcl 0,2 instruction for jump labels at compile time so it + * We use a brcl 0,<offset> instruction for jump labels so it * can be easily distinguished from a hotpatch generated instruction. */ static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { - asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n" + asm_volatile_goto("0: brcl 0,%l[label]\n" ".pushsection __jump_table,\"aw\"\n" ".balign 8\n" ".long 0b-.,%l[label]-.\n" diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h index 7f3c9ac34bd8..1bd08eb56d5f 100644 --- a/arch/s390/include/asm/kexec.h +++ b/arch/s390/include/asm/kexec.h @@ -9,6 +9,8 @@ #ifndef _S390_KEXEC_H #define _S390_KEXEC_H +#include <linux/module.h> + #include <asm/processor.h> #include <asm/page.h> #include <asm/setup.h> @@ -29,7 +31,7 @@ #define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31) /* Allocate control page with GFP_DMA */ -#define KEXEC_CONTROL_MEMORY_GFP GFP_DMA +#define KEXEC_CONTROL_MEMORY_GFP (GFP_DMA | __GFP_NORETRY) /* Maximum address we can use for the crash control pages */ #define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL) @@ -83,4 +85,26 @@ struct kimage_arch { extern const struct kexec_file_ops s390_kexec_image_ops; extern const struct kexec_file_ops s390_kexec_elf_ops; +#ifdef CONFIG_CRASH_DUMP +void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); +#define crash_free_reserved_phys_range crash_free_reserved_phys_range + +void arch_kexec_protect_crashkres(void); +#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres + +void arch_kexec_unprotect_crashkres(void); +#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres +#endif + +#ifdef CONFIG_KEXEC_FILE +struct purgatory_info; +int arch_kexec_apply_relocations_add(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); +#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add + +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup +#endif #endif /*_S390_KEXEC_H */ diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h index 5eb722c984e4..598095f4b924 100644 --- a/arch/s390/include/asm/kprobes.h +++ b/arch/s390/include/asm/kprobes.h @@ -71,6 +71,7 @@ struct kprobe_ctlblk { void arch_remove_kprobe(struct kprobe *p); void __kretprobe_trampoline(void); +void trampoline_probe_handler(struct pt_regs *regs); int kprobe_fault_handler(struct pt_regs *regs, int trapnr); int kprobe_exceptions_notify(struct notifier_block *self, diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index a604d51acfc8..b1e98a9ed152 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -19,6 +19,8 @@ #include <linux/kvm.h> #include <linux/seqlock.h> #include <linux/module.h> 
+#include <linux/pci.h> +#include <linux/mmu_notifier.h> #include <asm/debug.h> #include <asm/cpu.h> #include <asm/fpu/api.h> @@ -45,6 +47,8 @@ #define KVM_REQ_START_MIGRATION KVM_ARCH_REQ(3) #define KVM_REQ_STOP_MIGRATION KVM_ARCH_REQ(4) #define KVM_REQ_VSIE_RESTART KVM_ARCH_REQ(5) +#define KVM_REQ_REFRESH_GUEST_PREFIX \ + KVM_ARCH_REQ_FLAGS(6, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define SIGP_CTRL_C 0x80 #define SIGP_CTRL_SCN_MASK 0x3f @@ -91,19 +95,30 @@ union ipte_control { }; }; +union sca_utility { + __u16 val; + struct { + __u16 mtcr : 1; + __u16 reserved : 15; + }; +}; + struct bsca_block { union ipte_control ipte_control; __u64 reserved[5]; __u64 mcn; - __u64 reserved2; + union sca_utility utility; + __u8 reserved2[6]; struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS]; }; struct esca_block { union ipte_control ipte_control; - __u64 reserved1[7]; + __u64 reserved1[6]; + union sca_utility utility; + __u8 reserved2[6]; __u64 mcn[4]; - __u64 reserved2[20]; + __u64 reserved3[20]; struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS]; }; @@ -247,12 +262,16 @@ struct kvm_s390_sie_block { #define ECB_SPECI 0x08 #define ECB_SRSI 0x04 #define ECB_HOSTPROTINT 0x02 +#define ECB_PTF 0x01 __u8 ecb; /* 0x0061 */ #define ECB2_CMMA 0x80 #define ECB2_IEP 0x20 #define ECB2_PFMFI 0x08 #define ECB2_ESCA 0x04 +#define ECB2_ZPCI_LSI 0x02 __u8 ecb2; /* 0x0062 */ +#define ECB3_AISI 0x20 +#define ECB3_AISII 0x10 #define ECB3_DEA 0x08 #define ECB3_AES 0x04 #define ECB3_RI 0x01 @@ -757,6 +776,7 @@ struct kvm_vm_stat { u64 inject_pfault_done; u64 inject_service_signal; u64 inject_virtio; + u64 aen_forward; }; struct kvm_arch_memory_slot { @@ -921,6 +941,8 @@ struct kvm_s390_pv { u64 guest_len; unsigned long stor_base; void *stor_var; + bool dumping; + struct mmu_notifier mmu_notifier; }; struct kvm_arch{ @@ -937,6 +959,7 @@ struct kvm_arch{ int use_cmma; int use_pfmfi; int use_skf; + int use_zpci_interp; int user_cpu_state_ctrl; int user_sigp; int user_stsi; @@ -960,6 +983,8 @@ struct kvm_arch{ DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS); struct kvm_s390_gisa_interrupt gisa_int; struct kvm_s390_pv pv; + struct list_head kzdev_list; + spinlock_t kzdev_list_lock; }; #define KVM_HVA_ERR_BAD (-1UL) @@ -1010,6 +1035,14 @@ static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm, static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} -void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu); +#define __KVM_HAVE_ARCH_VM_FREE +void kvm_arch_free_vm(struct kvm *kvm); + +struct zpci_kvm_hook { + int (*kvm_register)(void *opaque, struct kvm *kvm); + void (*kvm_unregister)(void *opaque); +}; + +extern struct zpci_kvm_hook zpci_kvm_hook; #endif diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h index 1ffea75b8ebc..c76777b15fec 100644 --- a/arch/s390/include/asm/linkage.h +++ b/arch/s390/include/asm/linkage.h @@ -2,27 +2,9 @@ #ifndef __ASM_LINKAGE_H #define __ASM_LINKAGE_H -#include <asm/asm-const.h> #include <linux/stringify.h> #define __ALIGN .align 16, 0x07 #define __ALIGN_STR __stringify(__ALIGN) -/* - * Helper macro for exception table entries - */ - -#define __EX_TABLE(_section, _fault, _target) \ - stringify_in_c(.section _section,"a";) \ - stringify_in_c(.align 8;) \ - stringify_in_c(.long (_fault) - .;) \ - stringify_in_c(.long (_target) - .;) \ - stringify_in_c(.quad 0;) \ - stringify_in_c(.previous) - -#define EX_TABLE(_fault, _target) \ - __EX_TABLE(__ex_table, _fault, _target) -#define 
EX_TABLE_AMODE31(_fault, _target) \ - __EX_TABLE(.amode31.ex_table, _fault, _target) - #endif diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h deleted file mode 100644 index 5209f223331a..000000000000 --- a/arch/s390/include/asm/livepatch.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * livepatch.h - s390-specific Kernel Live Patching Core - * - * Copyright (c) 2013-2015 SUSE - * Authors: Jiri Kosina - * Vojtech Pavlik - * Jiri Slaby - */ - -#ifndef ASM_LIVEPATCH_H -#define ASM_LIVEPATCH_H - -#include <linux/ftrace.h> -#include <asm/ptrace.h> - -static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip) -{ - ftrace_instruction_pointer_set(fregs, ip); -} - -#endif diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 1262f5003acf..8aa1f6530a3e 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -34,12 +34,22 @@ struct lowcore { __u32 ext_int_code_addr; }; __u32 svc_int_code; /* 0x0088 */ - __u16 pgm_ilc; /* 0x008c */ - __u16 pgm_code; /* 0x008e */ + union { + struct { + __u16 pgm_ilc; /* 0x008c */ + __u16 pgm_code; /* 0x008e */ + }; + __u32 pgm_int_code; + }; __u32 data_exc_code; /* 0x0090 */ __u16 mon_class_num; /* 0x0094 */ - __u8 per_code; /* 0x0096 */ - __u8 per_atmid; /* 0x0097 */ + union { + struct { + __u8 per_code; /* 0x0096 */ + __u8 per_atmid; /* 0x0097 */ + }; + __u16 per_code_combined; + }; __u64 per_address; /* 0x0098 */ __u8 exc_access_id; /* 0x00a0 */ __u8 per_access_id; /* 0x00a1 */ @@ -153,11 +163,9 @@ struct lowcore { __u64 gmap; /* 0x03d0 */ __u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */ - /* br %r1 trampoline */ - __u16 br_r1_trampoline; /* 0x0400 */ - __u32 return_lpswe; /* 0x0402 */ - __u32 return_mcck_lpswe; /* 0x0406 */ - __u8 pad_0x040a[0x0e00-0x040a]; /* 0x040a */ + __u32 return_lpswe; /* 0x0400 */ + __u32 return_mcck_lpswe; /* 0x0404 */ + __u8 pad_0x040a[0x0e00-0x0408]; /* 0x0408 */ /* * 0xe00 contains the address of the IPL Parameter Information @@ -192,7 +200,12 @@ struct lowcore { __u64 last_break_save_area; /* 0x1338 */ __u32 access_regs_save_area[16]; /* 0x1340 */ __u64 cregs_save_area[16]; /* 0x1380 */ - __u8 pad_0x1400[0x1800-0x1400]; /* 0x1400 */ + __u8 pad_0x1400[0x1500-0x1400]; /* 0x1400 */ + /* Cryptography-counter designation */ + __u64 ccd; /* 0x1500 */ + /* AI-extension counter designation */ + __u64 aicd; /* 0x1508 */ + __u8 pad_0x1510[0x1800-0x1510]; /* 0x1510 */ /* Transaction abort diagnostic block */ struct pgm_tdb pgm_tdb; /* 0x1800 */ diff --git a/arch/s390/include/asm/maccess.h b/arch/s390/include/asm/maccess.h new file mode 100644 index 000000000000..c7fa838cf6b9 --- /dev/null +++ b/arch/s390/include/asm/maccess.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_S390_MACCESS_H +#define __ASM_S390_MACCESS_H + +#include <linux/types.h> + +struct iov_iter; + +extern unsigned long __memcpy_real_area; +void memcpy_real_init(void); +size_t memcpy_real_iter(struct iov_iter *iter, unsigned long src, size_t count); +int memcpy_real(void *dest, unsigned long src, size_t count); +#ifdef CONFIG_CRASH_DUMP +int copy_oldmem_kernel(void *dst, unsigned long src, size_t count); +#endif + +#endif /* __ASM_S390_MACCESS_H */ diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index e12ff0f29d1a..829d68e2c685 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h @@ -4,6 +4,7 @@ #include <linux/cpumask.h> #include <linux/errno.h> +#include 
<asm/asm-extable.h> typedef struct { spinlock_t lock; @@ -17,7 +18,7 @@ typedef struct { unsigned long asce_limit; unsigned long vdso_base; /* The mmu context belongs to a secure guest. */ - atomic_t is_protected; + atomic_t protected_count; /* * The following bitfields need a down_write on the mm * semaphore when they are written to. As they are only @@ -41,18 +42,4 @@ typedef struct { .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \ .context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list), -static inline int tprot(unsigned long addr) -{ - int rc = -EFAULT; - - asm volatile( - " tprot 0(%1),0\n" - "0: ipm %0\n" - " srl %0,28\n" - "1:\n" - EX_TABLE(0b,1b) - : "+d" (rc) : "a" (addr) : "cc"); - return rc; -} - #endif diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index c7937f369e62..2a38af5a00c2 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -26,7 +26,7 @@ static inline int init_new_context(struct task_struct *tsk, INIT_LIST_HEAD(&mm->context.gmap_list); cpumask_clear(&mm->context.cpu_attach_mask); atomic_set(&mm->context.flush_count, 0); - atomic_set(&mm->context.is_protected, 0); + atomic_set(&mm->context.protected_count, 0); mm->context.gmap_asce = 0; mm->context.flush_mm = 0; #ifdef CONFIG_PGSTE diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h index 2db45d7e68aa..af1cd3a6f406 100644 --- a/arch/s390/include/asm/nmi.h +++ b/arch/s390/include/asm/nmi.h @@ -6,7 +6,6 @@ * Author(s): Ingo Adlung <adlung@de.ibm.com>, * Martin Schwidefsky <schwidefsky@de.ibm.com>, * Cornelia Huck <cornelia.huck@de.ibm.com>, - * Heiko Carstens <heiko.carstens@de.ibm.com>, */ #ifndef _ASM_S390_NMI_H @@ -98,11 +97,11 @@ struct mcesa { struct pt_regs; -void nmi_alloc_boot_cpu(struct lowcore *lc); -int nmi_alloc_per_cpu(struct lowcore *lc); -void nmi_free_per_cpu(struct lowcore *lc); +void nmi_alloc_mcesa_early(u64 *mcesad); +int nmi_alloc_mcesa(u64 *mcesad); +void nmi_free_mcesa(u64 *mcesad); -void s390_handle_mcck(void); +void s390_handle_mcck(struct pt_regs *regs); void __s390_handle_mcck(void); int s390_do_machine_check(struct pt_regs *regs); diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h index 0033dcd663b1..7e9e99523e95 100644 --- a/arch/s390/include/asm/nospec-insn.h +++ b/arch/s390/include/asm/nospec-insn.h @@ -2,23 +2,24 @@ #ifndef _ASM_S390_NOSPEC_ASM_H #define _ASM_S390_NOSPEC_ASM_H -#include <asm/alternative-asm.h> -#include <asm/asm-offsets.h> #include <asm/dwarf.h> #ifdef __ASSEMBLY__ #ifdef CC_USING_EXPOLINE -_LC_BR_R1 = __LC_BR_R1 - /* * The expoline macros are used to create thunks in the same format * as gcc generates them. The 'comdat' section flag makes sure that * the various thunks are merged into a single copy. 
*/ .macro __THUNK_PROLOG_NAME name +#ifdef CONFIG_EXPOLINE_EXTERN + .pushsection .text,"ax",@progbits + .align 16,0x07 +#else .pushsection .text.\name,"axG",@progbits,\name,comdat +#endif .globl \name .hidden \name .type \name,@function @@ -26,167 +27,101 @@ _LC_BR_R1 = __LC_BR_R1 CFI_STARTPROC .endm - .macro __THUNK_EPILOG + .macro __THUNK_EPILOG_NAME name CFI_ENDPROC +#ifdef CONFIG_EXPOLINE_EXTERN + .size \name, .-\name +#endif .popsection .endm - .macro __THUNK_PROLOG_BR r1,r2 - __THUNK_PROLOG_NAME __s390_indirect_jump_r\r2\()use_r\r1 - .endm - - .macro __THUNK_PROLOG_BC d0,r1,r2 - __THUNK_PROLOG_NAME __s390_indirect_branch_\d0\()_\r2\()use_\r1 + .macro __THUNK_PROLOG_BR r1 + __THUNK_PROLOG_NAME __s390_indirect_jump_r\r1 .endm - .macro __THUNK_BR r1,r2 - jg __s390_indirect_jump_r\r2\()use_r\r1 + .macro __THUNK_EPILOG_BR r1 + __THUNK_EPILOG_NAME __s390_indirect_jump_r\r1 .endm - .macro __THUNK_BC d0,r1,r2 - jg __s390_indirect_branch_\d0\()_\r2\()use_\r1 + .macro __THUNK_BR r1 + jg __s390_indirect_jump_r\r1 .endm - .macro __THUNK_BRASL r1,r2,r3 - brasl \r1,__s390_indirect_jump_r\r3\()use_r\r2 + .macro __THUNK_BRASL r1,r2 + brasl \r1,__s390_indirect_jump_r\r2 .endm - .macro __DECODE_RR expand,reg,ruse - .set __decode_fail,1 + .macro __DECODE_R expand,reg + .set .L__decode_fail,1 .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 .ifc \reg,%r\r1 - .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 - .ifc \ruse,%r\r2 - \expand \r1,\r2 - .set __decode_fail,0 - .endif - .endr + \expand \r1 + .set .L__decode_fail,0 .endif .endr - .if __decode_fail == 1 - .error "__DECODE_RR failed" + .if .L__decode_fail == 1 + .error "__DECODE_R failed" .endif .endm - .macro __DECODE_RRR expand,rsave,rtarget,ruse - .set __decode_fail,1 + .macro __DECODE_RR expand,rsave,rtarget + .set .L__decode_fail,1 .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 .ifc \rsave,%r\r1 .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 .ifc \rtarget,%r\r2 - .irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 - .ifc \ruse,%r\r3 - \expand \r1,\r2,\r3 - .set __decode_fail,0 - .endif - .endr - .endif - .endr - .endif - .endr - .if __decode_fail == 1 - .error "__DECODE_RRR failed" - .endif - .endm - - .macro __DECODE_DRR expand,disp,reg,ruse - .set __decode_fail,1 - .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 - .ifc \reg,%r\r1 - .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 - .ifc \ruse,%r\r2 - \expand \disp,\r1,\r2 - .set __decode_fail,0 + \expand \r1,\r2 + .set .L__decode_fail,0 .endif .endr .endif .endr - .if __decode_fail == 1 - .error "__DECODE_DRR failed" + .if .L__decode_fail == 1 + .error "__DECODE_RR failed" .endif .endm - .macro __THUNK_EX_BR reg,ruse - # Be very careful when adding instructions to this macro! - # The ALTERNATIVE replacement code has a .+10 which targets - # the "br \reg" after the code has been patched. -#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES + .macro __THUNK_EX_BR reg exrl 0,555f j . -#else - .ifc \reg,%r1 - ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35 - j . - .else - larl \ruse,555f - ex 0,0(\ruse) - j . - .endif -#endif 555: br \reg .endm - .macro __THUNK_EX_BC disp,reg,ruse -#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES - exrl 0,556f - j . +#ifdef CONFIG_EXPOLINE_EXTERN + .macro GEN_BR_THUNK reg + .endm + .macro GEN_BR_THUNK_EXTERN reg #else - larl \ruse,556f - ex 0,0(\ruse) - j . 
+ .macro GEN_BR_THUNK reg #endif -556: b \disp(\reg) - .endm - - .macro GEN_BR_THUNK reg,ruse=%r1 - __DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse - __THUNK_EX_BR \reg,\ruse - __THUNK_EPILOG - .endm - - .macro GEN_B_THUNK disp,reg,ruse=%r1 - __DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse - __THUNK_EX_BC \disp,\reg,\ruse - __THUNK_EPILOG + __DECODE_R __THUNK_PROLOG_BR,\reg + __THUNK_EX_BR \reg + __DECODE_R __THUNK_EPILOG_BR,\reg .endm - .macro BR_EX reg,ruse=%r1 -557: __DECODE_RR __THUNK_BR,\reg,\ruse + .macro BR_EX reg +557: __DECODE_R __THUNK_BR,\reg .pushsection .s390_indirect_branches,"a",@progbits .long 557b-. .popsection .endm - .macro B_EX disp,reg,ruse=%r1 -558: __DECODE_DRR __THUNK_BC,\disp,\reg,\ruse - .pushsection .s390_indirect_branches,"a",@progbits - .long 558b-. - .popsection - .endm - - .macro BASR_EX rsave,rtarget,ruse=%r1 -559: __DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse + .macro BASR_EX rsave,rtarget +559: __DECODE_RR __THUNK_BRASL,\rsave,\rtarget .pushsection .s390_indirect_branches,"a",@progbits .long 559b-. .popsection .endm #else - .macro GEN_BR_THUNK reg,ruse=%r1 - .endm - - .macro GEN_B_THUNK disp,reg,ruse=%r1 + .macro GEN_BR_THUNK reg .endm - .macro BR_EX reg,ruse=%r1 + .macro BR_EX reg br \reg .endm - .macro B_EX disp,reg,ruse=%r1 - b \disp(\reg) - .endm - - .macro BASR_EX rsave,rtarget,ruse=%r1 + .macro BASR_EX rsave,rtarget basr \rsave,\rtarget .endm #endif /* CC_USING_EXPOLINE */ diff --git a/arch/s390/include/asm/os_info.h b/arch/s390/include/asm/os_info.h index 3c89279d2a4b..0d1c74a7a650 100644 --- a/arch/s390/include/asm/os_info.h +++ b/arch/s390/include/asm/os_info.h @@ -8,6 +8,8 @@ #ifndef _ASM_S390_OS_INFO_H #define _ASM_S390_OS_INFO_H +#include <linux/uio.h> + #define OS_INFO_VERSION_MAJOR 1 #define OS_INFO_VERSION_MINOR 1 #define OS_INFO_MAGIC 0x4f53494e464f535aULL /* OSINFOSZ */ @@ -39,7 +41,6 @@ u32 os_info_csum(struct os_info *os_info); #ifdef CONFIG_CRASH_DUMP void *os_info_old_entry(int nr, unsigned long *size); -int copy_oldmem_kernel(void *dst, void *src, size_t count); #else static inline void *os_info_old_entry(int nr, unsigned long *size) { diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index d98d17a36c7b..61dea67bb9c7 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -20,6 +20,8 @@ #define PAGE_SIZE _PAGE_SIZE #define PAGE_MASK _PAGE_MASK #define PAGE_DEFAULT_ACC 0 +/* storage-protection override */ +#define PAGE_SPO_ACC 9 #define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4) #define HPAGE_SHIFT 20 @@ -90,11 +92,31 @@ typedef pte_t *pgtable_t; #define pgprot_val(x) ((x).pgprot) #define pgste_val(x) ((x).pgste) -#define pte_val(x) ((x).pte) -#define pmd_val(x) ((x).pmd) -#define pud_val(x) ((x).pud) -#define p4d_val(x) ((x).p4d) -#define pgd_val(x) ((x).pgd) + +static inline unsigned long pte_val(pte_t pte) +{ + return pte.pte; +} + +static inline unsigned long pmd_val(pmd_t pmd) +{ + return pmd.pmd; +} + +static inline unsigned long pud_val(pud_t pud) +{ + return pud.pud; +} + +static inline unsigned long p4d_val(p4d_t p4d) +{ + return p4d.p4d; +} + +static inline unsigned long pgd_val(pgd_t pgd) +{ + return pgd.pgd; +} #define __pgste(x) ((pgste_t) { (x) } ) #define __pte(x) ((pte_t) { (x) } ) diff --git a/arch/s390/include/asm/pai.h b/arch/s390/include/asm/pai.h new file mode 100644 index 000000000000..1a8a6b15d121 --- /dev/null +++ b/arch/s390/include/asm/pai.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Processor Activity Instrumentation support for cryptography 
counters + * + * Copyright IBM Corp. 2022 + * Author(s): Thomas Richter <tmricht@linux.ibm.com> + */ +#ifndef _ASM_S390_PAI_H +#define _ASM_S390_PAI_H + +#include <linux/jump_label.h> +#include <asm/lowcore.h> +#include <asm/ptrace.h> + +struct qpaci_info_block { + u64 header; + struct { + u64 : 8; + u64 num_cc : 8; /* # of supported crypto counters */ + u64 : 9; + u64 num_nnpa : 7; /* # of supported NNPA counters */ + u64 : 32; + }; +}; + +static inline int qpaci(struct qpaci_info_block *info) +{ + /* Size of info (in double words minus one) */ + size_t size = sizeof(*info) / sizeof(u64) - 1; + int cc; + + asm volatile( + " lgr 0,%[size]\n" + " .insn s,0xb28f0000,%[info]\n" + " lgr %[size],0\n" + " ipm %[cc]\n" + " srl %[cc],28\n" + : [cc] "=d" (cc), [info] "=Q" (*info), [size] "+&d" (size) + : + : "0", "cc", "memory"); + return cc ? (size + 1) * sizeof(u64) : 0; +} + +#define PAI_CRYPTO_BASE 0x1000 /* First event number */ +#define PAI_CRYPTO_MAXCTR 256 /* Max # of event counters */ +#define PAI_CRYPTO_KERNEL_OFFSET 2048 +#define PAI_NNPA_BASE 0x1800 /* First event number */ +#define PAI_NNPA_MAXCTR 128 /* Max # of event counters */ + +DECLARE_STATIC_KEY_FALSE(pai_key); + +static __always_inline void pai_kernel_enter(struct pt_regs *regs) +{ + if (!IS_ENABLED(CONFIG_PERF_EVENTS)) + return; + if (!static_branch_unlikely(&pai_key)) + return; + if (!S390_lowcore.ccd) + return; + if (!user_mode(regs)) + return; + WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd | PAI_CRYPTO_KERNEL_OFFSET); +} + +static __always_inline void pai_kernel_exit(struct pt_regs *regs) +{ + if (!IS_ENABLED(CONFIG_PERF_EVENTS)) + return; + if (!static_branch_unlikely(&pai_key)) + return; + if (!S390_lowcore.ccd) + return; + if (!user_mode(regs)) + return; + WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd & ~PAI_CRYPTO_KERNEL_OFFSET); +} + +#endif diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 90824be5ce9a..108e732d7b14 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -6,9 +6,9 @@ #include <linux/mutex.h> #include <linux/iommu.h> #include <linux/pci_hotplug.h> -#include <asm-generic/pci.h> #include <asm/pci_clp.h> #include <asm/pci_debug.h> +#include <asm/pci_insn.h> #include <asm/sclp.h> #define PCIBIOS_MIN_IO 0x1000 @@ -97,6 +97,7 @@ struct zpci_bar_struct { }; struct s390_domain; +struct kvm_zdev; #define ZPCI_FUNCTIONS_PER_BUS 256 struct zpci_bus { @@ -116,18 +117,20 @@ struct zpci_bus { struct zpci_dev { struct zpci_bus *zbus; struct list_head entry; /* list of all zpci_devices, needed for hotplug, etc. 
*/ - struct list_head bus_next; struct kref kref; struct hotplug_slot hotplug_slot; enum zpci_state state; u32 fid; /* function ID, used by sclp */ u32 fh; /* function handle, used by insn's */ + u32 gisa; /* GISA designation for passthrough */ u16 vfn; /* virtual function number */ u16 pchid; /* physical channel ID */ + u16 maxstbl; /* Maximum store block size */ u8 pfgid; /* function group ID */ u8 pft; /* pci function type */ u8 port; + u8 dtsm; /* Supported DT mask */ u8 rid_available : 1; u8 has_hp_slot : 1; u8 has_resources : 1; @@ -186,7 +189,10 @@ struct zpci_dev { struct dentry *debugfs_dev; + /* IOMMU and passthrough */ struct s390_domain *s390_domain; /* s390 IOMMU domain data */ + struct kvm_zdev *kzdev; + struct mutex kzdev_lock; }; static inline bool zdev_enabled(struct zpci_dev *zdev) @@ -198,6 +204,9 @@ extern const struct attribute_group *zpci_attr_groups[]; extern unsigned int s390_pci_force_floating __initdata; extern unsigned int s390_pci_no_rid; +extern union zpci_sic_iib *zpci_aipb; +extern struct airq_iv *zpci_aif_sbv; + /* ----------------------------------------------------------------------------- Prototypes ----------------------------------------------------------------------------- */ @@ -283,9 +292,6 @@ int zpci_dma_exit_device(struct zpci_dev *zdev); int __init zpci_irq_init(void); void __init zpci_irq_exit(void); -int zpci_set_irq(struct zpci_dev *zdev); -int zpci_clear_irq(struct zpci_dev *zdev); - /* FMB */ int zpci_fmb_enable_device(struct zpci_dev *); int zpci_fmb_disable_device(struct zpci_dev *); diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h index 1f4b666e85ee..d6189ed14f84 100644 --- a/arch/s390/include/asm/pci_clp.h +++ b/arch/s390/include/asm/pci_clp.h @@ -153,9 +153,11 @@ struct clp_rsp_query_pci_grp { u8 : 6; u8 frame : 1; u8 refresh : 1; /* TLB refresh mode */ - u16 reserved2; + u16 : 3; + u16 maxstbl : 13; /* Maximum store block size */ u16 mui; - u16 : 16; + u8 dtsm; /* Supported DT mask */ + u8 reserved3; u16 maxfaal; u16 : 4; u16 dnoi : 12; @@ -173,7 +175,8 @@ struct clp_req_set_pci { u16 reserved2; u8 oc; /* operation controls */ u8 ndas; /* number of dma spaces */ - u64 reserved3; + u32 reserved3; + u32 gisa; /* GISA designation */ } __packed; /* Set PCI function response */ diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h index 5dfe47588277..3bb4e7e33a0e 100644 --- a/arch/s390/include/asm/pci_debug.h +++ b/arch/s390/include/asm/pci_debug.h @@ -17,9 +17,14 @@ extern debug_info_t *pci_debug_err_id; debug_text_event(pci_debug_err_id, 0, debug_buffer); \ } while (0) +static inline void zpci_err_hex_level(int level, void *addr, int len) +{ + debug_event(pci_debug_err_id, level, addr, len); +} + static inline void zpci_err_hex(void *addr, int len) { - debug_event(pci_debug_err_id, 0, addr, len); + zpci_err_hex_level(0, addr, len); } #endif diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h index 3b8e89d4578a..91e63426bdc5 100644 --- a/arch/s390/include/asm/pci_dma.h +++ b/arch/s390/include/asm/pci_dma.h @@ -97,23 +97,23 @@ static inline unsigned int calc_px(dma_addr_t ptr) return ((unsigned long) ptr >> PAGE_SHIFT) & ZPCI_PT_MASK; } -static inline void set_pt_pfaa(unsigned long *entry, void *pfaa) +static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa) { *entry &= ZPCI_PTE_FLAG_MASK; - *entry |= ((unsigned long) pfaa & ZPCI_PTE_ADDR_MASK); + *entry |= (pfaa & ZPCI_PTE_ADDR_MASK); } -static inline void set_rt_sto(unsigned long *entry, 
void *sto) +static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto) { *entry &= ZPCI_RTE_FLAG_MASK; - *entry |= ((unsigned long) sto & ZPCI_RTE_ADDR_MASK); + *entry |= (sto & ZPCI_RTE_ADDR_MASK); *entry |= ZPCI_TABLE_TYPE_RTX; } -static inline void set_st_pto(unsigned long *entry, void *pto) +static inline void set_st_pto(unsigned long *entry, phys_addr_t pto) { *entry &= ZPCI_STE_FLAG_MASK; - *entry |= ((unsigned long) pto & ZPCI_STE_ADDR_MASK); + *entry |= (pto & ZPCI_STE_ADDR_MASK); *entry |= ZPCI_TABLE_TYPE_SX; } @@ -169,16 +169,19 @@ static inline int pt_entry_isvalid(unsigned long entry) static inline unsigned long *get_rt_sto(unsigned long entry) { - return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX) - ? (unsigned long *) (entry & ZPCI_RTE_ADDR_MASK) - : NULL; + if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX) + return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK); + else + return NULL; + } static inline unsigned long *get_st_pto(unsigned long entry) { - return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX) - ? (unsigned long *) (entry & ZPCI_STE_ADDR_MASK) - : NULL; + if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX) + return phys_to_virt(entry & ZPCI_STE_ADDR_MASK); + else + return NULL; } /* Prototypes */ @@ -186,7 +189,7 @@ void dma_free_seg_table(unsigned long); unsigned long *dma_alloc_cpu_table(void); void dma_cleanup_tables(unsigned long *); unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr); -void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags); +void dma_update_cpu_trans(unsigned long *entry, phys_addr_t page_addr, int flags); extern const struct dma_map_ops s390_pci_dma_ops; diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h index 61cf9531f68f..e5f57cfe1d45 100644 --- a/arch/s390/include/asm/pci_insn.h +++ b/arch/s390/include/asm/pci_insn.h @@ -98,6 +98,15 @@ struct zpci_fib { u32 gd; } __packed __aligned(8); +/* Set Interruption Controls Operation Controls */ +#define SIC_IRQ_MODE_ALL 0 +#define SIC_IRQ_MODE_SINGLE 1 +#define SIC_SET_AENI_CONTROLS 2 +#define SIC_IRQ_MODE_DIRECT 4 +#define SIC_IRQ_MODE_D_ALL 16 +#define SIC_IRQ_MODE_D_SINGLE 17 +#define SIC_IRQ_MODE_SET_CPU 18 + /* directed interruption information block */ struct zpci_diib { u32 : 1; @@ -119,9 +128,20 @@ struct zpci_cdiib { u64 : 64; } __packed __aligned(8); +/* adapter interruption parameters block */ +struct zpci_aipb { + u64 faisb; + u64 gait; + u16 : 13; + u16 afi : 3; + u32 : 32; + u16 faal; +} __packed __aligned(8); + union zpci_sic_iib { struct zpci_diib diib; struct zpci_cdiib cdiib; + struct zpci_aipb aipb; }; DECLARE_STATIC_KEY_FALSE(have_mio); @@ -134,13 +154,6 @@ int __zpci_store(u64 data, u64 req, u64 offset); int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len); int __zpci_store_block(const u64 *data, u64 req, u64 offset); void zpci_barrier(void); -int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib); - -static inline int zpci_set_irq_ctrl(u16 ctl, u8 isc) -{ - union zpci_sic_iib iib = {{0}}; - - return __zpci_set_irq_ctrl(ctl, isc, &iib); -} +int zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib); #endif diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index f14a555eff74..17eb618f1348 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -103,17 +103,17 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) static inline void pgd_populate(struct 
mm_struct *mm, pgd_t *pgd, p4d_t *p4d) { - pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d); + set_pgd(pgd, __pgd(_REGION1_ENTRY | __pa(p4d))); } static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) { - p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud); + set_p4d(p4d, __p4d(_REGION2_ENTRY | __pa(pud))); } static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) { - pud_val(*pud) = _REGION3_ENTRY | __pa(pmd); + set_pud(pud, __pud(_REGION3_ENTRY | __pa(pmd))); } static inline pgd_t *pgd_alloc(struct mm_struct *mm) @@ -129,7 +129,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) { - pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte); + set_pmd(pmd, __pmd(_SEGMENT_ENTRY | __pa(pte))); } #define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte) diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 008a6c856fa4..f1cb9391190d 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -181,6 +181,8 @@ static inline int is_module_addr(void *addr) #define _PAGE_SOFT_DIRTY 0x000 #endif +#define _PAGE_SWP_EXCLUSIVE _PAGE_LARGE /* SW pte exclusive swap bit */ + /* Set of bits not changed in pte_modify */ #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \ _PAGE_YOUNG | _PAGE_SOFT_DIRTY) @@ -422,23 +424,6 @@ static inline int is_module_addr(void *addr) * implies read permission. */ /*xwr*/ -#define __P000 PAGE_NONE -#define __P001 PAGE_RO -#define __P010 PAGE_RO -#define __P011 PAGE_RO -#define __P100 PAGE_RX -#define __P101 PAGE_RX -#define __P110 PAGE_RX -#define __P111 PAGE_RX - -#define __S000 PAGE_NONE -#define __S001 PAGE_RO -#define __S010 PAGE_RW -#define __S011 PAGE_RW -#define __S100 PAGE_RX -#define __S101 PAGE_RX -#define __S110 PAGE_RWX -#define __S111 PAGE_RWX /* * Segment entry (large page) protection definitions. 
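The pgalloc.h hunks above convert direct lvalue assignments through pgd_val()/p4d_val()/pud_val()/pmd_val() into building a new entry with __pgd()/__p4d()/__pud()/__pmd() and storing it via set_pgd()/set_p4d()/set_pud()/set_pmd(), which the pgtable.h changes further down implement as WRITE_ONCE() stores. A minimal stand-alone sketch of that pattern, using a mocked-up pmd_t and made-up flag values rather than the real asm/page.h definitions:

/* Sketch only: models the __pmd()/pmd_val()/set_pmd() accessor pattern;
 * the pmd_t layout and the flag values are stand-ins, not the kernel's. */
#include <stdio.h>

typedef struct { unsigned long pmd; } pmd_t;

#define _SEGMENT_ENTRY       0x0020UL   /* made-up bit patterns */
#define _SEGMENT_ENTRY_EMPTY 0x0020UL

static inline unsigned long pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}

static inline pmd_t __pmd(unsigned long val)
{
	return (pmd_t) { .pmd = val };
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	/* models WRITE_ONCE(): one non-torn store of the whole entry */
	*(volatile unsigned long *)&pmdp->pmd = pmd_val(pmd);
}

int main(void)
{
	pmd_t entry;
	unsigned long pte_origin = 0x1000;  /* pretend page table address */

	/* old style: pmd_val(*pmdp) = _SEGMENT_ENTRY | pte_origin;
	 * new style: build the value first, then store it through the setter */
	set_pmd(&entry, __pmd(_SEGMENT_ENTRY | pte_origin));
	printf("populated: %#lx\n", pmd_val(entry));

	set_pmd(&entry, __pmd(_SEGMENT_ENTRY_EMPTY));
	printf("cleared:   %#lx\n", pmd_val(entry));
	return 0;
}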
@@ -523,7 +508,7 @@ static inline int mm_has_pgste(struct mm_struct *mm) static inline int mm_is_protected(struct mm_struct *mm) { #ifdef CONFIG_PGSTE - if (unlikely(atomic_read(&mm->context.is_protected))) + if (unlikely(atomic_read(&mm->context.protected_count))) return 1; #endif return 0; @@ -538,6 +523,36 @@ static inline int mm_alloc_pgste(struct mm_struct *mm) return 0; } +static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) +{ + return __pte(pte_val(pte) & ~pgprot_val(prot)); +} + +static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot) +{ + return __pte(pte_val(pte) | pgprot_val(prot)); +} + +static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot) +{ + return __pmd(pmd_val(pmd) & ~pgprot_val(prot)); +} + +static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot) +{ + return __pmd(pmd_val(pmd) | pgprot_val(prot)); +} + +static inline pud_t clear_pud_bit(pud_t pud, pgprot_t prot) +{ + return __pud(pud_val(pud) & ~pgprot_val(prot)); +} + +static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot) +{ + return __pud(pud_val(pud) | pgprot_val(prot)); +} + /* * In the case that a guest uses storage keys * faults should no longer be backed by zero pages @@ -570,7 +585,7 @@ static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new unsigned long address = (unsigned long)ptr | 1; asm volatile( - " .insn rre,0xb98a0000,%[r1],%[address]" + " cspg %[r1],%[address]" : [r1] "+&d" (r1.pair), "+m" (*ptr) : [address] "d" (address) : "cc"); @@ -796,6 +811,22 @@ static inline int pmd_protnone(pmd_t pmd) } #endif +#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE +static inline int pte_swp_exclusive(pte_t pte) +{ + return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; +} + +static inline pte_t pte_swp_mkexclusive(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE)); +} + +static inline pte_t pte_swp_clear_exclusive(pte_t pte) +{ + return clear_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE)); +} + static inline int pte_soft_dirty(pte_t pte) { return pte_val(pte) & _PAGE_SOFT_DIRTY; @@ -804,15 +835,13 @@ static inline int pte_soft_dirty(pte_t pte) static inline pte_t pte_mksoft_dirty(pte_t pte) { - pte_val(pte) |= _PAGE_SOFT_DIRTY; - return pte; + return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY)); } #define pte_swp_mksoft_dirty pte_mksoft_dirty static inline pte_t pte_clear_soft_dirty(pte_t pte) { - pte_val(pte) &= ~_PAGE_SOFT_DIRTY; - return pte; + return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY)); } #define pte_swp_clear_soft_dirty pte_clear_soft_dirty @@ -823,14 +852,12 @@ static inline int pmd_soft_dirty(pmd_t pmd) static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) { - pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY; - return pmd; + return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY)); } static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) { - pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY; - return pmd; + return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY)); } /* @@ -881,32 +908,57 @@ static inline pgprot_t pte_pgprot(pte_t pte) * pgd/pmd/pte modification functions */ +static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) +{ + WRITE_ONCE(*pgdp, pgd); +} + +static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) +{ + WRITE_ONCE(*p4dp, p4d); +} + +static inline void set_pud(pud_t *pudp, pud_t pud) +{ + WRITE_ONCE(*pudp, pud); +} + +static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) +{ + WRITE_ONCE(*pmdp, pmd); +} + +static inline void set_pte(pte_t *ptep, pte_t pte) +{ + WRITE_ONCE(*ptep, pte); +} + static inline void pgd_clear(pgd_t *pgd) { if ((pgd_val(*pgd) & 
_REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1) - pgd_val(*pgd) = _REGION1_ENTRY_EMPTY; + set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY)); } static inline void p4d_clear(p4d_t *p4d) { if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) - p4d_val(*p4d) = _REGION2_ENTRY_EMPTY; + set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY)); } static inline void pud_clear(pud_t *pud) { if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) - pud_val(*pud) = _REGION3_ENTRY_EMPTY; + set_pud(pud, __pud(_REGION3_ENTRY_EMPTY)); } static inline void pmd_clear(pmd_t *pmdp) { - pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; + set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); } static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - pte_val(*ptep) = _PAGE_INVALID; + set_pte(ptep, __pte(_PAGE_INVALID)); } /* @@ -915,79 +967,74 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt */ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { - pte_val(pte) &= _PAGE_CHG_MASK; - pte_val(pte) |= pgprot_val(newprot); + pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK)); + pte = set_pte_bit(pte, newprot); /* * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX * has the invalid bit set, clear it again for readable, young pages */ if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ)) - pte_val(pte) &= ~_PAGE_INVALID; + pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID)); /* * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page * protection bit set, clear it again for writable, dirty pages */ if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE)) - pte_val(pte) &= ~_PAGE_PROTECT; + pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT)); return pte; } static inline pte_t pte_wrprotect(pte_t pte) { - pte_val(pte) &= ~_PAGE_WRITE; - pte_val(pte) |= _PAGE_PROTECT; - return pte; + pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE)); + return set_pte_bit(pte, __pgprot(_PAGE_PROTECT)); } static inline pte_t pte_mkwrite(pte_t pte) { - pte_val(pte) |= _PAGE_WRITE; + pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE)); if (pte_val(pte) & _PAGE_DIRTY) - pte_val(pte) &= ~_PAGE_PROTECT; + pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT)); return pte; } static inline pte_t pte_mkclean(pte_t pte) { - pte_val(pte) &= ~_PAGE_DIRTY; - pte_val(pte) |= _PAGE_PROTECT; - return pte; + pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY)); + return set_pte_bit(pte, __pgprot(_PAGE_PROTECT)); } static inline pte_t pte_mkdirty(pte_t pte) { - pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY; + pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY)); if (pte_val(pte) & _PAGE_WRITE) - pte_val(pte) &= ~_PAGE_PROTECT; + pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT)); return pte; } static inline pte_t pte_mkold(pte_t pte) { - pte_val(pte) &= ~_PAGE_YOUNG; - pte_val(pte) |= _PAGE_INVALID; - return pte; + pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG)); + return set_pte_bit(pte, __pgprot(_PAGE_INVALID)); } static inline pte_t pte_mkyoung(pte_t pte) { - pte_val(pte) |= _PAGE_YOUNG; + pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG)); if (pte_val(pte) & _PAGE_READ) - pte_val(pte) &= ~_PAGE_INVALID; + pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID)); return pte; } static inline pte_t pte_mkspecial(pte_t pte) { - pte_val(pte) |= _PAGE_SPECIAL; - return pte; + return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL)); } #ifdef CONFIG_HUGETLB_PAGE static inline pte_t pte_mkhuge(pte_t pte) { - pte_val(pte) |= _PAGE_LARGE; - return pte; + return set_pte_bit(pte, 
__pgprot(_PAGE_LARGE)); } #endif @@ -1006,7 +1053,7 @@ static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep, if (__builtin_constant_p(opt) && opt == 0) { /* Invalidation + TLB flush for the pte */ asm volatile( - " .insn rrf,0xb2210000,%[r1],%[r2],0,%[m4]" + " ipte %[r1],%[r2],0,%[m4]" : "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address), [m4] "i" (local)); return; @@ -1015,7 +1062,7 @@ static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep, /* Invalidate ptes with options + TLB flush of the ptes */ opt = opt | (asce & _ASCE_ORIGIN); asm volatile( - " .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]" + " ipte %[r1],%[r2],%[r3],%[m4]" : [r2] "+a" (address), [r3] "+a" (opt) : [r1] "a" (pto), [m4] "i" (local) : "memory"); } @@ -1028,7 +1075,7 @@ static __always_inline void __ptep_ipte_range(unsigned long address, int nr, /* Invalidate a range of ptes + TLB flush of the ptes */ do { asm volatile( - " .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]" + " ipte %[r1],%[r2],%[r3],%[m4]" : [r2] "+a" (address), [r3] "+a" (nr) : [r1] "a" (pto), [m4] "i" (local) : "memory"); } while (nr != 255); @@ -1114,13 +1161,26 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, if (full) { res = *ptep; - *ptep = __pte(_PAGE_INVALID); + set_pte(ptep, __pte(_PAGE_INVALID)); } else { res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID)); } - /* At this point the reference through the mapping is still present */ - if (mm_is_protected(mm) && pte_present(res)) - uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK); + /* Nothing to do */ + if (!mm_is_protected(mm) || !pte_present(res)) + return res; + /* + * At this point the reference through the mapping is still present. + * The notifier should have destroyed all protected vCPUs at this + * point, so the destroy should be successful. + */ + if (full && !uv_destroy_owned_page(pte_val(res) & PAGE_MASK)) + return res; + /* + * If something went wrong and the page could not be destroyed, or + * if this is not a mm teardown, the slower export is used as + * fallback instead. 
+ */ + uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK); return res; } @@ -1198,11 +1258,11 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { if (pte_present(entry)) - pte_val(entry) &= ~_PAGE_UNUSED; + entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED)); if (mm_has_pgste(mm)) ptep_set_pte_at(mm, addr, ptep, entry); else - *ptep = entry; + set_pte(ptep, entry); } /* @@ -1213,9 +1273,9 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) { pte_t __pte; - pte_val(__pte) = physpage | pgprot_val(pgprot); + __pte = __pte(physpage | pgprot_val(pgprot)); if (!MACHINE_HAS_NX) - pte_val(__pte) &= ~_PAGE_NOEXEC; + __pte = clear_pte_bit(__pte, __pgprot(_PAGE_NOEXEC)); return pte_mkyoung(__pte); } @@ -1355,61 +1415,57 @@ static inline bool gup_fast_permitted(unsigned long start, unsigned long end) static inline pmd_t pmd_wrprotect(pmd_t pmd) { - pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE; - pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; - return pmd; + pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE)); + return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); } static inline pmd_t pmd_mkwrite(pmd_t pmd) { - pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE; + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE)); if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) - pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; + pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); return pmd; } static inline pmd_t pmd_mkclean(pmd_t pmd) { - pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY; - pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; - return pmd; + pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY)); + return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); } static inline pmd_t pmd_mkdirty(pmd_t pmd) { - pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY; + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY)); if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) - pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; + pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); return pmd; } static inline pud_t pud_wrprotect(pud_t pud) { - pud_val(pud) &= ~_REGION3_ENTRY_WRITE; - pud_val(pud) |= _REGION_ENTRY_PROTECT; - return pud; + pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE)); + return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT)); } static inline pud_t pud_mkwrite(pud_t pud) { - pud_val(pud) |= _REGION3_ENTRY_WRITE; + pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE)); if (pud_val(pud) & _REGION3_ENTRY_DIRTY) - pud_val(pud) &= ~_REGION_ENTRY_PROTECT; + pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT)); return pud; } static inline pud_t pud_mkclean(pud_t pud) { - pud_val(pud) &= ~_REGION3_ENTRY_DIRTY; - pud_val(pud) |= _REGION_ENTRY_PROTECT; - return pud; + pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY)); + return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT)); } static inline pud_t pud_mkdirty(pud_t pud) { - pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY; + pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY)); if (pud_val(pud) & _REGION3_ENTRY_WRITE) - pud_val(pud) &= ~_REGION_ENTRY_PROTECT; + pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT)); return pud; } @@ -1433,37 +1489,39 @@ static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) static inline pmd_t pmd_mkyoung(pmd_t pmd) { - pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG)); if (pmd_val(pmd) & _SEGMENT_ENTRY_READ) - pmd_val(pmd) &= 
~_SEGMENT_ENTRY_INVALID; + pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID)); return pmd; } static inline pmd_t pmd_mkold(pmd_t pmd) { - pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG; - pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; - return pmd; + pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG)); + return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID)); } static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) { - pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE | - _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG | - _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY; - pmd_val(pmd) |= massage_pgprot_pmd(newprot); + unsigned long mask; + + mask = _SEGMENT_ENTRY_ORIGIN_LARGE; + mask |= _SEGMENT_ENTRY_DIRTY; + mask |= _SEGMENT_ENTRY_YOUNG; + mask |= _SEGMENT_ENTRY_LARGE; + mask |= _SEGMENT_ENTRY_SOFT_DIRTY; + pmd = __pmd(pmd_val(pmd) & mask); + pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot))); if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) - pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)) - pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID)); return pmd; } static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) { - pmd_t __pmd; - pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); - return __pmd; + return __pmd(physpage + massage_pgprot_pmd(pgprot)); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ @@ -1491,7 +1549,7 @@ static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp, if (__builtin_constant_p(opt) && opt == 0) { /* flush without guest asce */ asm volatile( - " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]" + " idte %[r1],0,%[r2],%[m4]" : "+m" (*pmdp) : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)), [m4] "i" (local) @@ -1499,7 +1557,7 @@ static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp, } else { /* flush with guest asce */ asm volatile( - " .insn rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]" + " idte %[r1],%[r3],%[r2],%[m4]" : "+m" (*pmdp) : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt), [r3] "a" (asce), [m4] "i" (local) @@ -1518,7 +1576,7 @@ static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp, if (__builtin_constant_p(opt) && opt == 0) { /* flush without guest asce */ asm volatile( - " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]" + " idte %[r1],0,%[r2],%[m4]" : "+m" (*pudp) : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)), [m4] "i" (local) @@ -1526,7 +1584,7 @@ static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp, } else { /* flush with guest asce */ asm volatile( - " .insn rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]" + " idte %[r1],%[r3],%[r2],%[m4]" : "+m" (*pudp) : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt), [r3] "a" (asce), [m4] "i" (local) @@ -1585,16 +1643,15 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t entry) { if (!MACHINE_HAS_NX) - pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC; - *pmdp = entry; + entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC)); + set_pmd(pmdp, entry); } static inline pmd_t pmd_mkhuge(pmd_t pmd) { - pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; - pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; - pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; - return pmd; + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE)); + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG)); + return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); } #define 
__HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR @@ -1611,7 +1668,7 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, { if (full) { pmd_t pmd = *pmdp; - *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY); + set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); return pmd; } return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); @@ -1669,18 +1726,18 @@ static inline int has_transparent_hugepage(void) /* * 64 bit swap entry format: * A page-table entry has some bits we have to treat in a special way. - * Bits 52 and bit 55 have to be zero, otherwise a specification - * exception will occur instead of a page translation exception. The - * specification exception has the bad habit not to store necessary - * information in the lowcore. - * Bits 54 and 63 are used to indicate the page type. + * Bits 54 and 63 are used to indicate the page type. Bit 53 marks the pte + * as invalid. * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200 - * This leaves the bits 0-51 and bits 56-62 to store type and offset. - * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51 - * for the offset. - * | offset |01100|type |00| + * | offset |E11XX|type |S0| * |0000000000111111111122222222223333333333444444444455|55555|55566|66| * |0123456789012345678901234567890123456789012345678901|23456|78901|23| + * + * Bits 0-51 store the offset. + * Bit 52 (E) is used to remember PG_anon_exclusive. + * Bits 57-61 store the type. + * Bit 62 (S) is used for softdirty tracking. + * Bits 55 and 56 (X) are unused. */ #define __SWP_OFFSET_MASK ((1UL << 52) - 1) @@ -1690,12 +1747,12 @@ static inline int has_transparent_hugepage(void) static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) { - pte_t pte; + unsigned long pteval; - pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT; - pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT; - pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT; - return pte; + pteval = _PAGE_INVALID | _PAGE_PROTECT; + pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT; + pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT; + return __pte(pteval); } static inline unsigned long __swp_type(swp_entry_t entry) @@ -1720,6 +1777,10 @@ static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset) extern int vmem_add_mapping(unsigned long start, unsigned long size); extern void vmem_remove_mapping(unsigned long start, unsigned long size); +extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc); +extern int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot); +extern void vmem_unmap_4k_page(unsigned long addr); +extern pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc); extern int s390_enable_sie(void); extern int s390_enable_skey(void); extern void s390_reset_cmma(struct mm_struct *mm); diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h index d9d5350cc3ec..bf15da0fedbc 100644 --- a/arch/s390/include/asm/preempt.h +++ b/arch/s390/include/asm/preempt.h @@ -46,10 +46,17 @@ static inline bool test_preempt_need_resched(void) static inline void __preempt_count_add(int val) { - if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) - __atomic_add_const(val, &S390_lowcore.preempt_count); - else - __atomic_add(val, &S390_lowcore.preempt_count); + /* + * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES + * enabled, gcc 12 fails to handle __builtin_constant_p(). 
+ */ + if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) { + if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) { + __atomic_add_const(val, &S390_lowcore.preempt_count); + return; + } + } + __atomic_add(val, &S390_lowcore.preempt_count); } static inline void __preempt_count_sub(int val) diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index f54c152bf2bf..87be3e855bf7 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -83,21 +83,22 @@ void cpu_detect_mhz_feature(void); extern const struct seq_operations cpuinfo_op; extern void execve_tail(void); extern void __bpon(void); +unsigned long vdso_size(void); /* * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. */ -#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_31BIT) ? \ +#define TASK_SIZE (test_thread_flag(TIF_31BIT) ? \ _REGION3_SIZE : TASK_SIZE_MAX) #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ (_REGION3_SIZE >> 1) : (_REGION2_SIZE >> 1)) -#define TASK_SIZE TASK_SIZE_OF(current) #define TASK_SIZE_MAX (-PAGE_SIZE) -#define STACK_TOP (test_thread_flag(TIF_31BIT) ? \ - _REGION3_SIZE : _REGION2_SIZE) -#define STACK_TOP_MAX _REGION2_SIZE +#define VDSO_BASE (STACK_TOP + PAGE_SIZE) +#define VDSO_LIMIT (test_thread_flag(TIF_31BIT) ? _REGION3_SIZE : _REGION2_SIZE) +#define STACK_TOP (VDSO_LIMIT - vdso_size() - PAGE_SIZE) +#define STACK_TOP_MAX (_REGION2_SIZE - vdso_size() - PAGE_SIZE) #define HAVE_ARCH_PICK_MMAP_LAYOUT @@ -185,9 +186,6 @@ struct pt_regs; void show_registers(struct pt_regs *regs); void show_cacheinfo(struct seq_file *m); -/* Free all resources held by a thread. */ -static inline void release_thread(struct task_struct *tsk) { } - /* Free guarded storage control block */ void guarded_storage_release(struct task_struct *tsk); void gs_load_bc_cb(struct pt_regs *regs); @@ -201,13 +199,7 @@ unsigned long __get_wchan(struct task_struct *p); /* Has task runtime instrumentation enabled ? */ #define is_ri_task(tsk) (!!(tsk)->thread.ri_cb) -static __always_inline unsigned long current_stack_pointer(void) -{ - unsigned long sp; - - asm volatile("la %0,0(15)" : "=a" (sp)); - return sp; -} +register unsigned long current_stack_pointer asm("r15"); static __always_inline unsigned short stap(void) { @@ -226,8 +218,7 @@ static inline unsigned long __ecag(unsigned int asi, unsigned char parm) { unsigned long val; - asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */ - : "=d" (val) : "a" (asi << 8 | parm)); + asm volatile("ecag %0,0,0(%1)" : "=d" (val) : "a" (asi << 8 | parm)); return val; } @@ -310,24 +301,8 @@ static __always_inline void __noreturn disabled_wait(void) while (1); } -/* - * Basic Program Check Handler. 
- */ -extern void s390_base_pgm_handler(void); -extern void (*s390_base_pgm_handler_fn)(void); - #define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL -extern int memcpy_real(void *, void *, size_t); -extern void memcpy_absolute(void *, void *, size_t); - -#define mem_assign_absolute(dest, val) do { \ - __typeof__(dest) __tmp = (val); \ - \ - BUILD_BUG_ON(sizeof(__tmp) != sizeof(val)); \ - memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \ -} while (0) - extern int s390_isolate_bp(void); extern int s390_isolate_bp_guest(void); diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index 4ffa8e7f0ed3..8bae33ab320a 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -15,11 +15,13 @@ #define PIF_EXECVE_PGSTE_RESTART 1 /* restart execve for PGSTE binaries */ #define PIF_SYSCALL_RET_SET 2 /* return value was set via ptrace */ #define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */ +#define PIF_FTRACE_FULL_REGS 4 /* all register contents valid (ftrace) */ #define _PIF_SYSCALL BIT(PIF_SYSCALL) #define _PIF_EXECVE_PGSTE_RESTART BIT(PIF_EXECVE_PGSTE_RESTART) #define _PIF_SYSCALL_RET_SET BIT(PIF_SYSCALL_RET_SET) #define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT) +#define _PIF_FTRACE_FULL_REGS BIT(PIF_FTRACE_FULL_REGS) #ifndef __ASSEMBLY__ @@ -69,6 +71,35 @@ enum { &(*(struct psw_bits *)(&(__psw))); \ })) +#define PSW32_MASK_PER 0x40000000UL +#define PSW32_MASK_DAT 0x04000000UL +#define PSW32_MASK_IO 0x02000000UL +#define PSW32_MASK_EXT 0x01000000UL +#define PSW32_MASK_KEY 0x00F00000UL +#define PSW32_MASK_BASE 0x00080000UL /* Always one */ +#define PSW32_MASK_MCHECK 0x00040000UL +#define PSW32_MASK_WAIT 0x00020000UL +#define PSW32_MASK_PSTATE 0x00010000UL +#define PSW32_MASK_ASC 0x0000C000UL +#define PSW32_MASK_CC 0x00003000UL +#define PSW32_MASK_PM 0x00000f00UL +#define PSW32_MASK_RI 0x00000080UL + +#define PSW32_ADDR_AMODE 0x80000000UL +#define PSW32_ADDR_INSN 0x7FFFFFFFUL + +#define PSW32_DEFAULT_KEY (((u32)PAGE_DEFAULT_ACC) << 20) + +#define PSW32_ASC_PRIMARY 0x00000000UL +#define PSW32_ASC_ACCREG 0x00004000UL +#define PSW32_ASC_SECONDARY 0x00008000UL +#define PSW32_ASC_HOME 0x0000C000UL + +typedef struct { + unsigned int mask; + unsigned int addr; +} psw_t32 __aligned(8); + #define PGM_INT_CODE_MASK 0x7f #define PGM_INT_CODE_PER 0x80 diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index 4b9b14b20984..2f983e0b95e0 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -18,7 +18,6 @@ #define QDIO_MAX_BUFFERS_MASK (QDIO_MAX_BUFFERS_PER_Q - 1) #define QDIO_BUFNR(num) ((num) & QDIO_MAX_BUFFERS_MASK) #define QDIO_MAX_ELEMENTS_PER_BUFFER 16 -#define QDIO_SBAL_SIZE 256 #define QDIO_QETH_QFMT 0 #define QDIO_ZFCP_QFMT 1 @@ -92,8 +91,8 @@ struct qdr { * @pfmt: implementation dependent parameter format * @rflags: QEBSM * @ac: adapter characteristics - * @isliba: absolute address of first input SLIB - * @osliba: absolute address of first output SLIB + * @isliba: logical address of first input SLIB + * @osliba: logical address of first output SLIB * @ebcnam: adapter identifier in EBCDIC * @parm: implementation dependent parameters */ @@ -134,9 +133,9 @@ struct slibe { * @sb_count: number of storage blocks * @sba: storage block element addresses * @dcount: size of storage block elements - * @user0: user defineable value - * @res4: reserved paramater - * @user1: user defineable value + * @user0: user definable value + * @res4: reserved parameter + * @user1: user definable value */ struct qaob { u64 res0[6]; @@ 
-313,7 +312,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, * @qib_rflags: rflags to set * @no_input_qs: number of input queues * @no_output_qs: number of output queues - * @input_handler: handler to be called for input queues + * @input_handler: handler to be called for input queues, and device-wide errors * @output_handler: handler to be called for output queues * @irq_poll: Data IRQ polling handler * @scan_threshold: # of in-use buffers that triggers scan on output queue @@ -337,9 +336,6 @@ struct qdio_initialize { struct qdio_buffer ***output_sbal_addr_array; }; -#define QDIO_FLAG_SYNC_INPUT 0x01 -#define QDIO_FLAG_SYNC_OUTPUT 0x02 - int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count); void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count); void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count); @@ -349,13 +345,18 @@ extern int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs, extern int qdio_establish(struct ccw_device *cdev, struct qdio_initialize *init_data); extern int qdio_activate(struct ccw_device *); -extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr, - unsigned int bufnr, unsigned int count, struct qaob *aob); extern int qdio_start_irq(struct ccw_device *cdev); extern int qdio_stop_irq(struct ccw_device *cdev); -extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, - bool is_input, unsigned int *bufnr, - unsigned int *error); +extern int qdio_inspect_input_queue(struct ccw_device *cdev, unsigned int nr, + unsigned int *bufnr, unsigned int *error); +extern int qdio_inspect_output_queue(struct ccw_device *cdev, unsigned int nr, + unsigned int *bufnr, unsigned int *error); +extern int qdio_add_bufs_to_input_queue(struct ccw_device *cdev, + unsigned int q_nr, unsigned int bufnr, + unsigned int count); +extern int qdio_add_bufs_to_output_queue(struct ccw_device *cdev, + unsigned int q_nr, unsigned int bufnr, + unsigned int count, struct qaob *aob); extern int qdio_shutdown(struct ccw_device *, int); extern int qdio_free(struct ccw_device *); extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *); diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index c68ea35de498..9d4c7f71e070 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright IBM Corp. 
2007 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> */ #ifndef _ASM_S390_SCLP_H @@ -18,6 +17,7 @@ #define EXT_SCCB_READ_CPU (3 * PAGE_SIZE) #ifndef __ASSEMBLY__ +#include <linux/uio.h> #include <asm/chpid.h> #include <asm/cpu.h> @@ -88,6 +88,11 @@ struct sclp_info { unsigned char has_diag318 : 1; unsigned char has_sipl : 1; unsigned char has_dirq : 1; + unsigned char has_iplcc : 1; + unsigned char has_zpci_lsi : 1; + unsigned char has_aisii : 1; + unsigned char has_aeni : 1; + unsigned char has_aisi : 1; unsigned int ibc; unsigned int mtid; unsigned int mtid_cp; @@ -112,7 +117,7 @@ struct zpci_report_error_header { * (OpenCrypto Successful Diagnostics Execution) */ u16 length; /* Length of Subsequent Data (up to 4K – SCLP header */ - u8 data[0]; /* Subsequent Data passed verbatim to SCLP ET 24 */ + u8 data[]; /* Subsequent Data passed verbatim to SCLP ET 24 */ } __packed; extern char *sclp_early_sccb; @@ -142,8 +147,7 @@ int sclp_pci_deconfigure(u32 fid); int sclp_ap_configure(u32 apid); int sclp_ap_deconfigure(u32 apid); int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid); -int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count); -int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count); +size_t memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count); void sclp_ocf_cpc_name_copy(char *dst); static inline int sclp_get_core_info(struct sclp_core_info *info, int early) diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h index a7c3ccf681da..322bdcd4b616 100644 --- a/arch/s390/include/asm/scsw.h +++ b/arch/s390/include/asm/scsw.h @@ -215,6 +215,11 @@ union scsw { #define SNS2_ENV_DATA_PRESENT 0x10 #define SNS2_INPRECISE_END 0x04 +/* + * architectured values for PPRC errors + */ +#define SNS7_INVALID_ON_SEC 0x0e + /** * scsw_is_tm - check for transport mode scsw * @scsw: pointer to scsw @@ -508,9 +513,21 @@ static inline int scsw_cmd_is_valid_zcc(union scsw *scsw) */ static inline int scsw_cmd_is_valid_ectl(union scsw *scsw) { - return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && - !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && - (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS); + /* Must be status pending. */ + if (!(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND)) + return 0; + + /* Must have alert status. */ + if (!(scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS)) + return 0; + + /* Must be alone or together with primary, secondary or both, + * => no intermediate status. + */ + if (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) + return 0; + + return 1; } /** @@ -522,10 +539,25 @@ static inline int scsw_cmd_is_valid_ectl(union scsw *scsw) */ static inline int scsw_cmd_is_valid_pno(union scsw *scsw) { - return (scsw->cmd.fctl != 0) && - (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && - (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) || - (scsw->cmd.actl & SCSW_ACTL_SUSPENDED)); + /* Must indicate at least one I/O function. */ + if (!scsw->cmd.fctl) + return 0; + + /* Must be status pending. */ + if (!(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND)) + return 0; + + /* Can be status pending alone, or with any combination of primary, + * secondary and alert => no intermediate status. + */ + if (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS)) + return 1; + + /* If intermediate, must be suspended. 
*/ + if (scsw->cmd.actl & SCSW_ACTL_SUSPENDED) + return 1; + + return 0; } /** @@ -675,9 +707,21 @@ static inline int scsw_tm_is_valid_q(union scsw *scsw) */ static inline int scsw_tm_is_valid_ectl(union scsw *scsw) { - return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && - !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && - (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS); + /* Must be status pending. */ + if (!(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND)) + return 0; + + /* Must have alert status. */ + if (!(scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS)) + return 0; + + /* Must be alone or together with primary, secondary or both, + * => no intermediate status. + */ + if (scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) + return 0; + + return 1; } /** @@ -689,11 +733,25 @@ static inline int scsw_tm_is_valid_ectl(union scsw *scsw) */ static inline int scsw_tm_is_valid_pno(union scsw *scsw) { - return (scsw->tm.fctl != 0) && - (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && - (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) || - ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && - (scsw->tm.actl & SCSW_ACTL_SUSPENDED))); + /* Must indicate at least one I/O function. */ + if (!scsw->tm.fctl) + return 0; + + /* Must be status pending. */ + if (!(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND)) + return 0; + + /* Can be status pending alone, or with any combination of primary, + * secondary and alert => no intermediate status. + */ + if (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS)) + return 1; + + /* If intermediate, must be suspended. */ + if (scsw->tm.actl & SCSW_ACTL_SUSPENDED) + return 1; + + return 0; } /** diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index f16f4d054ae2..73ed2781073b 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h @@ -3,7 +3,6 @@ * Copyright IBM Corp. 
1999, 2012 * Author(s): Denis Joseph Barrow, * Martin Schwidefsky <schwidefsky@de.ibm.com>, - * Heiko Carstens <heiko.carstens@de.ibm.com>, */ #ifndef __ASM_SMP_H #define __ASM_SMP_H @@ -31,7 +30,8 @@ extern void smp_emergency_stop(void); extern int smp_find_processor_id(u16 address); extern int smp_store_status(int cpu); -extern void smp_save_dump_cpus(void); +extern void smp_save_dump_ipl_cpu(void); +extern void smp_save_dump_secondary_cpus(void); extern void smp_yield_cpu(int cpu); extern void smp_cpu_set_polarization(int cpu, int val); extern int smp_cpu_get_polarization(int cpu); @@ -59,6 +59,7 @@ static inline void smp_cpus_done(unsigned int max_cpus) { } +extern int smp_reinit_ipl_cpu(void); extern int smp_rescan_cpus(void); extern void __noreturn cpu_die(void); extern void __cpu_die(unsigned int cpu); diff --git a/arch/s390/include/asm/softirq_stack.h b/arch/s390/include/asm/softirq_stack.h index fd17f25704bd..1ac5115d3115 100644 --- a/arch/s390/include/asm/softirq_stack.h +++ b/arch/s390/include/asm/softirq_stack.h @@ -5,9 +5,10 @@ #include <asm/lowcore.h> #include <asm/stacktrace.h> +#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK static inline void do_softirq_own_stack(void) { call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq); } - +#endif #endif /* __ASM_S390_SOFTIRQ_STACK_H */ diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 888a2f1c9ee3..37127cd7749e 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -77,8 +77,9 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp) static inline void arch_spin_unlock(arch_spinlock_t *lp) { typecheck(int, lp->lock); + kcsan_release(); asm_inline volatile( - ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */ + ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */ " sth %1,%0\n" : "=R" (((unsigned short *) &lp->lock)[1]) : "d" (0) : "cc", "memory"); diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h index dd00d98804ec..b23c658dce77 100644 --- a/arch/s390/include/asm/stacktrace.h +++ b/arch/s390/include/asm/stacktrace.h @@ -36,25 +36,24 @@ static inline bool on_stack(struct stack_info *info, /* * Stack layout of a C stack frame. + * Kernel uses the packed stack layout (-mpacked-stack). */ -#ifndef __PACK_STACK struct stack_frame { - unsigned long back_chain; - unsigned long empty1[5]; - unsigned long gprs[10]; - unsigned int empty2[8]; -}; -#else -struct stack_frame { - unsigned long empty1[5]; - unsigned int empty2[8]; + union { + unsigned long empty[9]; + struct { + unsigned long sie_control_block; + unsigned long sie_savearea; + unsigned long sie_reason; + unsigned long sie_flags; + }; + }; unsigned long gprs[10]; unsigned long back_chain; }; -#endif /* - * Unlike current_stack_pointer() which simply returns current value of %r15 + * Unlike current_stack_pointer which simply contains the current value of %r15 * current_frame_address() returns function stack frame address, which matches * %r15 upon function invocation. 
It may differ from %r15 later if function * allocates stack for local variables or new stack frame to call other diff --git a/arch/s390/include/asm/stp.h b/arch/s390/include/asm/stp.h index ba07463897c1..4d74d7e33340 100644 --- a/arch/s390/include/asm/stp.h +++ b/arch/s390/include/asm/stp.h @@ -44,8 +44,8 @@ struct stp_sstpi { u32 : 32; u32 ctnid[3]; u32 : 32; - u32 todoff[4]; - u32 rsvd[48]; + u64 todoff; + u32 rsvd[50]; } __packed; struct stp_tzib { diff --git a/arch/s390/include/asm/syscall_wrapper.h b/arch/s390/include/asm/syscall_wrapper.h index ad2c996e7e93..fde7e6b1df48 100644 --- a/arch/s390/include/asm/syscall_wrapper.h +++ b/arch/s390/include/asm/syscall_wrapper.h @@ -162,4 +162,4 @@ __diag_pop(); \ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) -#endif /* _ASM_X86_SYSCALL_WRAPPER_H */ +#endif /* _ASM_S390_SYSCALL_WRAPPER_H */ diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h index fe7b3f8f0791..ab1c6316055c 100644 --- a/arch/s390/include/asm/sysinfo.h +++ b/arch/s390/include/asm/sysinfo.h @@ -67,12 +67,12 @@ struct sysinfo_1_2_2 { unsigned short cpus_configured; unsigned short cpus_standby; unsigned short cpus_reserved; - unsigned short adjustment[0]; + unsigned short adjustment[]; }; struct sysinfo_1_2_2_extension { unsigned int alt_capability; - unsigned short alt_adjustment[0]; + unsigned short alt_adjustment[]; }; struct sysinfo_2_2_1 { @@ -181,7 +181,7 @@ struct sysinfo_15_1_x { unsigned char reserved1; unsigned char mnest; unsigned char reserved2[4]; - union topology_entry tle[0]; + union topology_entry tle[]; }; int stsi(void *sysinfo, int fc, int sel1, int sel2); diff --git a/arch/s390/include/asm/termios.h b/arch/s390/include/asm/termios.h deleted file mode 100644 index 46fa3020b41e..000000000000 --- a/arch/s390/include/asm/termios.h +++ /dev/null @@ -1,26 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * S390 version - * - * Derived from "include/asm-i386/termios.h" - */ -#ifndef _S390_TERMIOS_H -#define _S390_TERMIOS_H - -#include <uapi/asm/termios.h> - - -/* intr=^C quit=^\ erase=del kill=^U - eof=^D vtime=\0 vmin=\1 sxtc=\0 - start=^Q stop=^S susp=^Z eol=\0 - reprint=^R discard=^U werase=^W lnext=^V - eol2=\0 -*/ -#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" - -#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) -#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) - -#include <asm-generic/termios-base.h> - -#endif /* _S390_TERMIOS_H */ diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 50d9b04ecbd1..ce878e85b6e4 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -148,7 +148,7 @@ struct ptff_qui { asm volatile( \ " lgr 0,%[reg0]\n" \ " lgr 1,%[reg1]\n" \ - " .insn e,0x0104\n" \ + " ptff\n" \ " ipm %[rc]\n" \ " srl %[rc],28\n" \ : [rc] "=&d" (rc), "+m" (*(struct addrtype *)reg1) \ @@ -187,20 +187,17 @@ static inline unsigned long get_tod_clock(void) static inline unsigned long get_tod_clock_fast(void) { -#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES unsigned long clk; asm volatile("stckf %0" : "=Q" (clk) : : "cc"); return clk; -#else - return get_tod_clock(); -#endif } static inline cycles_t get_cycles(void) { return (cycles_t) get_tod_clock() >> 2; } +#define get_cycles get_cycles int get_phys_clock(unsigned long *clock); void init_cpu_timer(void); diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index fe6407f0eb1b..3a5c8fb590e5 
100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -27,9 +27,6 @@ static inline void tlb_flush(struct mmu_gather *tlb); static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size); -#define tlb_start_vma(tlb, vma) do { } while (0) -#define tlb_end_vma(tlb, vma) do { } while (0) - #define tlb_flush tlb_flush #define pte_free_tlb pte_free_tlb #define pmd_free_tlb pmd_free_tlb diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index 6448bb5be10c..a6e2cd89b609 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h @@ -25,9 +25,7 @@ static inline void __tlb_flush_idte(unsigned long asce) if (MACHINE_HAS_TLB_GUEST) opt |= IDTE_GUEST_ASCE; /* Global TLB flush for the mm */ - asm volatile( - " .insn rrf,0xb98e0000,0,%0,%1,0" - : : "a" (opt), "a" (asce) : "cc"); + asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc"); } /* diff --git a/arch/s390/include/asm/tpi.h b/arch/s390/include/asm/tpi.h index 1ac538b8cbf5..f76e5fdff23a 100644 --- a/arch/s390/include/asm/tpi.h +++ b/arch/s390/include/asm/tpi.h @@ -19,6 +19,19 @@ struct tpi_info { u32 :12; } __packed __aligned(4); +/* I/O-Interruption Code as stored by TPI for an Adapter I/O */ +struct tpi_adapter_info { + u32 aism:8; + u32 :22; + u32 error:1; + u32 forward:1; + u32 reserved; + u32 adapter_IO:1; + u32 directed_irq:1; + u32 isc:3; + u32 :27; +} __packed __aligned(4); + #endif /* __ASSEMBLY__ */ #endif /* _ASM_S390_TPI_H */ diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index ce550d06abc3..f7038b800cc3 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -3,7 +3,7 @@ * S390 version * Copyright IBM Corp. 1999, 2000 * Author(s): Hartmut Penner (hp@de.ibm.com), - * Martin Schwidefsky (schwidefsky@de.ibm.com) + * Martin Schwidefsky (schwidefsky@de.ibm.com) * * Derived from "include/asm-i386/uaccess.h" */ @@ -13,26 +13,15 @@ /* * User space memory access functions */ +#include <asm/asm-extable.h> #include <asm/processor.h> #include <asm/ctl_reg.h> #include <asm/extable.h> #include <asm/facility.h> +#include <asm-generic/access_ok.h> void debug_user_asce(int exit); -static inline int __range_ok(unsigned long addr, unsigned long size) -{ - return 1; -} - -#define __access_ok(addr, size) \ -({ \ - __chk_user_ptr(addr); \ - __range_ok((unsigned long)(addr), (size)); \ -}) - -#define access_ok(addr, size) __access_ok(addr, size) - unsigned long __must_check raw_copy_from_user(void *to, const void __user *from, unsigned long n); @@ -44,30 +33,72 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n); #define INLINE_COPY_TO_USER #endif -int __put_user_bad(void) __attribute__((noreturn)); -int __get_user_bad(void) __attribute__((noreturn)); - -#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES - -#define __put_get_user_asm(to, from, size, insn) \ -({ \ - int __rc; \ - \ - asm volatile( \ - insn " 0,%[spec]\n" \ - "0: mvcos %[_to],%[_from],%[_size]\n" \ - "1: xr %[rc],%[rc]\n" \ - "2:\n" \ - ".pushsection .fixup, \"ax\"\n" \ - "3: lhi %[rc],%[retval]\n" \ - " jg 2b\n" \ - ".popsection\n" \ - EX_TABLE(0b,3b) EX_TABLE(1b,3b) \ - : [rc] "=&d" (__rc), [_to] "+Q" (*(to)) \ - : [_size] "d" (size), [_from] "Q" (*(from)), \ - [retval] "K" (-EFAULT), [spec] "K" (0x81UL) \ - : "cc", "0"); \ - __rc; \ +unsigned long __must_check +_copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key); + +static __always_inline unsigned long 
__must_check +copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key) +{ + if (check_copy_size(to, n, false)) + n = _copy_from_user_key(to, from, n, key); + return n; +} + +unsigned long __must_check +_copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key); + +static __always_inline unsigned long __must_check +copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key) +{ + if (check_copy_size(from, n, true)) + n = _copy_to_user_key(to, from, n, key); + return n; +} + +union oac { + unsigned int val; + struct { + struct { + unsigned short key : 4; + unsigned short : 4; + unsigned short as : 2; + unsigned short : 4; + unsigned short k : 1; + unsigned short a : 1; + } oac1; + struct { + unsigned short key : 4; + unsigned short : 4; + unsigned short as : 2; + unsigned short : 4; + unsigned short k : 1; + unsigned short a : 1; + } oac2; + }; +}; + +int __noreturn __put_user_bad(void); + +#define __put_user_asm(to, from, size) \ +({ \ + union oac __oac_spec = { \ + .oac1.as = PSW_BITS_AS_SECONDARY, \ + .oac1.a = 1, \ + }; \ + int __rc; \ + \ + asm volatile( \ + " lr 0,%[spec]\n" \ + "0: mvcos %[_to],%[_from],%[_size]\n" \ + "1: xr %[rc],%[rc]\n" \ + "2:\n" \ + EX_TABLE_UA_STORE(0b, 2b, %[rc]) \ + EX_TABLE_UA_STORE(1b, 2b, %[rc]) \ + : [rc] "=&d" (__rc), [_to] "+Q" (*(to)) \ + : [_size] "d" (size), [_from] "Q" (*(from)), \ + [spec] "d" (__oac_spec.val) \ + : "cc", "0"); \ + __rc; \ }) static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) @@ -76,24 +107,24 @@ static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned lon switch (size) { case 1: - rc = __put_get_user_asm((unsigned char __user *)ptr, - (unsigned char *)x, - size, "llilh"); + rc = __put_user_asm((unsigned char __user *)ptr, + (unsigned char *)x, + size); break; case 2: - rc = __put_get_user_asm((unsigned short __user *)ptr, - (unsigned short *)x, - size, "llilh"); + rc = __put_user_asm((unsigned short __user *)ptr, + (unsigned short *)x, + size); break; case 4: - rc = __put_get_user_asm((unsigned int __user *)ptr, - (unsigned int *)x, - size, "llilh"); + rc = __put_user_asm((unsigned int __user *)ptr, + (unsigned int *)x, + size); break; case 8: - rc = __put_get_user_asm((unsigned long __user *)ptr, - (unsigned long *)x, - size, "llilh"); + rc = __put_user_asm((unsigned long __user *)ptr, + (unsigned long *)x, + size); break; default: __put_user_bad(); @@ -102,30 +133,55 @@ static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned lon return rc; } +int __noreturn __get_user_bad(void); + +#define __get_user_asm(to, from, size) \ +({ \ + union oac __oac_spec = { \ + .oac2.as = PSW_BITS_AS_SECONDARY, \ + .oac2.a = 1, \ + }; \ + int __rc; \ + \ + asm volatile( \ + " lr 0,%[spec]\n" \ + "0: mvcos 0(%[_to]),%[_from],%[_size]\n" \ + "1: xr %[rc],%[rc]\n" \ + "2:\n" \ + EX_TABLE_UA_LOAD_MEM(0b, 2b, %[rc], %[_to], %[_ksize]) \ + EX_TABLE_UA_LOAD_MEM(1b, 2b, %[rc], %[_to], %[_ksize]) \ + : [rc] "=&d" (__rc), "=Q" (*(to)) \ + : [_size] "d" (size), [_from] "Q" (*(from)), \ + [spec] "d" (__oac_spec.val), [_to] "a" (to), \ + [_ksize] "K" (size) \ + : "cc", "0"); \ + __rc; \ +}) + static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) { int rc; switch (size) { case 1: - rc = __put_get_user_asm((unsigned char *)x, - (unsigned char __user *)ptr, - size, "lghi"); + rc = __get_user_asm((unsigned char *)x, + (unsigned char __user *)ptr, + size); 
break; case 2: - rc = __put_get_user_asm((unsigned short *)x, - (unsigned short __user *)ptr, - size, "lghi"); + rc = __get_user_asm((unsigned short *)x, + (unsigned short __user *)ptr, + size); break; case 4: - rc = __put_get_user_asm((unsigned int *)x, - (unsigned int __user *)ptr, - size, "lghi"); + rc = __get_user_asm((unsigned int *)x, + (unsigned int __user *)ptr, + size); break; case 8: - rc = __put_get_user_asm((unsigned long *)x, - (unsigned long __user *)ptr, - size, "lghi"); + rc = __get_user_asm((unsigned long *)x, + (unsigned long __user *)ptr, + size); break; default: __get_user_bad(); @@ -134,97 +190,81 @@ static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsign return rc; } -#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */ - -static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) -{ - size = raw_copy_to_user(ptr, x, size); - return size ? -EFAULT : 0; -} - -static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) -{ - size = raw_copy_from_user(x, ptr, size); - return size ? -EFAULT : 0; -} - -#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */ - /* * These are the main single-value transfer routines. They automatically * use the right size if we just have the right pointer type. */ -#define __put_user(x, ptr) \ -({ \ - __typeof__(*(ptr)) __x = (x); \ - int __pu_err = -EFAULT; \ - __chk_user_ptr(ptr); \ - switch (sizeof (*(ptr))) { \ - case 1: \ - case 2: \ - case 4: \ - case 8: \ - __pu_err = __put_user_fn(&__x, ptr, \ - sizeof(*(ptr))); \ - break; \ - default: \ - __put_user_bad(); \ - break; \ - } \ - __builtin_expect(__pu_err, 0); \ +#define __put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __x = (x); \ + int __pu_err = -EFAULT; \ + \ + __chk_user_ptr(ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + case 2: \ + case 4: \ + case 8: \ + __pu_err = __put_user_fn(&__x, ptr, sizeof(*(ptr))); \ + break; \ + default: \ + __put_user_bad(); \ + break; \ + } \ + __builtin_expect(__pu_err, 0); \ }) -#define put_user(x, ptr) \ -({ \ - might_fault(); \ - __put_user(x, ptr); \ +#define put_user(x, ptr) \ +({ \ + might_fault(); \ + __put_user(x, ptr); \ }) - -#define __get_user(x, ptr) \ -({ \ - int __gu_err = -EFAULT; \ - __chk_user_ptr(ptr); \ - switch (sizeof(*(ptr))) { \ - case 1: { \ - unsigned char __x = 0; \ - __gu_err = __get_user_fn(&__x, ptr, \ - sizeof(*(ptr))); \ - (x) = *(__force __typeof__(*(ptr)) *) &__x; \ - break; \ - }; \ - case 2: { \ - unsigned short __x = 0; \ - __gu_err = __get_user_fn(&__x, ptr, \ - sizeof(*(ptr))); \ - (x) = *(__force __typeof__(*(ptr)) *) &__x; \ - break; \ - }; \ - case 4: { \ - unsigned int __x = 0; \ - __gu_err = __get_user_fn(&__x, ptr, \ - sizeof(*(ptr))); \ - (x) = *(__force __typeof__(*(ptr)) *) &__x; \ - break; \ - }; \ - case 8: { \ - unsigned long long __x = 0; \ - __gu_err = __get_user_fn(&__x, ptr, \ - sizeof(*(ptr))); \ - (x) = *(__force __typeof__(*(ptr)) *) &__x; \ - break; \ - }; \ - default: \ - __get_user_bad(); \ - break; \ - } \ - __builtin_expect(__gu_err, 0); \ +#define __get_user(x, ptr) \ +({ \ + int __gu_err = -EFAULT; \ + \ + __chk_user_ptr(ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: { \ + unsigned char __x; \ + \ + __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ + (x) = *(__force __typeof__(*(ptr)) *)&__x; \ + break; \ + }; \ + case 2: { \ + unsigned short __x; \ + \ + __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ + (x) = *(__force __typeof__(*(ptr)) *)&__x; \ + break; \ + }; \ + case 4: { \ + unsigned int __x; \ + \ + __gu_err = 
__get_user_fn(&__x, ptr, sizeof(*(ptr))); \ + (x) = *(__force __typeof__(*(ptr)) *)&__x; \ + break; \ + }; \ + case 8: { \ + unsigned long __x; \ + \ + __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ + (x) = *(__force __typeof__(*(ptr)) *)&__x; \ + break; \ + }; \ + default: \ + __get_user_bad(); \ + break; \ + } \ + __builtin_expect(__gu_err, 0); \ }) -#define get_user(x, ptr) \ -({ \ - might_fault(); \ - __get_user(x, ptr); \ +#define get_user(x, ptr) \ +({ \ + might_fault(); \ + __get_user(x, ptr); \ }) /* @@ -245,11 +285,8 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo return __clear_user(to, n); } -int copy_to_user_real(void __user *dest, void *src, unsigned long count); void *s390_kernel_write(void *dst, const void *src, size_t size); -#define HAVE_GET_KERNEL_NOFAULT - int __noreturn __put_kernel_bad(void); #define __put_kernel_asm(val, to, insn) \ @@ -257,23 +294,20 @@ int __noreturn __put_kernel_bad(void); int __rc; \ \ asm volatile( \ - "0: " insn " %2,%1\n" \ - "1: xr %0,%0\n" \ + "0: " insn " %[_val],%[_to]\n" \ + "1: xr %[rc],%[rc]\n" \ "2:\n" \ - ".pushsection .fixup, \"ax\"\n" \ - "3: lhi %0,%3\n" \ - " jg 2b\n" \ - ".popsection\n" \ - EX_TABLE(0b,3b) EX_TABLE(1b,3b) \ - : "=d" (__rc), "+Q" (*(to)) \ - : "d" (val), "K" (-EFAULT) \ + EX_TABLE_UA_STORE(0b, 2b, %[rc]) \ + EX_TABLE_UA_STORE(1b, 2b, %[rc]) \ + : [rc] "=d" (__rc), [_to] "+Q" (*(to)) \ + : [_val] "d" (val) \ : "cc"); \ __rc; \ }) #define __put_kernel_nofault(dst, src, type, err_label) \ do { \ - u64 __x = (u64)(*((type *)(src))); \ + unsigned long __x = (unsigned long)(*((type *)(src))); \ int __pk_err; \ \ switch (sizeof(type)) { \ @@ -304,16 +338,13 @@ int __noreturn __get_kernel_bad(void); int __rc; \ \ asm volatile( \ - "0: " insn " %1,%2\n" \ - "1: xr %0,%0\n" \ + "0: " insn " %[_val],%[_from]\n" \ + "1: xr %[rc],%[rc]\n" \ "2:\n" \ - ".pushsection .fixup, \"ax\"\n" \ - "3: lhi %0,%3\n" \ - " jg 2b\n" \ - ".popsection\n" \ - EX_TABLE(0b,3b) EX_TABLE(1b,3b) \ - : "=d" (__rc), "+d" (val) \ - : "Q" (*(from)), "K" (-EFAULT) \ + EX_TABLE_UA_LOAD_REG(0b, 2b, %[rc], %[_val]) \ + EX_TABLE_UA_LOAD_REG(1b, 2b, %[rc], %[_val]) \ + : [rc] "=d" (__rc), [_val] "=d" (val) \ + : [_from] "Q" (*(from)) \ : "cc"); \ __rc; \ }) @@ -324,28 +355,28 @@ do { \ \ switch (sizeof(type)) { \ case 1: { \ - u8 __x = 0; \ + unsigned char __x; \ \ __gk_err = __get_kernel_asm(__x, (type *)(src), "ic"); \ *((type *)(dst)) = (type)__x; \ break; \ }; \ case 2: { \ - u16 __x = 0; \ + unsigned short __x; \ \ __gk_err = __get_kernel_asm(__x, (type *)(src), "lh"); \ *((type *)(dst)) = (type)__x; \ break; \ }; \ case 4: { \ - u32 __x = 0; \ + unsigned int __x; \ \ __gk_err = __get_kernel_asm(__x, (type *)(src), "l"); \ *((type *)(dst)) = (type)__x; \ break; \ }; \ case 8: { \ - u64 __x = 0; \ + unsigned long __x; \ \ __gk_err = __get_kernel_asm(__x, (type *)(src), "lg"); \ *((type *)(dst)) = (type)__x; \ diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index 9e9f75ef046a..4260bc5ce7f8 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -28,6 +28,7 @@ #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK # ifdef CONFIG_COMPAT +# define __ARCH_WANT_COMPAT_STAT # define __ARCH_WANT_SYS_TIME32 # define __ARCH_WANT_SYS_UTIME32 # endif diff --git a/arch/s390/include/asm/unwind.h b/arch/s390/include/asm/unwind.h index 5ebf534ef753..02462e7100c1 100644 --- a/arch/s390/include/asm/unwind.h +++ b/arch/s390/include/asm/unwind.h @@ -4,6 +4,8 @@ 
#include <linux/sched.h> #include <linux/ftrace.h> +#include <linux/kprobes.h> +#include <linux/llist.h> #include <asm/ptrace.h> #include <asm/stacktrace.h> @@ -36,10 +38,21 @@ struct unwind_state { struct pt_regs *regs; unsigned long sp, ip; int graph_idx; + struct llist_node *kr_cur; bool reliable; bool error; }; +/* Recover the return address modified by kretprobe and ftrace_graph. */ +static inline unsigned long unwind_recover_ret_addr(struct unwind_state *state, + unsigned long ip) +{ + ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, (void *)state->sp); + if (is_kretprobe_trampoline(ip)) + ip = kretprobe_find_ret_addr(state->task, (void *)state->sp, &state->kr_cur); + return ip; +} + void __unwind_start(struct unwind_state *state, struct task_struct *task, struct pt_regs *regs, unsigned long first_frame); bool unwind_next_frame(struct unwind_state *state); diff --git a/arch/s390/include/asm/user.h b/arch/s390/include/asm/user.h index 0ca572ced21b..8e8aaf48582e 100644 --- a/arch/s390/include/asm/user.h +++ b/arch/s390/include/asm/user.h @@ -67,9 +67,5 @@ struct user { unsigned long magic; /* To uniquely identify a core file */ char u_comm[32]; /* User command that was responsible */ }; -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) #endif /* _S390_USER_H */ diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h index 72d3e49c2860..be3ef9dd6972 100644 --- a/arch/s390/include/asm/uv.h +++ b/arch/s390/include/asm/uv.h @@ -2,7 +2,7 @@ /* * Ultravisor Interfaces * - * Copyright IBM Corp. 2019 + * Copyright IBM Corp. 2019, 2022 * * Author(s): * Vasily Gorbik <gor@linux.ibm.com> @@ -50,8 +50,13 @@ #define UVC_CMD_SET_UNSHARE_ALL 0x0340 #define UVC_CMD_PIN_PAGE_SHARED 0x0341 #define UVC_CMD_UNPIN_PAGE_SHARED 0x0342 +#define UVC_CMD_DUMP_INIT 0x0400 +#define UVC_CMD_DUMP_CONF_STOR_STATE 0x0401 +#define UVC_CMD_DUMP_CPU 0x0402 +#define UVC_CMD_DUMP_COMPLETE 0x0403 #define UVC_CMD_SET_SHARED_ACCESS 0x1000 #define UVC_CMD_REMOVE_SHARED_ACCESS 0x1001 +#define UVC_CMD_RETR_ATTEST 0x1020 /* Bits in installed uv calls */ enum uv_cmds_inst { @@ -76,10 +81,16 @@ enum uv_cmds_inst { BIT_UVC_CMD_UNSHARE_ALL = 20, BIT_UVC_CMD_PIN_PAGE_SHARED = 21, BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22, + BIT_UVC_CMD_DUMP_INIT = 24, + BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE = 25, + BIT_UVC_CMD_DUMP_CPU = 26, + BIT_UVC_CMD_DUMP_COMPLETE = 27, + BIT_UVC_CMD_RETR_ATTEST = 28, }; enum uv_feat_ind { BIT_UV_FEAT_MISC = 0, + BIT_UV_FEAT_AIV = 1, }; struct uv_cb_header { @@ -91,23 +102,32 @@ struct uv_cb_header { /* Query Ultravisor Information */ struct uv_cb_qui { - struct uv_cb_header header; - u64 reserved08; - u64 inst_calls_list[4]; - u64 reserved30[2]; - u64 uv_base_stor_len; - u64 reserved48; - u64 conf_base_phys_stor_len; - u64 conf_base_virt_stor_len; - u64 conf_virt_var_stor_len; - u64 cpu_stor_len; - u32 reserved70[3]; - u32 max_num_sec_conf; - u64 max_guest_stor_addr; - u8 reserved88[158 - 136]; - u16 max_guest_cpu_id; - u64 uv_feature_indications; - u8 reserveda0[200 - 168]; + struct uv_cb_header header; /* 0x0000 */ + u64 reserved08; /* 0x0008 */ + u64 inst_calls_list[4]; /* 0x0010 */ + u64 reserved30[2]; /* 0x0030 */ + u64 uv_base_stor_len; /* 0x0040 */ + u64 reserved48; /* 0x0048 */ + u64 conf_base_phys_stor_len; /* 0x0050 */ + u64 conf_base_virt_stor_len; /* 0x0058 */ + u64 conf_virt_var_stor_len; /* 0x0060 */ + u64 cpu_stor_len; /* 0x0068 */ + u32 reserved70[3]; /* 0x0070 */ + u32 
max_num_sec_conf; /* 0x007c */ + u64 max_guest_stor_addr; /* 0x0080 */ + u8 reserved88[158 - 136]; /* 0x0088 */ + u16 max_guest_cpu_id; /* 0x009e */ + u64 uv_feature_indications; /* 0x00a0 */ + u64 reserveda8; /* 0x00a8 */ + u64 supp_se_hdr_versions; /* 0x00b0 */ + u64 supp_se_hdr_pcf; /* 0x00b8 */ + u64 reservedc0; /* 0x00c0 */ + u64 conf_dump_storage_state_len; /* 0x00c8 */ + u64 conf_dump_finalize_len; /* 0x00d0 */ + u64 reservedd8; /* 0x00d8 */ + u64 supp_att_req_hdr_ver; /* 0x00e0 */ + u64 supp_att_pflags; /* 0x00e8 */ + u8 reservedf0[256 - 240]; /* 0x00f0 */ } __packed __aligned(8); /* Initialize Ultravisor */ @@ -218,6 +238,50 @@ struct uv_cb_share { u64 reserved28; } __packed __aligned(8); +/* Retrieve Attestation Measurement */ +struct uv_cb_attest { + struct uv_cb_header header; /* 0x0000 */ + u64 reserved08[2]; /* 0x0008 */ + u64 arcb_addr; /* 0x0018 */ + u64 cont_token; /* 0x0020 */ + u8 reserved28[6]; /* 0x0028 */ + u16 user_data_len; /* 0x002e */ + u8 user_data[256]; /* 0x0030 */ + u32 reserved130[3]; /* 0x0130 */ + u32 meas_len; /* 0x013c */ + u64 meas_addr; /* 0x0140 */ + u8 config_uid[16]; /* 0x0148 */ + u32 reserved158; /* 0x0158 */ + u32 add_data_len; /* 0x015c */ + u64 add_data_addr; /* 0x0160 */ + u64 reserved168[4]; /* 0x0168 */ +} __packed __aligned(8); + +struct uv_cb_dump_cpu { + struct uv_cb_header header; + u64 reserved08[2]; + u64 cpu_handle; + u64 dump_area_origin; + u64 reserved28[5]; +} __packed __aligned(8); + +struct uv_cb_dump_stor_state { + struct uv_cb_header header; + u64 reserved08[2]; + u64 config_handle; + u64 dump_area_origin; + u64 gaddr; + u64 reserved28[4]; +} __packed __aligned(8); + +struct uv_cb_dump_complete { + struct uv_cb_header header; + u64 reserved08[2]; + u64 config_handle; + u64 dump_area_origin; + u64 reserved30[5]; +} __packed __aligned(8); + static inline int __uv_call(unsigned long r1, unsigned long r2) { int cc; @@ -285,6 +349,12 @@ struct uv_info { unsigned int max_num_sec_conf; unsigned short max_guest_cpu_id; unsigned long uv_feature_indications; + unsigned long supp_se_hdr_ver; + unsigned long supp_se_hdr_pcf; + unsigned long conf_dump_storage_state_len; + unsigned long conf_dump_finalize_len; + unsigned long supp_att_req_hdr_ver; + unsigned long supp_att_pflags; }; extern struct uv_info uv_info; @@ -356,6 +426,7 @@ static inline int is_prot_virt_host(void) } int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb); +int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr); int uv_destroy_owned_page(unsigned long paddr); int uv_convert_from_secure(unsigned long paddr); int uv_convert_owned_from_secure(unsigned long paddr); diff --git a/arch/s390/include/asm/vx-insn.h b/arch/s390/include/asm/vx-insn.h index 0c05a673811c..95480ed9149e 100644 --- a/arch/s390/include/asm/vx-insn.h +++ b/arch/s390/include/asm/vx-insn.h @@ -366,17 +366,27 @@ .macro VLM vfrom, vto, disp, base, hint=3 VX_NUM v1, \vfrom VX_NUM v3, \vto - GR_NUM b2, \base /* Base register */ + GR_NUM b2, \base .word 0xE700 | ((v1&15) << 4) | (v3&15) .word (b2 << 12) | (\disp) MRXBOPC \hint, 0x36, v1, v3 .endm +/* VECTOR STORE */ +.macro VST vr1, disp, index="%r0", base + VX_NUM v1, \vr1 + GR_NUM x2, \index + GR_NUM b2, \base + .word 0xE700 | ((v1&15) << 4) | (x2&15) + .word (b2 << 12) | (\disp) + MRXBOPC 0, 0x0E, v1 +.endm + /* VECTOR STORE MULTIPLE */ .macro VSTM vfrom, vto, disp, base, hint=3 VX_NUM v1, \vfrom VX_NUM v3, \vto - GR_NUM b2, \base /* Base register */ + GR_NUM b2, \base .word 0xE700 | ((v1&15) << 4) | (v3&15) .word (b2 << 12) | 
(\disp) MRXBOPC \hint, 0x3E, v1, v3 @@ -411,6 +421,81 @@ VUPLL \vr1, \vr2, 2 .endm +/* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */ +.macro VPDI vr1, vr2, vr3, m4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC \m4, 0x84, v1, v2, v3 +.endm + +/* VECTOR REPLICATE */ +.macro VREP vr1, vr3, imm2, m4 + VX_NUM v1, \vr1 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v3&15) + .word \imm2 + MRXBOPC \m4, 0x4D, v1, v3 +.endm +.macro VREPB vr1, vr3, imm2 + VREP \vr1, \vr3, \imm2, 0 +.endm +.macro VREPH vr1, vr3, imm2 + VREP \vr1, \vr3, \imm2, 1 +.endm +.macro VREPF vr1, vr3, imm2 + VREP \vr1, \vr3, \imm2, 2 +.endm +.macro VREPG vr1, vr3, imm2 + VREP \vr1, \vr3, \imm2, 3 +.endm + +/* VECTOR MERGE HIGH */ +.macro VMRH vr1, vr2, vr3, m4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC \m4, 0x61, v1, v2, v3 +.endm +.macro VMRHB vr1, vr2, vr3 + VMRH \vr1, \vr2, \vr3, 0 +.endm +.macro VMRHH vr1, vr2, vr3 + VMRH \vr1, \vr2, \vr3, 1 +.endm +.macro VMRHF vr1, vr2, vr3 + VMRH \vr1, \vr2, \vr3, 2 +.endm +.macro VMRHG vr1, vr2, vr3 + VMRH \vr1, \vr2, \vr3, 3 +.endm + +/* VECTOR MERGE LOW */ +.macro VMRL vr1, vr2, vr3, m4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC \m4, 0x60, v1, v2, v3 +.endm +.macro VMRLB vr1, vr2, vr3 + VMRL \vr1, \vr2, \vr3, 0 +.endm +.macro VMRLH vr1, vr2, vr3 + VMRL \vr1, \vr2, \vr3, 1 +.endm +.macro VMRLF vr1, vr2, vr3 + VMRL \vr1, \vr2, \vr3, 2 +.endm +.macro VMRLG vr1, vr2, vr3 + VMRL \vr1, \vr2, \vr3, 3 +.endm + /* Vector integer instructions */ @@ -557,5 +642,37 @@ VESRAV \vr1, \vr2, \vr3, 3 .endm +/* VECTOR ELEMENT ROTATE LEFT LOGICAL */ +.macro VERLL vr1, vr3, disp, base="%r0", m4 + VX_NUM v1, \vr1 + VX_NUM v3, \vr3 + GR_NUM b2, \base + .word 0xE700 | ((v1&15) << 4) | (v3&15) + .word (b2 << 12) | (\disp) + MRXBOPC \m4, 0x33, v1, v3 +.endm +.macro VERLLB vr1, vr3, disp, base="%r0" + VERLL \vr1, \vr3, \disp, \base, 0 +.endm +.macro VERLLH vr1, vr3, disp, base="%r0" + VERLL \vr1, \vr3, \disp, \base, 1 +.endm +.macro VERLLF vr1, vr3, disp, base="%r0" + VERLL \vr1, \vr3, \disp, \base, 2 +.endm +.macro VERLLG vr1, vr3, disp, base="%r0" + VERLL \vr1, \vr3, \disp, \base, 3 +.endm + +/* VECTOR SHIFT LEFT DOUBLE BY BYTE */ +.macro VSLDB vr1, vr2, vr3, imm4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) | (\imm4) + MRXBOPC 0, 0x77, v1, v2, v3 +.endm + #endif /* __ASSEMBLY__ */ #endif /* __ASM_S390_VX_INSN_H */ |
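The reworked __put_user_asm()/__get_user_asm() macros above program MVCOS through general register 0 using the new union oac operand. As a minimal sketch of how that operand-access-control word could be composed for a key-checked store, assuming a hypothetical helper name (only union oac and PSW_BITS_AS_SECONDARY come from the headers shown above):

/* Hypothetical helper: build the MVCOS operand-access-control word for a
 * key-checked store to user space (operand 1 in the secondary address
 * space), mirroring what __put_user_asm() does for the non-keyed case.
 */
#include <asm/ptrace.h>		/* PSW_BITS_AS_SECONDARY */
#include <asm/uaccess.h>	/* union oac */

static inline unsigned int oac_user_store(unsigned char access_key)
{
	union oac spec = {
		.oac1.key = access_key,			/* 4-bit storage access key */
		.oac1.as  = PSW_BITS_AS_SECONDARY,	/* user space is the secondary space */
		.oac1.k   = 1,				/* apply the key field */
		.oac1.a   = 1,				/* apply the address-space field */
	};

	return spec.val;	/* value to load into GR0 before MVCOS */
}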
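The new key-checked copy helpers declared above, copy_from_user_key() and copy_to_user_key(), behave like their plain counterparts but perform the user-space access with the given storage access key and return the number of bytes that could not be copied. A minimal usage sketch follows; the calling function and its parameter names are illustrative, not part of the patch:

/* Hypothetical caller: copy a reply buffer to user space while enforcing
 * a caller-supplied storage access key. Like copy_to_user(), the helper
 * returns the number of bytes that were not copied.
 */
#include <linux/errno.h>
#include <linux/uaccess.h>

static int reply_to_user(void __user *uptr, const void *buf,
			 unsigned long len, unsigned long access_key)
{
	unsigned long uncopied;

	uncopied = copy_to_user_key(uptr, buf, len, access_key);
	return uncopied ? -EFAULT : 0;
}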
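The new struct tpi_adapter_info above is a view of the I/O-interruption code stored by TPI when the interruption originates from an adapter, so interruption-handling code can reinterpret a struct tpi_info once the adapter_IO bit is set. A small sketch under that assumption, with a hypothetical predicate name:

/* Hypothetical predicate: report whether an I/O interruption is a directed
 * adapter interruption by overlaying struct tpi_adapter_info on the
 * interruption code passed in as struct tpi_info.
 */
#include <linux/types.h>
#include <asm/tpi.h>

static bool tpi_is_directed_adapter_irq(struct tpi_info *tpi_info)
{
	struct tpi_adapter_info *info = (struct tpi_adapter_info *)tpi_info;

	return info->adapter_IO && info->directed_irq;
}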