Diffstat (limited to 'arch/s390/include')
-rw-r--r-- arch/s390/include/asm/Kbuild | 1
-rw-r--r-- arch/s390/include/asm/abs_lowcore.h | 27
-rw-r--r-- arch/s390/include/asm/access-regs.h | 38
-rw-r--r-- arch/s390/include/asm/airq.h | 8
-rw-r--r-- arch/s390/include/asm/ap.h | 261
-rw-r--r-- arch/s390/include/asm/appldata.h | 6
-rw-r--r-- arch/s390/include/asm/asm-extable.h | 31
-rw-r--r-- arch/s390/include/asm/asm-prototypes.h | 6
-rw-r--r-- arch/s390/include/asm/bitops.h | 73
-rw-r--r-- arch/s390/include/asm/bug.h | 4
-rw-r--r-- arch/s390/include/asm/bugs.h | 21
-rw-r--r-- arch/s390/include/asm/ccwdev.h | 6
-rw-r--r-- arch/s390/include/asm/checksum.h | 28
-rw-r--r-- arch/s390/include/asm/cio.h | 9
-rw-r--r-- arch/s390/include/asm/cmpxchg.h | 149
-rw-r--r-- arch/s390/include/asm/compat.h | 2
-rw-r--r-- arch/s390/include/asm/cpacf.h | 7
-rw-r--r-- arch/s390/include/asm/cpu_mcf.h | 112
-rw-r--r-- arch/s390/include/asm/cpu_mf.h | 82
-rw-r--r-- arch/s390/include/asm/cpufeature.h | 23
-rw-r--r-- arch/s390/include/asm/cputime.h | 19
-rw-r--r-- arch/s390/include/asm/ctl_reg.h | 145
-rw-r--r-- arch/s390/include/asm/ctlreg.h | 255
-rw-r--r-- arch/s390/include/asm/debug.h | 10
-rw-r--r-- arch/s390/include/asm/diag.h | 34
-rw-r--r-- arch/s390/include/asm/dma-types.h | 103
-rw-r--r-- arch/s390/include/asm/dma.h | 10
-rw-r--r-- arch/s390/include/asm/eadm.h | 5
-rw-r--r-- arch/s390/include/asm/entry-common.h | 10
-rw-r--r-- arch/s390/include/asm/facility.h | 6
-rw-r--r-- arch/s390/include/asm/fault.h | 28
-rw-r--r-- arch/s390/include/asm/fcx.h | 15
-rw-r--r-- arch/s390/include/asm/fpu-insn-asm.h (renamed from arch/s390/include/asm/vx-insn.h) | 72
-rw-r--r-- arch/s390/include/asm/fpu-insn.h | 486
-rw-r--r-- arch/s390/include/asm/fpu-types.h | 51
-rw-r--r-- arch/s390/include/asm/fpu.h | 295
-rw-r--r-- arch/s390/include/asm/fpu/api.h | 119
-rw-r--r-- arch/s390/include/asm/fpu/internal.h | 62
-rw-r--r-- arch/s390/include/asm/fpu/types.h | 38
-rw-r--r-- arch/s390/include/asm/ftrace.h | 48
-rw-r--r-- arch/s390/include/asm/futex.h | 3
-rw-r--r-- arch/s390/include/asm/gmap.h | 39
-rw-r--r-- arch/s390/include/asm/hugetlb.h | 14
-rw-r--r-- arch/s390/include/asm/idals.h | 180
-rw-r--r-- arch/s390/include/asm/idle.h | 5
-rw-r--r-- arch/s390/include/asm/io.h | 21
-rw-r--r-- arch/s390/include/asm/ipl.h | 11
-rw-r--r-- arch/s390/include/asm/irq.h | 23
-rw-r--r-- arch/s390/include/asm/irq_work.h | 2
-rw-r--r-- arch/s390/include/asm/jump_label.h | 4
-rw-r--r-- arch/s390/include/asm/kasan.h | 35
-rw-r--r-- arch/s390/include/asm/kfence.h | 2
-rw-r--r-- arch/s390/include/asm/kprobes.h | 7
-rw-r--r-- arch/s390/include/asm/kvm_host.h | 73
-rw-r--r-- arch/s390/include/asm/linkage.h | 2
-rw-r--r-- arch/s390/include/asm/lowcore.h | 25
-rw-r--r-- arch/s390/include/asm/maccess.h | 20
-rw-r--r-- arch/s390/include/asm/mem_detect.h | 94
-rw-r--r-- arch/s390/include/asm/mem_encrypt.h | 4
-rw-r--r-- arch/s390/include/asm/mmu.h | 18
-rw-r--r-- arch/s390/include/asm/mmu_context.h | 13
-rw-r--r-- arch/s390/include/asm/msi.h | 17
-rw-r--r-- arch/s390/include/asm/nmi.h | 5
-rw-r--r-- arch/s390/include/asm/nospec-insn.h | 3
-rw-r--r-- arch/s390/include/asm/os_info.h | 10
-rw-r--r-- arch/s390/include/asm/page-states.h | 59
-rw-r--r-- arch/s390/include/asm/page.h | 56
-rw-r--r-- arch/s390/include/asm/pai.h | 15
-rw-r--r-- arch/s390/include/asm/pci.h | 32
-rw-r--r-- arch/s390/include/asm/pci_clp.h | 12
-rw-r--r-- arch/s390/include/asm/pci_dma.h | 120
-rw-r--r-- arch/s390/include/asm/pci_insn.h | 29
-rw-r--r-- arch/s390/include/asm/pci_io.h | 32
-rw-r--r-- arch/s390/include/asm/percpu.h | 36
-rw-r--r-- arch/s390/include/asm/perf_event.h | 2
-rw-r--r-- arch/s390/include/asm/pfault.h | 26
-rw-r--r-- arch/s390/include/asm/pgalloc.h | 13
-rw-r--r-- arch/s390/include/asm/pgtable.h | 187
-rw-r--r-- arch/s390/include/asm/physmem_info.h | 173
-rw-r--r-- arch/s390/include/asm/pkey.h | 4
-rw-r--r-- arch/s390/include/asm/processor.h | 152
-rw-r--r-- arch/s390/include/asm/ptdump.h | 14
-rw-r--r-- arch/s390/include/asm/ptrace.h | 60
-rw-r--r-- arch/s390/include/asm/qdio.h | 17
-rw-r--r-- arch/s390/include/asm/rwonce.h | 31
-rw-r--r-- arch/s390/include/asm/sclp.h | 11
-rw-r--r-- arch/s390/include/asm/scsw.h | 12
-rw-r--r-- arch/s390/include/asm/sections.h | 4
-rw-r--r-- arch/s390/include/asm/serial.h | 7
-rw-r--r-- arch/s390/include/asm/set_memory.h | 74
-rw-r--r-- arch/s390/include/asm/setup.h | 33
-rw-r--r-- arch/s390/include/asm/shmparam.h | 12
-rw-r--r-- arch/s390/include/asm/smp.h | 3
-rw-r--r-- arch/s390/include/asm/softirq_stack.h | 3
-rw-r--r-- arch/s390/include/asm/stacktrace.h | 61
-rw-r--r-- arch/s390/include/asm/string.h | 15
-rw-r--r-- arch/s390/include/asm/switch_to.h | 49
-rw-r--r-- arch/s390/include/asm/syscall_wrapper.h | 151
-rw-r--r-- arch/s390/include/asm/sysinfo.h | 4
-rw-r--r-- arch/s390/include/asm/termios.h | 26
-rw-r--r-- arch/s390/include/asm/thread_info.h | 13
-rw-r--r-- arch/s390/include/asm/timex.h | 13
-rw-r--r-- arch/s390/include/asm/tlb.h | 42
-rw-r--r-- arch/s390/include/asm/tpi.h | 13
-rw-r--r-- arch/s390/include/asm/uaccess.h | 210
-rw-r--r-- arch/s390/include/asm/unwind.h | 12
-rw-r--r-- arch/s390/include/asm/uv.h | 116
-rw-r--r-- arch/s390/include/asm/vdso/data.h | 1
-rw-r--r-- arch/s390/include/asm/vga.h | 7
-rw-r--r-- arch/s390/include/asm/word-at-a-time.h | 65
-rw-r--r-- arch/s390/include/uapi/asm/cmb.h | 2
-rw-r--r-- arch/s390/include/uapi/asm/dasd.h | 18
-rw-r--r-- arch/s390/include/uapi/asm/fs3270.h | 25
-rw-r--r-- arch/s390/include/uapi/asm/ipl.h | 29
-rw-r--r-- arch/s390/include/uapi/asm/kvm.h | 332
-rw-r--r-- arch/s390/include/uapi/asm/pkey.h | 23
-rw-r--r-- arch/s390/include/uapi/asm/ptrace.h | 123
-rw-r--r-- arch/s390/include/uapi/asm/raw3270.h | 75
-rw-r--r-- arch/s390/include/uapi/asm/statfs.h | 4
-rw-r--r-- arch/s390/include/uapi/asm/termios.h | 50
-rw-r--r-- arch/s390/include/uapi/asm/types.h | 15
-rw-r--r-- arch/s390/include/uapi/asm/uvdevice.h | 53
-rw-r--r-- arch/s390/include/uapi/asm/zcrypt.h | 3
123 files changed, 4156 insertions, 1958 deletions
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 1a18d7b82f86..4b904110d27c 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -5,6 +5,5 @@ generated-y += syscall_table.h
generated-y += unistd_nr.h
generic-y += asm-offsets.h
-generic-y += export.h
generic-y += kvm_types.h
generic-y += mcs_spinlock.h
diff --git a/arch/s390/include/asm/abs_lowcore.h b/arch/s390/include/asm/abs_lowcore.h
new file mode 100644
index 000000000000..6f264b79e377
--- /dev/null
+++ b/arch/s390/include/asm/abs_lowcore.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_ABS_LOWCORE_H
+#define _ASM_S390_ABS_LOWCORE_H
+
+#include <asm/lowcore.h>
+
+#define ABS_LOWCORE_MAP_SIZE (NR_CPUS * sizeof(struct lowcore))
+
+extern unsigned long __abs_lowcore;
+
+int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc);
+void abs_lowcore_unmap(int cpu);
+
+static inline struct lowcore *get_abs_lowcore(void)
+{
+ int cpu;
+
+ cpu = get_cpu();
+ return ((struct lowcore *)__abs_lowcore) + cpu;
+}
+
+static inline void put_abs_lowcore(struct lowcore *lc)
+{
+ put_cpu();
+}
+
+#endif /* _ASM_S390_ABS_LOWCORE_H */
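Usage sketch for the new interface above: get_abs_lowcore() pairs with put_abs_lowcore() like get_cpu()/put_cpu(), so the returned slot in the absolute lowcore mapping is only valid while preemption stays disabled. A minimal, hypothetical caller (not part of this patch):

	static void example_update_abs_lowcore(void)
	{
		struct lowcore *abs_lc;

		abs_lc = get_abs_lowcore();	/* implies get_cpu(): preemption off */
		/* ... update fields of this CPU's absolute lowcore ... */
		put_abs_lowcore(abs_lc);	/* implies put_cpu(): preemption on */
	}
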
diff --git a/arch/s390/include/asm/access-regs.h b/arch/s390/include/asm/access-regs.h
new file mode 100644
index 000000000000..1a6412d9f5ad
--- /dev/null
+++ b/arch/s390/include/asm/access-regs.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 1999, 2024
+ */
+
+#ifndef __ASM_S390_ACCESS_REGS_H
+#define __ASM_S390_ACCESS_REGS_H
+
+#include <linux/instrumented.h>
+#include <asm/sigcontext.h>
+
+struct access_regs {
+ unsigned int regs[NUM_ACRS];
+};
+
+static inline void save_access_regs(unsigned int *acrs)
+{
+ struct access_regs *regs = (struct access_regs *)acrs;
+
+ instrument_write(regs, sizeof(*regs));
+ asm volatile("stamy 0,15,%[regs]"
+ : [regs] "=QS" (*regs)
+ :
+ : "memory");
+}
+
+static inline void restore_access_regs(unsigned int *acrs)
+{
+ struct access_regs *regs = (struct access_regs *)acrs;
+
+ instrument_read(regs, sizeof(*regs));
+ asm volatile("lamy 0,15,%[regs]"
+ :
+ : [regs] "QS" (*regs)
+ : "memory");
+}
+
+#endif /* __ASM_S390_ACCESS_REGS_H */
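The new helpers wrap the STAMY/LAMY instructions for all 16 access registers; callers pass an array of NUM_ACRS 32-bit values. A minimal sketch (hypothetical caller, e.g. in context-switch code):

	static void example_switch_acrs(unsigned int *prev_acrs,
					unsigned int *next_acrs)
	{
		save_access_regs(prev_acrs);	/* stamy 0,15 into prev_acrs[] */
		restore_access_regs(next_acrs);	/* lamy 0,15 from next_acrs[] */
	}
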
diff --git a/arch/s390/include/asm/airq.h b/arch/s390/include/asm/airq.h
index 01936fdfaddb..c4c28c2609a5 100644
--- a/arch/s390/include/asm/airq.h
+++ b/arch/s390/include/asm/airq.h
@@ -12,12 +12,12 @@
#include <linux/bit_spinlock.h>
#include <linux/dma-mapping.h>
+#include <asm/tpi.h>
struct airq_struct {
struct hlist_node list; /* Handler queueing. */
- void (*handler)(struct airq_struct *airq, bool floating);
+ void (*handler)(struct airq_struct *airq, struct tpi_info *tpi_info);
u8 *lsi_ptr; /* Local-Summary-Indicator pointer */
- u8 lsi_mask; /* Local-Summary-Indicator mask */
u8 isc; /* Interrupt-subclass */
u8 flags;
};
@@ -46,8 +46,10 @@ struct airq_iv {
#define AIRQ_IV_PTR 4 /* Allocate the ptr array */
#define AIRQ_IV_DATA 8 /* Allocate the data array */
#define AIRQ_IV_CACHELINE 16 /* Cacheline alignment for the vector */
+#define AIRQ_IV_GUESTVEC 32 /* Vector is a pinned guest page */
-struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags);
+struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags,
+ unsigned long *vec);
void airq_iv_release(struct airq_iv *iv);
unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num);
void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num);
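With the extra vec argument, most callers keep passing NULL and let airq_iv_create() allocate the vector internally; AIRQ_IV_GUESTVEC instead hands in an already pinned guest page. A sketch of both call styles (nbits and gvec are hypothetical values, not from this patch):

	/* usual case: vector allocated by airq_iv_create() itself */
	iv = airq_iv_create(nbits, AIRQ_IV_CACHELINE, NULL);

	/* guest case: gvec points to a pinned guest page supplied by the caller */
	giv = airq_iv_create(nbits, AIRQ_IV_GUESTVEC, gvec);
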
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index b515cfa62bd9..43ac4a64f49b 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -43,10 +43,24 @@ struct ap_queue_status {
unsigned int queue_empty : 1;
unsigned int replies_waiting : 1;
unsigned int queue_full : 1;
- unsigned int _pad1 : 4;
+ unsigned int : 3;
+ unsigned int async : 1;
unsigned int irq_enabled : 1;
unsigned int response_code : 8;
- unsigned int _pad2 : 16;
+ unsigned int : 16;
+};
+
+/*
+ * AP queue status reg union to access the reg1
+ * register with the lower 32 bits comprising the
+ * ap queue status.
+ */
+union ap_queue_status_reg {
+ unsigned long value;
+ struct {
+ u32 _pad;
+ struct ap_queue_status status;
+ };
};
/**
@@ -73,16 +87,55 @@ static inline bool ap_instructions_available(void)
return reg1 != 0;
}
+/* TAPQ register GR2 response struct */
+struct ap_tapq_hwinfo {
+ union {
+ unsigned long value;
+ struct {
+ unsigned int fac : 32; /* facility bits */
+ unsigned int apinfo : 32; /* ap type, ... */
+ };
+ struct {
+ unsigned int apsc : 1; /* APSC */
+ unsigned int mex4k : 1; /* AP4KM */
+ unsigned int crt4k : 1; /* AP4KC */
+ unsigned int cca : 1; /* D */
+ unsigned int accel : 1; /* A */
+ unsigned int ep11 : 1; /* X */
+ unsigned int apxa : 1; /* APXA */
+ unsigned int : 1;
+ unsigned int class : 8;
+ unsigned int bs : 2; /* SE bind/assoc */
+ unsigned int : 14;
+ unsigned int at : 8; /* ap type */
+ unsigned int nd : 8; /* nr of domains */
+ unsigned int : 4;
+ unsigned int ml : 4; /* apxl ml */
+ unsigned int : 4;
+ unsigned int qd : 4; /* queue depth */
+ };
+ };
+};
+
+/*
+ * Convenience defines to be used with the bs field from struct ap_tapq_hwinfo
+ */
+#define AP_BS_Q_USABLE 0
+#define AP_BS_Q_USABLE_NO_SECURE_KEY 1
+#define AP_BS_Q_AVAIL_FOR_BINDING 2
+#define AP_BS_Q_UNUSABLE 3
+
/**
* ap_tapq(): Test adjunct processor queue.
* @qid: The AP queue number
- * @info: Pointer to queue descriptor
+ * @info: Pointer to tapq hwinfo struct
*
* Returns AP queue status structure.
*/
-static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
+static inline struct ap_queue_status ap_tapq(ap_qid_t qid,
+ struct ap_tapq_hwinfo *info)
{
- struct ap_queue_status reg1;
+ union ap_queue_status_reg reg1;
unsigned long reg2;
asm volatile(
@@ -91,25 +144,24 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
" .insn rre,0xb2af0000,0,0\n" /* PQAP(TAPQ) */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
" lgr %[reg2],2\n" /* gr2 into reg2 */
- : [reg1] "=&d" (reg1), [reg2] "=&d" (reg2)
+ : [reg1] "=&d" (reg1.value), [reg2] "=&d" (reg2)
: [qid] "d" (qid)
: "cc", "0", "1", "2");
if (info)
- *info = reg2;
- return reg1;
+ info->value = reg2;
+ return reg1.status;
}
/**
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
* @tbit: Test facilities bit
- * @info: Pointer to queue descriptor
+ * @info: Pointer to tapq hwinfo struct
*
* Returns AP queue status structure.
*/
-static inline struct ap_queue_status ap_test_queue(ap_qid_t qid,
- int tbit,
- unsigned long *info)
+static inline struct ap_queue_status ap_test_queue(ap_qid_t qid, int tbit,
+ struct ap_tapq_hwinfo *info)
{
if (tbit)
qid |= 1UL << 23; /* set T bit*/
@@ -119,43 +171,51 @@ static inline struct ap_queue_status ap_test_queue(ap_qid_t qid,
/**
* ap_pqap_rapq(): Reset adjunct processor queue.
* @qid: The AP queue number
+ * @fbit: if != 0 set F bit
*
* Returns AP queue status structure.
*/
-static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
+static inline struct ap_queue_status ap_rapq(ap_qid_t qid, int fbit)
{
unsigned long reg0 = qid | (1UL << 24); /* fc 1UL is RAPQ */
- struct ap_queue_status reg1;
+ union ap_queue_status_reg reg1;
+
+ if (fbit)
+ reg0 |= 1UL << 22;
asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(RAPQ) */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
- : [reg1] "=&d" (reg1)
+ : [reg1] "=&d" (reg1.value)
: [reg0] "d" (reg0)
: "cc", "0", "1");
- return reg1;
+ return reg1.status;
}
/**
* ap_pqap_zapq(): Reset and zeroize adjunct processor queue.
* @qid: The AP queue number
+ * @fbit: if != 0 set F bit
*
* Returns AP queue status structure.
*/
-static inline struct ap_queue_status ap_zapq(ap_qid_t qid)
+static inline struct ap_queue_status ap_zapq(ap_qid_t qid, int fbit)
{
unsigned long reg0 = qid | (2UL << 24); /* fc 2UL is ZAPQ */
- struct ap_queue_status reg1;
+ union ap_queue_status_reg reg1;
+
+ if (fbit)
+ reg0 |= 1UL << 22;
asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(ZAPQ) */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
- : [reg1] "=&d" (reg1)
+ : [reg1] "=&d" (reg1.value)
: [reg0] "d" (reg0)
: "cc", "0", "1");
- return reg1;
+ return reg1.status;
}
/**
@@ -167,15 +227,16 @@ struct ap_config_info {
unsigned int apxa : 1; /* N bit */
unsigned int qact : 1; /* C bit */
unsigned int rc8a : 1; /* R bit */
- unsigned char _reserved1 : 4;
- unsigned char _reserved2[3];
- unsigned char Na; /* max # of APs - 1 */
- unsigned char Nd; /* max # of Domains - 1 */
- unsigned char _reserved3[10];
+ unsigned int : 4;
+ unsigned int apsb : 1; /* B bit */
+ unsigned int : 23;
+ unsigned char na; /* max # of APs - 1 */
+ unsigned char nd; /* max # of Domains - 1 */
+ unsigned char _reserved0[10];
unsigned int apm[8]; /* AP ID mask */
unsigned int aqm[8]; /* AP (usage) queue mask */
unsigned int adm[8]; /* AP (control) domain mask */
- unsigned char _reserved4[16];
+ unsigned char _reserved1[16];
} __aligned(8);
/**
@@ -209,41 +270,40 @@ static inline int ap_qci(struct ap_config_info *config)
* parameter to the PQAP(AQIC) instruction. For details please
* see the AR documentation.
*/
-struct ap_qirq_ctrl {
- unsigned int _res1 : 8;
- unsigned int zone : 8; /* zone info */
- unsigned int ir : 1; /* ir flag: enable (1) or disable (0) irq */
- unsigned int _res2 : 4;
- unsigned int gisc : 3; /* guest isc field */
- unsigned int _res3 : 6;
- unsigned int gf : 2; /* gisa format */
- unsigned int _res4 : 1;
- unsigned int gisa : 27; /* gisa origin */
- unsigned int _res5 : 1;
- unsigned int isc : 3; /* irq sub class */
+union ap_qirq_ctrl {
+ unsigned long value;
+ struct {
+ unsigned int : 8;
+ unsigned int zone : 8; /* zone info */
+ unsigned int ir : 1; /* ir flag: enable (1) or disable (0) irq */
+ unsigned int : 4;
+ unsigned int gisc : 3; /* guest isc field */
+ unsigned int : 6;
+ unsigned int gf : 2; /* gisa format */
+ unsigned int : 1;
+ unsigned int gisa : 27; /* gisa origin */
+ unsigned int : 1;
+ unsigned int isc : 3; /* irq sub class */
+ };
};
/**
* ap_aqic(): Control interruption for a specific AP.
* @qid: The AP queue number
* @qirqctrl: struct ap_qirq_ctrl (64 bit value)
- * @ind: The notification indicator byte
+ * @pa_ind: Physical address of the notification indicator byte
*
* Returns AP queue status.
*/
static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
- struct ap_qirq_ctrl qirqctrl,
- void *ind)
+ union ap_qirq_ctrl qirqctrl,
+ phys_addr_t pa_ind)
{
unsigned long reg0 = qid | (3UL << 24); /* fc 3UL is AQIC */
- union {
- unsigned long value;
- struct ap_qirq_ctrl qirqctrl;
- struct ap_queue_status status;
- } reg1;
- unsigned long reg2 = virt_to_phys(ind);
+ union ap_queue_status_reg reg1;
+ unsigned long reg2 = pa_ind;
- reg1.qirqctrl = qirqctrl;
+ reg1.value = qirqctrl.value;
asm volatile(
" lgr 0,%[reg0]\n" /* qid param into gr0 */
@@ -251,9 +311,9 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
" lgr 2,%[reg2]\n" /* ni addr into gr2 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(AQIC) */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
- : [reg1] "+&d" (reg1)
+ : [reg1] "+&d" (reg1.value)
: [reg0] "d" (reg0), [reg2] "d" (reg2)
- : "cc", "0", "1", "2");
+ : "cc", "memory", "0", "1", "2");
return reg1.status;
}
@@ -276,7 +336,7 @@ union ap_qact_ap_info {
};
/**
- * ap_qact(): Query AP combatibility type.
+ * ap_qact(): Query AP compatibility type.
* @qid: The AP queue number
* @apinfo: On input the info about the AP queue. On output the
* alternate AP queue info provided by the qact function
@@ -288,10 +348,7 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
union ap_qact_ap_info *apinfo)
{
unsigned long reg0 = qid | (5UL << 24) | ((ifbit & 0x01) << 22);
- union {
- unsigned long value;
- struct ap_queue_status status;
- } reg1;
+ union ap_queue_status_reg reg1;
unsigned long reg2;
reg1.value = apinfo->val;
@@ -302,13 +359,66 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
" .insn rre,0xb2af0000,0,0\n" /* PQAP(QACT) */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
" lgr %[reg2],2\n" /* qact out info into reg2 */
- : [reg1] "+&d" (reg1), [reg2] "=&d" (reg2)
+ : [reg1] "+&d" (reg1.value), [reg2] "=&d" (reg2)
: [reg0] "d" (reg0)
: "cc", "0", "1", "2");
apinfo->val = reg2;
return reg1.status;
}
+/*
+ * ap_bapq(): SE bind AP queue.
+ * @qid: The AP queue number
+ *
+ * Returns AP queue status structure.
+ *
+ * Invoking this function in a non-SE environment
+ * may case a specification exception.
+ */
+static inline struct ap_queue_status ap_bapq(ap_qid_t qid)
+{
+ unsigned long reg0 = qid | (7UL << 24); /* fc 7 is BAPQ */
+ union ap_queue_status_reg reg1;
+
+ asm volatile(
+ " lgr 0,%[reg0]\n" /* qid arg into gr0 */
+ " .insn rre,0xb2af0000,0,0\n" /* PQAP(BAPQ) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ : [reg1] "=&d" (reg1.value)
+ : [reg0] "d" (reg0)
+ : "cc", "0", "1");
+
+ return reg1.status;
+}
+
+/*
+ * ap_aapq(): SE associate AP queue.
+ * @qid: The AP queue number
+ * @sec_idx: The secret index
+ *
+ * Returns AP queue status structure.
+ *
+ * Invoking this function in a non-SE environment
+ * may cause a specification exception.
+ */
+static inline struct ap_queue_status ap_aapq(ap_qid_t qid, unsigned int sec_idx)
+{
+ unsigned long reg0 = qid | (8UL << 24); /* fc 8 is AAPQ */
+ unsigned long reg2 = sec_idx;
+ union ap_queue_status_reg reg1;
+
+ asm volatile(
+ " lgr 0,%[reg0]\n" /* qid arg into gr0 */
+ " lgr 2,%[reg2]\n" /* secret index into gr2 */
+ " .insn rre,0xb2af0000,0,0\n" /* PQAP(AAPQ) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ : [reg1] "=&d" (reg1.value)
+ : [reg0] "d" (reg0), [reg2] "d" (reg2)
+ : "cc", "0", "1", "2");
+
+ return reg1.status;
+}
+
/**
* ap_nqap(): Send message to adjunct processor queue.
* @qid: The AP queue number
@@ -327,7 +437,7 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
{
unsigned long reg0 = qid | 0x40000000UL; /* 0x4... is last msg part */
union register_pair nqap_r1, nqap_r2;
- struct ap_queue_status reg1;
+ union ap_queue_status_reg reg1;
nqap_r1.even = (unsigned int)(psmid >> 32);
nqap_r1.odd = psmid & 0xffffffff;
@@ -339,21 +449,22 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
"0: .insn rre,0xb2ad0000,%[nqap_r1],%[nqap_r2]\n"
" brc 2,0b\n" /* handle partial completion */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
- : [reg0] "+&d" (reg0), [reg1] "=&d" (reg1),
+ : [reg0] "+&d" (reg0), [reg1] "=&d" (reg1.value),
[nqap_r2] "+&d" (nqap_r2.pair)
: [nqap_r1] "d" (nqap_r1.pair)
: "cc", "memory", "0", "1");
- return reg1;
+ return reg1.status;
}
/**
* ap_dqap(): Receive message from adjunct processor queue.
* @qid: The AP queue number
* @psmid: Pointer to program supplied message identifier
- * @msg: The message text
- * @length: The message length
- * @reslength: Resitual length on return
- * @resgr0: input: gr0 value (only used if != 0), output: resitual gr0 content
+ * @msg: Pointer to message buffer
+ * @msglen: Message buffer size
+ * @length: Pointer to length of actually written bytes
+ * @reslength: Residual length on return
+ * @resgr0: input: gr0 value (only used if != 0), output: residual gr0 content
*
* Returns AP queue status structure.
* Condition code 1 on DQAP means the receive has taken place
@@ -377,20 +488,21 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
* *resgr0 is to be used instead of qid to further process this entry.
*/
static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
- unsigned long long *psmid,
- void *msg, size_t length,
+ unsigned long *psmid,
+ void *msg, size_t msglen,
+ size_t *length,
size_t *reslength,
unsigned long *resgr0)
{
unsigned long reg0 = resgr0 && *resgr0 ? *resgr0 : qid | 0x80000000UL;
- struct ap_queue_status reg1;
+ union ap_queue_status_reg reg1;
unsigned long reg2;
union register_pair rp1, rp2;
rp1.even = 0UL;
rp1.odd = 0UL;
rp2.even = (unsigned long)msg;
- rp2.odd = (unsigned long)length;
+ rp2.odd = (unsigned long)msglen;
asm volatile(
" lgr 0,%[reg0]\n" /* qid param into gr0 */
@@ -402,8 +514,9 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
"2: lgr %[reg0],0\n" /* gr0 (qid + info) into reg0 */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
" lgr %[reg2],2\n" /* gr2 (res length) into reg2 */
- : [reg0] "+&d" (reg0), [reg1] "=&d" (reg1), [reg2] "=&d" (reg2),
- [rp1] "+&d" (rp1.pair), [rp2] "+&d" (rp2.pair)
+ : [reg0] "+&d" (reg0), [reg1] "=&d" (reg1.value),
+ [reg2] "=&d" (reg2), [rp1] "+&d" (rp1.pair),
+ [rp2] "+&d" (rp2.pair)
:
: "cc", "memory", "0", "1", "2");
@@ -415,16 +528,20 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
* Signal the caller that this dqap is only partially received
* with a special status response code 0xFF and *resgr0 updated
*/
- reg1.response_code = 0xFF;
+ reg1.status.response_code = 0xFF;
if (resgr0)
*resgr0 = reg0;
} else {
- *psmid = (((unsigned long long)rp1.even) << 32) + rp1.odd;
+ *psmid = (rp1.even << 32) + rp1.odd;
if (resgr0)
*resgr0 = 0;
}
- return reg1;
+ /* update *length with the nr of bytes stored into the msg buffer */
+ if (length)
+ *length = msglen - rp2.odd;
+
+ return reg1.status;
}
/*
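Taken together, the TAPQ changes let callers decode the GR2 response through struct ap_tapq_hwinfo instead of passing around a raw unsigned long. A hedged sketch that checks the new SE bind/associate state via the bs field (hypothetical helper, not part of this patch):

	static bool example_queue_se_usable(ap_qid_t qid)
	{
		struct ap_tapq_hwinfo hwinfo;
		struct ap_queue_status status;

		status = ap_test_queue(qid, 1, &hwinfo);
		if (status.response_code)
			return false;
		return hwinfo.bs == AP_BS_Q_USABLE ||
		       hwinfo.bs == AP_BS_Q_USABLE_NO_SECURE_KEY;
	}
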
diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h
index c5bd9f4437e5..a92ebbc7aa7a 100644
--- a/arch/s390/include/asm/appldata.h
+++ b/arch/s390/include/asm/appldata.h
@@ -8,8 +8,8 @@
#ifndef _ASM_S390_APPLDATA_H
#define _ASM_S390_APPLDATA_H
+#include <linux/io.h>
#include <asm/diag.h>
-#include <asm/io.h>
#define APPLDATA_START_INTERVAL_REC 0x80
#define APPLDATA_STOP_REC 0x81
@@ -54,13 +54,13 @@ static inline int appldata_asm(struct appldata_parameter_list *parm_list,
parm_list->function = fn;
parm_list->parlist_length = sizeof(*parm_list);
parm_list->buffer_length = length;
- parm_list->product_id_addr = (unsigned long) id;
+ parm_list->product_id_addr = virt_to_phys(id);
parm_list->buffer_addr = virt_to_phys(buffer);
diag_stat_inc(DIAG_STAT_X0DC);
asm volatile(
" diag %1,%0,0xdc"
: "=d" (ry)
- : "d" (parm_list), "m" (*parm_list), "m" (*id)
+ : "d" (virt_to_phys(parm_list)), "m" (*parm_list), "m" (*id)
: "cc");
return ry;
}
diff --git a/arch/s390/include/asm/asm-extable.h b/arch/s390/include/asm/asm-extable.h
index b74f1070ddb2..4a6b0a8b6412 100644
--- a/arch/s390/include/asm/asm-extable.h
+++ b/arch/s390/include/asm/asm-extable.h
@@ -12,6 +12,8 @@
#define EX_TYPE_UA_STORE 3
#define EX_TYPE_UA_LOAD_MEM 4
#define EX_TYPE_UA_LOAD_REG 5
+#define EX_TYPE_UA_LOAD_REGPAIR 6
+#define EX_TYPE_ZEROPAD 7
#define EX_DATA_REG_ERR_SHIFT 0
#define EX_DATA_REG_ERR GENMASK(3, 0)
@@ -22,18 +24,9 @@
#define EX_DATA_LEN_SHIFT 8
#define EX_DATA_LEN GENMASK(11, 8)
-#define __EX_TABLE(_section, _fault, _target, _type) \
- stringify_in_c(.section _section,"a";) \
- stringify_in_c(.align 4;) \
- stringify_in_c(.long (_fault) - .;) \
- stringify_in_c(.long (_target) - .;) \
- stringify_in_c(.short (_type);) \
- stringify_in_c(.short 0;) \
- stringify_in_c(.previous)
-
-#define __EX_TABLE_UA(_section, _fault, _target, _type, _regerr, _regaddr, _len)\
+#define __EX_TABLE(_section, _fault, _target, _type, _regerr, _regaddr, _len) \
stringify_in_c(.section _section,"a";) \
- stringify_in_c(.align 4;) \
+ stringify_in_c(.balign 4;) \
stringify_in_c(.long (_fault) - .;) \
stringify_in_c(.long (_target) - .;) \
stringify_in_c(.short (_type);) \
@@ -71,18 +64,24 @@
stringify_in_c(.previous)
#define EX_TABLE(_fault, _target) \
- __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FIXUP)
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FIXUP, __stringify(%%r0), __stringify(%%r0), 0)
#define EX_TABLE_AMODE31(_fault, _target) \
- __EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP)
+ __EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP, __stringify(%%r0), __stringify(%%r0), 0)
#define EX_TABLE_UA_STORE(_fault, _target, _regerr) \
- __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0)
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0)
#define EX_TABLE_UA_LOAD_MEM(_fault, _target, _regerr, _regmem, _len) \
- __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len)
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len)
#define EX_TABLE_UA_LOAD_REG(_fault, _target, _regerr, _regzero) \
- __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0)
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0)
+
+#define EX_TABLE_UA_LOAD_REGPAIR(_fault, _target, _regerr, _regzero) \
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REGPAIR, _regerr, _regzero, 0)
+
+#define EX_TABLE_ZEROPAD(_fault, _target, _regdata, _regaddr) \
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_ZEROPAD, _regdata, _regaddr, 0)
#endif /* __ASM_EXTABLE_H */
diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h
index c37eb921bfbf..56096ae26f29 100644
--- a/arch/s390/include/asm/asm-prototypes.h
+++ b/arch/s390/include/asm/asm-prototypes.h
@@ -3,7 +3,11 @@
#include <linux/kvm_host.h>
#include <linux/ftrace.h>
-#include <asm/fpu/api.h>
+#include <asm/fpu.h>
#include <asm-generic/asm-prototypes.h>
+__int128_t __ashlti3(__int128_t a, int b);
+__int128_t __ashrti3(__int128_t a, int b);
+__int128_t __lshrti3(__int128_t a, int b);
+
#endif /* _ASM_S390_PROTOTYPES_H */
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 191dc7898b0f..c467dffa8c12 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -113,76 +113,71 @@ static inline bool arch_test_and_change_bit(unsigned long nr,
return old & mask;
}
-static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long *p = __bitops_word(nr, addr);
unsigned long mask = __bitops_mask(nr);
- *addr |= mask;
+ *p |= mask;
}
-static inline void arch___clear_bit(unsigned long nr,
- volatile unsigned long *ptr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long *p = __bitops_word(nr, addr);
unsigned long mask = __bitops_mask(nr);
- *addr &= ~mask;
+ *p &= ~mask;
}
-static inline void arch___change_bit(unsigned long nr,
- volatile unsigned long *ptr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long *p = __bitops_word(nr, addr);
unsigned long mask = __bitops_mask(nr);
- *addr ^= mask;
+ *p ^= mask;
}
-static inline bool arch___test_and_set_bit(unsigned long nr,
- volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long *p = __bitops_word(nr, addr);
unsigned long mask = __bitops_mask(nr);
unsigned long old;
- old = *addr;
- *addr |= mask;
+ old = *p;
+ *p |= mask;
return old & mask;
}
-static inline bool arch___test_and_clear_bit(unsigned long nr,
- volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long *p = __bitops_word(nr, addr);
unsigned long mask = __bitops_mask(nr);
unsigned long old;
- old = *addr;
- *addr &= ~mask;
+ old = *p;
+ *p &= ~mask;
return old & mask;
}
-static inline bool arch___test_and_change_bit(unsigned long nr,
- volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long *p = __bitops_word(nr, addr);
unsigned long mask = __bitops_mask(nr);
unsigned long old;
- old = *addr;
- *addr ^= mask;
+ old = *p;
+ *p ^= mask;
return old & mask;
}
-static inline bool arch_test_bit(unsigned long nr,
- const volatile unsigned long *ptr)
-{
- const volatile unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long mask = __bitops_mask(nr);
-
- return *addr & mask;
-}
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
static inline bool arch_test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *ptr)
@@ -206,6 +201,16 @@ static inline void arch___clear_bit_unlock(unsigned long nr,
arch___clear_bit(nr, ptr);
}
+static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
+ volatile unsigned long *ptr)
+{
+ unsigned long old;
+
+ old = __atomic64_xor_barrier(mask, (long *)ptr);
+ return old & BIT(7);
+}
+#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte
+
#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-non-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>
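The new arch_xor_unlock_is_negative_byte() performs an atomic xor with barrier semantics and reports whether bit 7 of the old value was set; the generic page-unlock code uses this to clear the lock bit and test for waiters in a single operation. A minimal sketch of the contract (hypothetical values):

	unsigned long word = BIT(7) | BIT(0);
	bool waiters;

	/* atomically: word ^= BIT(0); return value reflects the old word */
	waiters = arch_xor_unlock_is_negative_byte(BIT(0), &word);
	/* now word == BIT(7) and waiters == true */
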
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index aebe1e22c7be..c500d45fb465 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -14,7 +14,7 @@
".section .rodata.str,\"aMS\",@progbits,1\n" \
"1: .asciz \""__FILE__"\"\n" \
".previous\n" \
- ".section __bug_table,\"awM\",@progbits,%2\n" \
+ ".section __bug_table,\"aw\"\n" \
"2: .long 0b-.\n" \
" .long 1b-.\n" \
" .short %0,%1\n" \
@@ -30,7 +30,7 @@
#define __EMIT_BUG(x) do { \
asm_inline volatile( \
"0: mc 0,0\n" \
- ".section __bug_table,\"awM\",@progbits,%1\n" \
+ ".section __bug_table,\"aw\"\n" \
"1: .long 0b-.\n" \
" .short %0\n" \
" .org 1b+%1\n" \
diff --git a/arch/s390/include/asm/bugs.h b/arch/s390/include/asm/bugs.h
deleted file mode 100644
index aa42a179be33..000000000000
--- a/arch/s390/include/asm/bugs.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * S390 version
- * Copyright IBM Corp. 1999
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * Derived from "include/asm-i386/bugs.h"
- * Copyright (C) 1994 Linus Torvalds
- */
-
-/*
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Needs:
- * void check_bugs(void);
- */
-
-static inline void check_bugs(void)
-{
- /* s390 has no bugs ... */
-}
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index d4e90f2ba77e..436365ff6c19 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -15,6 +15,7 @@
#include <asm/fcx.h>
#include <asm/irq.h>
#include <asm/schid.h>
+#include <linux/mutex.h>
/* structs from asm/cio.h */
struct irb;
@@ -87,6 +88,7 @@ struct ccw_device {
spinlock_t *ccwlock;
/* private: */
struct ccw_device_private *private; /* cio private information */
+ struct mutex reg_mutex;
/* public: */
struct ccw_device_id id;
struct ccw_driver *drv;
@@ -214,9 +216,9 @@ extern struct ccw_device *ccw_device_create_console(struct ccw_driver *);
extern void ccw_device_destroy_console(struct ccw_device *);
extern int ccw_device_enable_console(struct ccw_device *);
extern void ccw_device_wait_idle(struct ccw_device *);
-extern int ccw_device_force_console(struct ccw_device *);
-extern void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size);
+extern void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size,
+ dma32_t *dma_handle);
extern void ccw_device_dma_free(struct ccw_device *cdev,
void *cpu_addr, size_t size);
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index cdd19d326345..b89159591ca0 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -12,35 +12,29 @@
#ifndef _S390_CHECKSUM_H
#define _S390_CHECKSUM_H
-#include <linux/uaccess.h>
+#include <linux/instrumented.h>
#include <linux/in6.h>
-/*
- * Computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit).
- *
- * Returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic.
- *
- * This function must be called with even lengths, except
- * for the last fragment, which may be odd.
- *
- * It's best to have buff aligned on a 32-bit boundary.
- */
-static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
+static inline __wsum cksm(const void *buff, int len, __wsum sum)
{
union register_pair rp = {
- .even = (unsigned long) buff,
- .odd = (unsigned long) len,
+ .even = (unsigned long)buff,
+ .odd = (unsigned long)len,
};
- asm volatile(
+ instrument_read(buff, len);
+ asm volatile("\n"
"0: cksm %[sum],%[rp]\n"
" jo 0b\n"
: [sum] "+&d" (sum), [rp] "+&d" (rp.pair) : : "cc", "memory");
return sum;
}
+__wsum csum_partial(const void *buff, int len, __wsum sum);
+
+#define _HAVE_ARCH_CSUM_AND_COPY
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
+
/*
* Fold a partial checksum without adding pseudo headers.
*/
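csum_partial() moves out of line, while the inline cksm() helper keeps looping on the CKSM instruction until the whole buffer is consumed. Since CKSM accumulates into sum, a checksum can still be built up piecewise, as the removed comment described (even lengths except for the last fragment). A sketch (buf1/buf2 are hypothetical):

	__wsum sum = 0;

	sum = cksm(buf1, len1, sum);	/* len1 should be even */
	sum = cksm(buf2, len2, sum);	/* last fragment may be odd */
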
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 1c4f585dd39b..b6b619f340a5 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -7,6 +7,7 @@
#include <linux/bitops.h>
#include <linux/genalloc.h>
+#include <asm/dma-types.h>
#include <asm/types.h>
#include <asm/tpi.h>
@@ -32,7 +33,7 @@ struct ccw1 {
__u8 cmd_code;
__u8 flags;
__u16 count;
- __u32 cda;
+ dma32_t cda;
} __attribute__ ((packed,aligned(8)));
/**
@@ -152,8 +153,8 @@ struct sublog {
struct esw0 {
struct sublog sublog;
struct erw erw;
- __u32 faddr[2];
- __u32 saddr;
+ dma32_t faddr[2];
+ dma32_t saddr;
} __attribute__ ((packed));
/**
@@ -364,6 +365,8 @@ extern struct device *cio_get_dma_css_dev(void);
void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
size_t size);
+void *__cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size, dma32_t *dma_handle);
void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size);
void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev);
struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages);
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 84c3f0d576c5..aae0315374de 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -14,8 +14,8 @@
void __xchg_called_with_bad_pointer(void);
-static __always_inline unsigned long __xchg(unsigned long x,
- unsigned long address, int size)
+static __always_inline unsigned long
+__arch_xchg(unsigned long x, unsigned long address, int size)
{
unsigned long old;
int shift;
@@ -77,8 +77,8 @@ static __always_inline unsigned long __xchg(unsigned long x,
__typeof__(*(ptr)) __ret; \
\
__ret = (__typeof__(*(ptr))) \
- __xchg((unsigned long)(x), (unsigned long)(ptr), \
- sizeof(*(ptr))); \
+ __arch_xchg((unsigned long)(x), (unsigned long)(ptr), \
+ sizeof(*(ptr))); \
__ret; \
})
@@ -88,67 +88,90 @@ static __always_inline unsigned long __cmpxchg(unsigned long address,
unsigned long old,
unsigned long new, int size)
{
- unsigned long prev, tmp;
- int shift;
-
switch (size) {
- case 1:
+ case 1: {
+ unsigned int prev, shift, mask;
+
shift = (3 ^ (address & 3)) << 3;
address ^= address & 3;
+ old = (old & 0xff) << shift;
+ new = (new & 0xff) << shift;
+ mask = ~(0xff << shift);
asm volatile(
- " l %0,%2\n"
- "0: nr %0,%5\n"
- " lr %1,%0\n"
- " or %0,%3\n"
- " or %1,%4\n"
- " cs %0,%1,%2\n"
- " jnl 1f\n"
- " xr %1,%0\n"
- " nr %1,%5\n"
- " jnz 0b\n"
+ " l %[prev],%[address]\n"
+ " nr %[prev],%[mask]\n"
+ " xilf %[mask],0xffffffff\n"
+ " or %[new],%[prev]\n"
+ " or %[prev],%[tmp]\n"
+ "0: lr %[tmp],%[prev]\n"
+ " cs %[prev],%[new],%[address]\n"
+ " jnl 1f\n"
+ " xr %[tmp],%[prev]\n"
+ " xr %[new],%[tmp]\n"
+ " nr %[tmp],%[mask]\n"
+ " jz 0b\n"
"1:"
- : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) address)
- : "d" ((old & 0xff) << shift),
- "d" ((new & 0xff) << shift),
- "d" (~(0xff << shift))
- : "memory", "cc");
+ : [prev] "=&d" (prev),
+ [address] "+Q" (*(int *)address),
+ [tmp] "+&d" (old),
+ [new] "+&d" (new),
+ [mask] "+&d" (mask)
+ :: "memory", "cc");
return prev >> shift;
- case 2:
+ }
+ case 2: {
+ unsigned int prev, shift, mask;
+
shift = (2 ^ (address & 2)) << 3;
address ^= address & 2;
+ old = (old & 0xffff) << shift;
+ new = (new & 0xffff) << shift;
+ mask = ~(0xffff << shift);
asm volatile(
- " l %0,%2\n"
- "0: nr %0,%5\n"
- " lr %1,%0\n"
- " or %0,%3\n"
- " or %1,%4\n"
- " cs %0,%1,%2\n"
- " jnl 1f\n"
- " xr %1,%0\n"
- " nr %1,%5\n"
- " jnz 0b\n"
+ " l %[prev],%[address]\n"
+ " nr %[prev],%[mask]\n"
+ " xilf %[mask],0xffffffff\n"
+ " or %[new],%[prev]\n"
+ " or %[prev],%[tmp]\n"
+ "0: lr %[tmp],%[prev]\n"
+ " cs %[prev],%[new],%[address]\n"
+ " jnl 1f\n"
+ " xr %[tmp],%[prev]\n"
+ " xr %[new],%[tmp]\n"
+ " nr %[tmp],%[mask]\n"
+ " jz 0b\n"
"1:"
- : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) address)
- : "d" ((old & 0xffff) << shift),
- "d" ((new & 0xffff) << shift),
- "d" (~(0xffff << shift))
- : "memory", "cc");
+ : [prev] "=&d" (prev),
+ [address] "+Q" (*(int *)address),
+ [tmp] "+&d" (old),
+ [new] "+&d" (new),
+ [mask] "+&d" (mask)
+ :: "memory", "cc");
return prev >> shift;
- case 4:
+ }
+ case 4: {
+ unsigned int prev = old;
+
asm volatile(
- " cs %0,%3,%1\n"
- : "=&d" (prev), "+Q" (*(int *) address)
- : "0" (old), "d" (new)
+ " cs %[prev],%[new],%[address]\n"
+ : [prev] "+&d" (prev),
+ [address] "+Q" (*(int *)address)
+ : [new] "d" (new)
: "memory", "cc");
return prev;
- case 8:
+ }
+ case 8: {
+ unsigned long prev = old;
+
asm volatile(
- " csg %0,%3,%1\n"
- : "=&d" (prev), "+QS" (*(long *) address)
- : "0" (old), "d" (new)
+ " csg %[prev],%[new],%[address]\n"
+ : [prev] "+&d" (prev),
+ [address] "+QS" (*(long *)address)
+ : [new] "d" (new)
: "memory", "cc");
return prev;
}
+ }
__cmpxchg_called_with_bad_pointer();
return old;
}
@@ -167,38 +190,18 @@ static __always_inline unsigned long __cmpxchg(unsigned long address,
#define arch_cmpxchg_local arch_cmpxchg
#define arch_cmpxchg64_local arch_cmpxchg
-#define system_has_cmpxchg_double() 1
+#define system_has_cmpxchg128() 1
-static __always_inline int __cmpxchg_double(unsigned long p1, unsigned long p2,
- unsigned long o1, unsigned long o2,
- unsigned long n1, unsigned long n2)
+static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new)
{
- union register_pair old = { .even = o1, .odd = o2, };
- union register_pair new = { .even = n1, .odd = n2, };
- int cc;
-
asm volatile(
" cdsg %[old],%[new],%[ptr]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=&d" (cc), [old] "+&d" (old.pair)
- : [new] "d" (new.pair),
- [ptr] "QS" (*(unsigned long *)p1), "Q" (*(unsigned long *)p2)
+ : [old] "+d" (old), [ptr] "+QS" (*ptr)
+ : [new] "d" (new)
: "memory", "cc");
- return !cc;
+ return old;
}
-#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2) \
-({ \
- typeof(p1) __p1 = (p1); \
- typeof(p2) __p2 = (p2); \
- \
- BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
- BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
- VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\
- __cmpxchg_double((unsigned long)__p1, (unsigned long)__p2, \
- (unsigned long)(o1), (unsigned long)(o2), \
- (unsigned long)(n1), (unsigned long)(n2)); \
-})
+#define arch_cmpxchg128 arch_cmpxchg128
#endif /* __ASM_CMPXCHG_H */
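arch_cmpxchg128() replaces the old cmpxchg_double() pair-of-longs interface with a plain 128-bit compare-and-swap via CDSG: it returns the old value rather than a condition code, so success is detected by comparing against the expected value. A sketch (hypothetical helper; ptr must be 16-byte aligned, as CDSG requires):

	static bool example_try_update(u128 *ptr, u128 expected, u128 new)
	{
		return arch_cmpxchg128(ptr, expected, new) == expected;
	}
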
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index a386070f1d56..3cb9d813f022 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -112,7 +112,7 @@ struct compat_statfs64 {
u32 f_namelen;
u32 f_frsize;
u32 f_flags;
- u32 f_spare[4];
+ u32 f_spare[5];
};
/*
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index 646b12981f20..b378e2b57ad8 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -2,7 +2,7 @@
/*
* CP Assist for Cryptographic Functions (CPACF)
*
- * Copyright IBM Corp. 2003, 2017
+ * Copyright IBM Corp. 2003, 2023
* Author(s): Thomas Spatzier
* Jan Glauber
* Harald Freudenberger (freude@de.ibm.com)
@@ -132,6 +132,11 @@
#define CPACF_PCKMO_ENC_AES_128_KEY 0x12
#define CPACF_PCKMO_ENC_AES_192_KEY 0x13
#define CPACF_PCKMO_ENC_AES_256_KEY 0x14
+#define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20
+#define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21
+#define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22
+#define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28
+#define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29
/*
* Function codes for the PRNO (PERFORM RANDOM NUMBER OPERATION)
diff --git a/arch/s390/include/asm/cpu_mcf.h b/arch/s390/include/asm/cpu_mcf.h
deleted file mode 100644
index f87a4788c19c..000000000000
--- a/arch/s390/include/asm/cpu_mcf.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Counter facility support definitions for the Linux perf
- *
- * Copyright IBM Corp. 2019
- * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
- */
-#ifndef _ASM_S390_CPU_MCF_H
-#define _ASM_S390_CPU_MCF_H
-
-#include <linux/perf_event.h>
-#include <asm/cpu_mf.h>
-
-enum cpumf_ctr_set {
- CPUMF_CTR_SET_BASIC = 0, /* Basic Counter Set */
- CPUMF_CTR_SET_USER = 1, /* Problem-State Counter Set */
- CPUMF_CTR_SET_CRYPTO = 2, /* Crypto-Activity Counter Set */
- CPUMF_CTR_SET_EXT = 3, /* Extended Counter Set */
- CPUMF_CTR_SET_MT_DIAG = 4, /* MT-diagnostic Counter Set */
-
- /* Maximum number of counter sets */
- CPUMF_CTR_SET_MAX,
-};
-
-#define CPUMF_LCCTL_ENABLE_SHIFT 16
-#define CPUMF_LCCTL_ACTCTL_SHIFT 0
-
-static inline void ctr_set_enable(u64 *state, u64 ctrsets)
-{
- *state |= ctrsets << CPUMF_LCCTL_ENABLE_SHIFT;
-}
-
-static inline void ctr_set_disable(u64 *state, u64 ctrsets)
-{
- *state &= ~(ctrsets << CPUMF_LCCTL_ENABLE_SHIFT);
-}
-
-static inline void ctr_set_start(u64 *state, u64 ctrsets)
-{
- *state |= ctrsets << CPUMF_LCCTL_ACTCTL_SHIFT;
-}
-
-static inline void ctr_set_stop(u64 *state, u64 ctrsets)
-{
- *state &= ~(ctrsets << CPUMF_LCCTL_ACTCTL_SHIFT);
-}
-
-static inline int ctr_stcctm(enum cpumf_ctr_set set, u64 range, u64 *dest)
-{
- switch (set) {
- case CPUMF_CTR_SET_BASIC:
- return stcctm(BASIC, range, dest);
- case CPUMF_CTR_SET_USER:
- return stcctm(PROBLEM_STATE, range, dest);
- case CPUMF_CTR_SET_CRYPTO:
- return stcctm(CRYPTO_ACTIVITY, range, dest);
- case CPUMF_CTR_SET_EXT:
- return stcctm(EXTENDED, range, dest);
- case CPUMF_CTR_SET_MT_DIAG:
- return stcctm(MT_DIAG_CLEARING, range, dest);
- case CPUMF_CTR_SET_MAX:
- return 3;
- }
- return 3;
-}
-
-struct cpu_cf_events {
- struct cpumf_ctr_info info;
- atomic_t ctr_set[CPUMF_CTR_SET_MAX];
- atomic64_t alert;
- u64 state; /* For perf_event_open SVC */
- u64 dev_state; /* For /dev/hwctr */
- unsigned int flags;
- size_t used; /* Bytes used in data */
- size_t usedss; /* Bytes used in start/stop */
- unsigned char start[PAGE_SIZE]; /* Counter set at event add */
- unsigned char stop[PAGE_SIZE]; /* Counter set at event delete */
- unsigned char data[PAGE_SIZE]; /* Counter set at /dev/hwctr */
- unsigned int sets; /* # Counter set saved in memory */
-};
-DECLARE_PER_CPU(struct cpu_cf_events, cpu_cf_events);
-
-bool kernel_cpumcf_avail(void);
-int __kernel_cpumcf_begin(void);
-unsigned long kernel_cpumcf_alert(int clear);
-void __kernel_cpumcf_end(void);
-
-static inline int kernel_cpumcf_begin(void)
-{
- if (!cpum_cf_avail())
- return -ENODEV;
-
- preempt_disable();
- return __kernel_cpumcf_begin();
-}
-static inline void kernel_cpumcf_end(void)
-{
- __kernel_cpumcf_end();
- preempt_enable();
-}
-
-/* Return true if store counter set multiple instruction is available */
-static inline int stccm_avail(void)
-{
- return test_facility(142);
-}
-
-size_t cpum_cf_ctrset_size(enum cpumf_ctr_set ctrset,
- struct cpumf_ctr_info *info);
-int cfset_online_cpu(unsigned int cpu);
-int cfset_offline_cpu(unsigned int cpu);
-#endif /* _ASM_S390_CPU_MCF_H */
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index feaba12dbecb..a0de5b9b02ea 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -42,7 +42,6 @@ static inline int cpum_sf_avail(void)
return test_facility(40) && test_facility(68);
}
-
struct cpumf_ctr_info {
u16 cfvn;
u16 auth_ctl;
@@ -131,19 +130,21 @@ struct hws_combined_entry {
struct hws_diag_entry diag; /* Diagnostic-sampling data entry */
} __packed;
-struct hws_trailer_entry {
- union {
- struct {
- unsigned int f:1; /* 0 - Block Full Indicator */
- unsigned int a:1; /* 1 - Alert request control */
- unsigned int t:1; /* 2 - Timestamp format */
- unsigned int :29; /* 3 - 31: Reserved */
- unsigned int bsdes:16; /* 32-47: size of basic SDE */
- unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */
- };
- unsigned long long flags; /* 0 - 63: All indicators */
+union hws_trailer_header {
+ struct {
+ unsigned int f:1; /* 0 - Block Full Indicator */
+ unsigned int a:1; /* 1 - Alert request control */
+ unsigned int t:1; /* 2 - Timestamp format */
+ unsigned int :29; /* 3 - 31: Reserved */
+ unsigned int bsdes:16; /* 32-47: size of basic SDE */
+ unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */
+ unsigned long long overflow; /* 64 - Overflow Count */
};
- unsigned long long overflow; /* 64 - sample Overflow count */
+ u128 val;
+};
+
+struct hws_trailer_entry {
+ union hws_trailer_header header; /* 0 - 15 Flags + Overflow Count */
unsigned char timestamp[16]; /* 16 - 31 timestamp */
unsigned long long reserved1; /* 32 -Reserved */
unsigned long long reserved2; /* */
@@ -273,59 +274,4 @@ static inline int lsctl(struct hws_lsctl_request_block *req)
return cc ? -EINVAL : 0;
}
-
-/* Sampling control helper functions */
-
-#include <linux/time.h>
-
-static inline unsigned long freq_to_sample_rate(struct hws_qsi_info_block *qsi,
- unsigned long freq)
-{
- return (USEC_PER_SEC / freq) * qsi->cpu_speed;
-}
-
-static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi,
- unsigned long rate)
-{
- return USEC_PER_SEC * qsi->cpu_speed / rate;
-}
-
-#define SDB_TE_ALERT_REQ_MASK 0x4000000000000000UL
-#define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL
-
-/* Return TOD timestamp contained in an trailer entry */
-static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
-{
- /* TOD in STCKE format */
- if (te->t)
- return *((unsigned long long *) &te->timestamp[1]);
-
- /* TOD in STCK format */
- return *((unsigned long long *) &te->timestamp[0]);
-}
-
-/* Return pointer to trailer entry of an sample data block */
-static inline unsigned long *trailer_entry_ptr(unsigned long v)
-{
- void *ret;
-
- ret = (void *) v;
- ret += PAGE_SIZE;
- ret -= sizeof(struct hws_trailer_entry);
-
- return (unsigned long *) ret;
-}
-
-/* Return true if the entry in the sample data block table (sdbt)
- * is a link to the next sdbt */
-static inline int is_link_entry(unsigned long *s)
-{
- return *s & 0x1ul ? 1 : 0;
-}
-
-/* Return pointer to the linked sdbt */
-static inline unsigned long *get_next_sdbt(unsigned long *s)
-{
- return (unsigned long *) (*s & ~0x1ul);
-}
#endif /* _ASM_S390_CPU_MF_H */
diff --git a/arch/s390/include/asm/cpufeature.h b/arch/s390/include/asm/cpufeature.h
index 14cfd48d598e..931204613753 100644
--- a/arch/s390/include/asm/cpufeature.h
+++ b/arch/s390/include/asm/cpufeature.h
@@ -2,28 +2,21 @@
/*
* Module interface for CPU features
*
- * Copyright IBM Corp. 2015
+ * Copyright IBM Corp. 2015, 2022
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
#ifndef __ASM_S390_CPUFEATURE_H
#define __ASM_S390_CPUFEATURE_H
-#include <asm/elf.h>
+enum {
+ S390_CPU_FEATURE_MSA,
+ S390_CPU_FEATURE_VXRS,
+ S390_CPU_FEATURE_UV,
+ MAX_CPU_FEATURES
+};
-/* Hardware features on Linux on z Systems are indicated by facility bits that
- * are mapped to the so-called machine flags. Particular machine flags are
- * then used to define ELF hardware capabilities; most notably hardware flags
- * that are essential for user space / glibc.
- *
- * Restrict the set of exposed CPU features to ELF hardware capabilities for
- * now. Additional machine flags can be indicated by values larger than
- * MAX_ELF_HWCAP_FEATURES.
- */
-#define MAX_ELF_HWCAP_FEATURES (8 * sizeof(elf_hwcap))
-#define MAX_CPU_FEATURES MAX_ELF_HWCAP_FEATURES
-
-#define cpu_feature(feat) ilog2(HWCAP_ ## feat)
+#define cpu_feature(feature) (feature)
int cpu_have_feature(unsigned int nr);
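With this change cpu_have_feature() takes one of the S390_CPU_FEATURE_* enum values directly, instead of an ELF-hwcap-derived bit number. A sketch (hypothetical caller):

	static bool example_can_use_vx(void)
	{
		return cpu_have_feature(S390_CPU_FEATURE_VXRS);
	}
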
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 1d389847b588..30bb3ec4e5fc 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -11,30 +11,11 @@
#include <linux/types.h>
#include <asm/timex.h>
-#define CPUTIME_PER_USEC 4096ULL
-#define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC)
-
-/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
-
-#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
-
-/*
- * Convert cputime to microseconds.
- */
-static inline u64 cputime_to_usecs(const u64 cputime)
-{
- return cputime >> 12;
-}
-
/*
* Convert cputime to nanoseconds.
*/
#define cputime_to_nsecs(cputime) tod_to_ns(cputime)
-u64 arch_cpu_idle_time(int cpu);
-
-#define arch_idle_time(cpu) arch_cpu_idle_time(cpu)
-
void account_idle_time_irq(void);
#endif /* _S390_CPUTIME_H */
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
deleted file mode 100644
index 267a8f88e143..000000000000
--- a/arch/s390/include/asm/ctl_reg.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright IBM Corp. 1999, 2009
- *
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#ifndef __ASM_CTL_REG_H
-#define __ASM_CTL_REG_H
-
-#include <linux/bits.h>
-
-#define CR0_CLOCK_COMPARATOR_SIGN BIT(63 - 10)
-#define CR0_LOW_ADDRESS_PROTECTION BIT(63 - 35)
-#define CR0_FETCH_PROTECTION_OVERRIDE BIT(63 - 38)
-#define CR0_STORAGE_PROTECTION_OVERRIDE BIT(63 - 39)
-#define CR0_EMERGENCY_SIGNAL_SUBMASK BIT(63 - 49)
-#define CR0_EXTERNAL_CALL_SUBMASK BIT(63 - 50)
-#define CR0_CLOCK_COMPARATOR_SUBMASK BIT(63 - 52)
-#define CR0_CPU_TIMER_SUBMASK BIT(63 - 53)
-#define CR0_SERVICE_SIGNAL_SUBMASK BIT(63 - 54)
-#define CR0_UNUSED_56 BIT(63 - 56)
-#define CR0_INTERRUPT_KEY_SUBMASK BIT(63 - 57)
-#define CR0_MEASUREMENT_ALERT_SUBMASK BIT(63 - 58)
-
-#define CR14_UNUSED_32 BIT(63 - 32)
-#define CR14_UNUSED_33 BIT(63 - 33)
-#define CR14_CHANNEL_REPORT_SUBMASK BIT(63 - 35)
-#define CR14_RECOVERY_SUBMASK BIT(63 - 36)
-#define CR14_DEGRADATION_SUBMASK BIT(63 - 37)
-#define CR14_EXTERNAL_DAMAGE_SUBMASK BIT(63 - 38)
-#define CR14_WARNING_SUBMASK BIT(63 - 39)
-
-#ifndef __ASSEMBLY__
-
-#include <linux/bug.h>
-
-#define __ctl_load(array, low, high) do { \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- \
- BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
- asm volatile( \
- " lctlg %1,%2,%0\n" \
- : \
- : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \
- : "memory"); \
-} while (0)
-
-#define __ctl_store(array, low, high) do { \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- \
- BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
- asm volatile( \
- " stctg %1,%2,%0\n" \
- : "=Q" (*(addrtype *)(&array)) \
- : "i" (low), "i" (high)); \
-} while (0)
-
-static __always_inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
-{
- unsigned long reg;
-
- __ctl_store(reg, cr, cr);
- reg |= 1UL << bit;
- __ctl_load(reg, cr, cr);
-}
-
-static __always_inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
-{
- unsigned long reg;
-
- __ctl_store(reg, cr, cr);
- reg &= ~(1UL << bit);
- __ctl_load(reg, cr, cr);
-}
-
-void smp_ctl_set_clear_bit(int cr, int bit, bool set);
-
-static inline void ctl_set_bit(int cr, int bit)
-{
- smp_ctl_set_clear_bit(cr, bit, true);
-}
-
-static inline void ctl_clear_bit(int cr, int bit)
-{
- smp_ctl_set_clear_bit(cr, bit, false);
-}
-
-union ctlreg0 {
- unsigned long val;
- struct {
- unsigned long : 8;
- unsigned long tcx : 1; /* Transactional-Execution control */
- unsigned long pifo : 1; /* Transactional-Execution Program-
- Interruption-Filtering Override */
- unsigned long : 3;
- unsigned long ccc : 1; /* Cryptography counter control */
- unsigned long : 18;
- unsigned long : 3;
- unsigned long lap : 1; /* Low-address-protection control */
- unsigned long : 4;
- unsigned long edat : 1; /* Enhanced-DAT-enablement control */
- unsigned long : 2;
- unsigned long iep : 1; /* Instruction-Execution-Protection */
- unsigned long : 1;
- unsigned long afp : 1; /* AFP-register control */
- unsigned long vx : 1; /* Vector enablement control */
- unsigned long : 7;
- unsigned long sssm : 1; /* Service signal subclass mask */
- unsigned long : 9;
- };
-};
-
-union ctlreg2 {
- unsigned long val;
- struct {
- unsigned long : 33;
- unsigned long ducto : 25;
- unsigned long : 1;
- unsigned long gse : 1;
- unsigned long : 1;
- unsigned long tds : 1;
- unsigned long tdc : 2;
- };
-};
-
-union ctlreg5 {
- unsigned long val;
- struct {
- unsigned long : 33;
- unsigned long pasteo: 25;
- unsigned long : 6;
- };
-};
-
-union ctlreg15 {
- unsigned long val;
- struct {
- unsigned long lsea : 61;
- unsigned long : 3;
- };
-};
-
-#endif /* __ASSEMBLY__ */
-#endif /* __ASM_CTL_REG_H */
diff --git a/arch/s390/include/asm/ctlreg.h b/arch/s390/include/asm/ctlreg.h
new file mode 100644
index 000000000000..72a9556d04f3
--- /dev/null
+++ b/arch/s390/include/asm/ctlreg.h
@@ -0,0 +1,255 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __ASM_S390_CTLREG_H
+#define __ASM_S390_CTLREG_H
+
+#include <linux/bits.h>
+
+#define CR0_TRANSACTIONAL_EXECUTION_BIT (63 - 8)
+#define CR0_CLOCK_COMPARATOR_SIGN_BIT (63 - 10)
+#define CR0_CRYPTOGRAPHY_COUNTER_BIT (63 - 13)
+#define CR0_PAI_EXTENSION_BIT (63 - 14)
+#define CR0_CPUMF_EXTRACTION_AUTH_BIT (63 - 15)
+#define CR0_WARNING_TRACK_BIT (63 - 30)
+#define CR0_LOW_ADDRESS_PROTECTION_BIT (63 - 35)
+#define CR0_FETCH_PROTECTION_OVERRIDE_BIT (63 - 38)
+#define CR0_STORAGE_PROTECTION_OVERRIDE_BIT (63 - 39)
+#define CR0_EDAT_BIT (63 - 40)
+#define CR0_INSTRUCTION_EXEC_PROTECTION_BIT (63 - 43)
+#define CR0_VECTOR_BIT (63 - 46)
+#define CR0_MALFUNCTION_ALERT_SUBMASK_BIT (63 - 48)
+#define CR0_EMERGENCY_SIGNAL_SUBMASK_BIT (63 - 49)
+#define CR0_EXTERNAL_CALL_SUBMASK_BIT (63 - 50)
+#define CR0_CLOCK_COMPARATOR_SUBMASK_BIT (63 - 52)
+#define CR0_CPU_TIMER_SUBMASK_BIT (63 - 53)
+#define CR0_SERVICE_SIGNAL_SUBMASK_BIT (63 - 54)
+#define CR0_UNUSED_56_BIT (63 - 56)
+#define CR0_INTERRUPT_KEY_SUBMASK_BIT (63 - 57)
+#define CR0_MEASUREMENT_ALERT_SUBMASK_BIT (63 - 58)
+#define CR0_ETR_SUBMASK_BIT (63 - 59)
+#define CR0_IUCV_BIT (63 - 62)
+
+#define CR0_TRANSACTIONAL_EXECUTION BIT(CR0_TRANSACTIONAL_EXECUTION_BIT)
+#define CR0_CLOCK_COMPARATOR_SIGN BIT(CR0_CLOCK_COMPARATOR_SIGN_BIT)
+#define CR0_CRYPTOGRAPHY_COUNTER BIT(CR0_CRYPTOGRAPHY_COUNTER_BIT)
+#define CR0_PAI_EXTENSION BIT(CR0_PAI_EXTENSION_BIT)
+#define CR0_CPUMF_EXTRACTION_AUTH BIT(CR0_CPUMF_EXTRACTION_AUTH_BIT)
+#define CR0_WARNING_TRACK BIT(CR0_WARNING_TRACK_BIT)
+#define CR0_LOW_ADDRESS_PROTECTION BIT(CR0_LOW_ADDRESS_PROTECTION_BIT)
+#define CR0_FETCH_PROTECTION_OVERRIDE BIT(CR0_FETCH_PROTECTION_OVERRIDE_BIT)
+#define CR0_STORAGE_PROTECTION_OVERRIDE BIT(CR0_STORAGE_PROTECTION_OVERRIDE_BIT)
+#define CR0_EDAT BIT(CR0_EDAT_BIT)
+#define CR0_INSTRUCTION_EXEC_PROTECTION BIT(CR0_INSTRUCTION_EXEC_PROTECTION_BIT)
+#define CR0_VECTOR BIT(CR0_VECTOR_BIT)
+#define CR0_MALFUNCTION_ALERT_SUBMASK BIT(CR0_MALFUNCTION_ALERT_SUBMASK_BIT)
+#define CR0_EMERGENCY_SIGNAL_SUBMASK BIT(CR0_EMERGENCY_SIGNAL_SUBMASK_BIT)
+#define CR0_EXTERNAL_CALL_SUBMASK BIT(CR0_EXTERNAL_CALL_SUBMASK_BIT)
+#define CR0_CLOCK_COMPARATOR_SUBMASK BIT(CR0_CLOCK_COMPARATOR_SUBMASK_BIT)
+#define CR0_CPU_TIMER_SUBMASK BIT(CR0_CPU_TIMER_SUBMASK_BIT)
+#define CR0_SERVICE_SIGNAL_SUBMASK BIT(CR0_SERVICE_SIGNAL_SUBMASK_BIT)
+#define CR0_UNUSED_56 BIT(CR0_UNUSED_56_BIT)
+#define CR0_INTERRUPT_KEY_SUBMASK BIT(CR0_INTERRUPT_KEY_SUBMASK_BIT)
+#define CR0_MEASUREMENT_ALERT_SUBMASK BIT(CR0_MEASUREMENT_ALERT_SUBMASK_BIT)
+#define CR0_ETR_SUBMASK BIT(CR0_ETR_SUBMASK_BIT)
+#define CR0_IUCV BIT(CR0_IUCV_BIT)
+
+#define CR2_MIO_ADDRESSING_BIT (63 - 58)
+#define CR2_GUARDED_STORAGE_BIT (63 - 59)
+
+#define CR2_MIO_ADDRESSING BIT(CR2_MIO_ADDRESSING_BIT)
+#define CR2_GUARDED_STORAGE BIT(CR2_GUARDED_STORAGE_BIT)
+
+#define CR14_UNUSED_32_BIT (63 - 32)
+#define CR14_UNUSED_33_BIT (63 - 33)
+#define CR14_CHANNEL_REPORT_SUBMASK_BIT (63 - 35)
+#define CR14_RECOVERY_SUBMASK_BIT (63 - 36)
+#define CR14_DEGRADATION_SUBMASK_BIT (63 - 37)
+#define CR14_EXTERNAL_DAMAGE_SUBMASK_BIT (63 - 38)
+#define CR14_WARNING_SUBMASK_BIT (63 - 39)
+
+#define CR14_UNUSED_32 BIT(CR14_UNUSED_32_BIT)
+#define CR14_UNUSED_33 BIT(CR14_UNUSED_33_BIT)
+#define CR14_CHANNEL_REPORT_SUBMASK BIT(CR14_CHANNEL_REPORT_SUBMASK_BIT)
+#define CR14_RECOVERY_SUBMASK BIT(CR14_RECOVERY_SUBMASK_BIT)
+#define CR14_DEGRADATION_SUBMASK BIT(CR14_DEGRADATION_SUBMASK_BIT)
+#define CR14_EXTERNAL_DAMAGE_SUBMASK BIT(CR14_EXTERNAL_DAMAGE_SUBMASK_BIT)
+#define CR14_WARNING_SUBMASK BIT(CR14_WARNING_SUBMASK_BIT)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bug.h>
+
+struct ctlreg {
+ unsigned long val;
+};
+
+#define __local_ctl_load(low, high, array) do { \
+ struct addrtype { \
+ char _[sizeof(array)]; \
+ }; \
+ int _high = high; \
+ int _low = low; \
+ int _esize; \
+ \
+ _esize = (_high - _low + 1) * sizeof(struct ctlreg); \
+ BUILD_BUG_ON(sizeof(struct addrtype) != _esize); \
+ typecheck(struct ctlreg, array[0]); \
+ asm volatile( \
+ " lctlg %[_low],%[_high],%[_arr]\n" \
+ : \
+ : [_arr] "Q" (*(struct addrtype *)(&array)), \
+ [_low] "i" (low), [_high] "i" (high) \
+ : "memory"); \
+} while (0)
+
+#define __local_ctl_store(low, high, array) do { \
+ struct addrtype { \
+ char _[sizeof(array)]; \
+ }; \
+ int _high = high; \
+ int _low = low; \
+ int _esize; \
+ \
+ _esize = (_high - _low + 1) * sizeof(struct ctlreg); \
+ BUILD_BUG_ON(sizeof(struct addrtype) != _esize); \
+ typecheck(struct ctlreg, array[0]); \
+ asm volatile( \
+ " stctg %[_low],%[_high],%[_arr]\n" \
+ : [_arr] "=Q" (*(struct addrtype *)(&array)) \
+ : [_low] "i" (low), [_high] "i" (high)); \
+} while (0)
+
+static __always_inline void local_ctl_load(unsigned int cr, struct ctlreg *reg)
+{
+ asm volatile(
+ " lctlg %[cr],%[cr],%[reg]\n"
+ :
+ : [reg] "Q" (*reg), [cr] "i" (cr)
+ : "memory");
+}
+
+static __always_inline void local_ctl_store(unsigned int cr, struct ctlreg *reg)
+{
+ asm volatile(
+ " stctg %[cr],%[cr],%[reg]\n"
+ : [reg] "=Q" (*reg)
+ : [cr] "i" (cr));
+}
+
+static __always_inline struct ctlreg local_ctl_set_bit(unsigned int cr, unsigned int bit)
+{
+ struct ctlreg new, old;
+
+ local_ctl_store(cr, &old);
+ new = old;
+ new.val |= 1UL << bit;
+ local_ctl_load(cr, &new);
+ return old;
+}
+
+static __always_inline struct ctlreg local_ctl_clear_bit(unsigned int cr, unsigned int bit)
+{
+ struct ctlreg new, old;
+
+ local_ctl_store(cr, &old);
+ new = old;
+ new.val &= ~(1UL << bit);
+ local_ctl_load(cr, &new);
+ return old;
+}
+
+struct lowcore;
+
+void system_ctlreg_lock(void);
+void system_ctlreg_unlock(void);
+void system_ctlreg_init_save_area(struct lowcore *lc);
+void system_ctlreg_modify(unsigned int cr, unsigned long data, int request);
+
+enum {
+ CTLREG_SET_BIT,
+ CTLREG_CLEAR_BIT,
+ CTLREG_LOAD,
+};
+
+static inline void system_ctl_set_bit(unsigned int cr, unsigned int bit)
+{
+ system_ctlreg_modify(cr, bit, CTLREG_SET_BIT);
+}
+
+static inline void system_ctl_clear_bit(unsigned int cr, unsigned int bit)
+{
+ system_ctlreg_modify(cr, bit, CTLREG_CLEAR_BIT);
+}
+
+static inline void system_ctl_load(unsigned int cr, struct ctlreg *reg)
+{
+ system_ctlreg_modify(cr, reg->val, CTLREG_LOAD);
+}
+
+union ctlreg0 {
+ unsigned long val;
+ struct ctlreg reg;
+ struct {
+ unsigned long : 8;
+ unsigned long tcx : 1; /* Transactional-Execution control */
+ unsigned long pifo : 1; /* Transactional-Execution Program-
+ Interruption-Filtering Override */
+ unsigned long : 3;
+ unsigned long ccc : 1; /* Cryptography counter control */
+ unsigned long pec : 1; /* PAI extension control */
+ unsigned long : 17;
+ unsigned long : 3;
+ unsigned long lap : 1; /* Low-address-protection control */
+ unsigned long : 4;
+ unsigned long edat : 1; /* Enhanced-DAT-enablement control */
+ unsigned long : 2;
+ unsigned long iep : 1; /* Instruction-Execution-Protection */
+ unsigned long : 1;
+ unsigned long afp : 1; /* AFP-register control */
+ unsigned long vx : 1; /* Vector enablement control */
+ unsigned long : 7;
+ unsigned long sssm : 1; /* Service signal subclass mask */
+ unsigned long : 9;
+ };
+};
+
+union ctlreg2 {
+ unsigned long val;
+ struct ctlreg reg;
+ struct {
+ unsigned long : 33;
+ unsigned long ducto : 25;
+ unsigned long : 1;
+ unsigned long gse : 1;
+ unsigned long : 1;
+ unsigned long tds : 1;
+ unsigned long tdc : 2;
+ };
+};
+
+union ctlreg5 {
+ unsigned long val;
+ struct ctlreg reg;
+ struct {
+ unsigned long : 33;
+ unsigned long pasteo: 25;
+ unsigned long : 6;
+ };
+};
+
+union ctlreg15 {
+ unsigned long val;
+ struct ctlreg reg;
+ struct {
+ unsigned long lsea : 61;
+ unsigned long : 3;
+ };
+};
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_S390_CTLREG_H */
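
The new header keeps the z/Architecture convention of numbering bits from the most significant bit, hence the (63 - n) definitions above. A minimal usage sketch, assuming only the declarations in this file; the function itself is hypothetical:

	/* Hypothetical example: manipulate CR0 with the new typed API. */
	static void ctlreg_sketch(void)
	{
		struct ctlreg cr0;

		/* This CPU only; the previous CR0 contents are returned. */
		local_ctl_set_bit(0, CR0_CPU_TIMER_SUBMASK_BIT);
		/* All CPUs; assumed to be serialized via system_ctlreg_lock(). */
		system_ctl_set_bit(0, CR0_LOW_ADDRESS_PROTECTION_BIT);
		/* Read CR0 back on this CPU. */
		local_ctl_store(0, &cr0);
	}
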
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 77f24262c25c..ccd4e148b5ed 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -4,8 +4,8 @@
*
* Copyright IBM Corp. 1999, 2020
*/
-#ifndef DEBUG_H
-#define DEBUG_H
+#ifndef _ASM_S390_DEBUG_H
+#define _ASM_S390_DEBUG_H
#include <linux/string.h>
#include <linux/spinlock.h>
@@ -222,7 +222,7 @@ static inline debug_entry_t *debug_text_event(debug_info_t *id, int level,
/*
* IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are
- * stored in the s390dbf. See Documentation/s390/s390dbf.rst for more details!
+ * stored in the s390dbf. See Documentation/arch/s390/s390dbf.rst for more details!
*/
extern debug_entry_t *
__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
@@ -350,7 +350,7 @@ static inline debug_entry_t *debug_text_exception(debug_info_t *id, int level,
/*
* IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are
- * stored in the s390dbf. See Documentation/s390/s390dbf.rst for more details!
+ * stored in the s390dbf. See Documentation/arch/s390/s390dbf.rst for more details!
*/
extern debug_entry_t *
__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
@@ -487,4 +487,4 @@ void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas);
#endif /* MODULE */
-#endif /* DEBUG_H */
+#endif /* _ASM_S390_DEBUG_H */
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 56e99c286d12..20b94220113b 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -12,6 +12,7 @@
#include <linux/if_ether.h>
#include <linux/percpu.h>
#include <asm/asm-extable.h>
+#include <asm/cio.h>
enum diag_stat_enum {
DIAG_STAT_X008,
@@ -20,6 +21,7 @@ enum diag_stat_enum {
DIAG_STAT_X014,
DIAG_STAT_X044,
DIAG_STAT_X064,
+ DIAG_STAT_X08C,
DIAG_STAT_X09C,
DIAG_STAT_X0DC,
DIAG_STAT_X204,
@@ -34,6 +36,7 @@ enum diag_stat_enum {
DIAG_STAT_X304,
DIAG_STAT_X308,
DIAG_STAT_X318,
+ DIAG_STAT_X320,
DIAG_STAT_X500,
NR_DIAG_STAT
};
@@ -41,6 +44,13 @@ enum diag_stat_enum {
void diag_stat_inc(enum diag_stat_enum nr);
void diag_stat_inc_norecursion(enum diag_stat_enum nr);
+struct hypfs_diag0c_entry;
+
+/*
+ * Diagnose 0c: Pseudo Timer
+ */
+void diag0c(struct hypfs_diag0c_entry *data);
+
/*
* Diagnose 10: Release page range
*/
@@ -79,10 +89,20 @@ struct diag210 {
u8 vrdccrty; /* real device type (output) */
u8 vrdccrmd; /* real device model (output) */
u8 vrdccrft; /* real device feature (output) */
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
extern int diag210(struct diag210 *addr);
+struct diag8c {
+ u8 flags;
+ u8 num_partitions;
+ u16 width;
+ u16 height;
+ u8 data[];
+} __packed __aligned(4);
+
+extern int diag8c(struct diag8c *out, struct ccw_dev_id *devno);
+
/* bit is set in flags, when physical cpu info is included in diag 204 data */
#define DIAG204_LPAR_PHYS_FLG 0x80
#define DIAG204_LPAR_NAME_LEN 8 /* lpar name len in diag 204 data */
@@ -96,6 +116,8 @@ enum diag204_sc {
DIAG204_SUBC_STIB7 = 7
};
+#define DIAG204_SUBCODE_MASK 0xffff
+
/* The two available diag 204 data formats */
enum diag204_format {
DIAG204_INFO_SIMPLE = 0,
@@ -316,9 +338,10 @@ struct hypfs_diag0c_entry;
*/
struct diag_ops {
int (*diag210)(struct diag210 *addr);
- int (*diag26c)(void *req, void *resp, enum diag26c_sc subcode);
+ int (*diag26c)(unsigned long rx, unsigned long rx1, enum diag26c_sc subcode);
int (*diag14)(unsigned long rx, unsigned long ry1, unsigned long subcode);
- void (*diag0c)(struct hypfs_diag0c_entry *entry);
+ int (*diag8c)(struct diag8c *addr, struct ccw_dev_id *devno, size_t len);
+ void (*diag0c)(unsigned long rx);
void (*diag308_reset)(void);
};
@@ -326,9 +349,10 @@ extern struct diag_ops diag_amode31_ops;
extern struct diag210 *__diag210_tmp_amode31;
int _diag210_amode31(struct diag210 *addr);
-int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode);
+int _diag26c_amode31(unsigned long rx, unsigned long rx1, enum diag26c_sc subcode);
int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode);
-void _diag0c_amode31(struct hypfs_diag0c_entry *entry);
+void _diag0c_amode31(unsigned long rx);
void _diag308_reset_amode31(void);
+int _diag8c_amode31(struct diag8c *addr, struct ccw_dev_id *devno, size_t len);
#endif /* _ASM_S390_DIAG_H */
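
A short, hedged sketch of how diag210() is typically called; the input fields vrdcdvno (device number) and vrdclen (structure length) do not appear in the hunk above and are assumptions from the full structure definition:

	/* Hypothetical example: query the real device type of device 0x1234. */
	static int diag210_sketch(void)
	{
		struct diag210 info = {
			.vrdcdvno = 0x1234,	/* assumed input field: device number */
			.vrdclen = sizeof(info),	/* assumed input field: length */
		};

		if (diag210(&info))
			return -EOPNOTSUPP;
		return info.vrdccrty;	/* real device type (output) */
	}
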
diff --git a/arch/s390/include/asm/dma-types.h b/arch/s390/include/asm/dma-types.h
new file mode 100644
index 000000000000..5c5734e6946c
--- /dev/null
+++ b/arch/s390/include/asm/dma-types.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_S390_DMA_TYPES_H_
+#define _ASM_S390_DMA_TYPES_H_
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+/*
+ * typedef dma32_t
+ * Contains a 31 bit absolute address to a DMA capable piece of storage.
+ *
+ * For CIO, DMA addresses are always absolute addresses. These addresses tend
+ * to be used in architectured memory blocks (like ORB, IDAW, MIDAW). Under
+ * certain circumstances 31 bit wide addresses must be used because the
+ * respective field in such a block is only 31 bits wide.
+ *
+ * This type is to be used when such fields can be modelled as 32 bit wide.
+ */
+typedef u32 __bitwise dma32_t;
+
+/*
+ * typedef dma64_t
+ * Contains a 64 bit absolute address to a DMA capable piece of storage.
+ *
+ * For CIO, DMA addresses are always absolute addresses. These addresses tend
+ * to be used in architectured memory blocks (like ORB, IDAW, MIDAW).
+ *
+ * This type is to be used to model such 64 bit wide fields.
+ */
+typedef u64 __bitwise dma64_t;
+
+/*
+ * Although DMA addresses should be obtained using the DMA API, in cases when
+ * it is known that the first argument holds a virtual address that points to
+ * DMA-able 31 bit addressable storage, this function can be used safely.
+ */
+static inline dma32_t virt_to_dma32(void *ptr)
+{
+ return (__force dma32_t)__pa32(ptr);
+}
+
+static inline void *dma32_to_virt(dma32_t addr)
+{
+ return __va((__force unsigned long)addr);
+}
+
+static inline dma32_t u32_to_dma32(u32 addr)
+{
+ return (__force dma32_t)addr;
+}
+
+static inline u32 dma32_to_u32(dma32_t addr)
+{
+ return (__force u32)addr;
+}
+
+static inline dma32_t dma32_add(dma32_t a, u32 b)
+{
+ return (__force dma32_t)((__force u32)a + b);
+}
+
+static inline dma32_t dma32_and(dma32_t a, u32 b)
+{
+ return (__force dma32_t)((__force u32)a & b);
+}
+
+/*
+ * Although DMA addresses should be obtained using the DMA API, in cases when
+ * it is known that the first argument holds a virtual address that points to
+ * DMA-able storage, this function can be used safely.
+ */
+static inline dma64_t virt_to_dma64(void *ptr)
+{
+ return (__force dma64_t)__pa(ptr);
+}
+
+static inline void *dma64_to_virt(dma64_t addr)
+{
+ return __va((__force unsigned long)addr);
+}
+
+static inline dma64_t u64_to_dma64(u64 addr)
+{
+ return (__force dma64_t)addr;
+}
+
+static inline u64 dma64_to_u64(dma64_t addr)
+{
+ return (__force u64)addr;
+}
+
+static inline dma64_t dma64_add(dma64_t a, u64 b)
+{
+ return (__force dma64_t)((__force u64)a + b);
+}
+
+static inline dma64_t dma64_and(dma64_t a, u64 b)
+{
+ return (__force dma64_t)((__force u64)a & b);
+}
+
+#endif /* _ASM_S390_DMA_TYPES_H_ */
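
Because dma32_t and dma64_t are __bitwise types, sparse treats them as incompatible with plain integers, which catches accidental mixing of virtual and channel-program addresses. A minimal sketch, assuming a 4K block alignment purely for illustration:

	/* Hypothetical example: derive an aligned channel-program address. */
	static dma64_t block_addr_sketch(void *buf)
	{
		dma64_t addr = virt_to_dma64(buf);

		/* dma64_and() preserves the __bitwise type, so sparse can
		 * still flag any accidental mixing with plain u64 values. */
		return dma64_and(addr, ~0xfffUL);
	}
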
diff --git a/arch/s390/include/asm/dma.h b/arch/s390/include/asm/dma.h
index 6f26f35d4a71..7fe3e31956d7 100644
--- a/arch/s390/include/asm/dma.h
+++ b/arch/s390/include/asm/dma.h
@@ -2,19 +2,13 @@
#ifndef _ASM_S390_DMA_H
#define _ASM_S390_DMA_H
-#include <asm/io.h>
+#include <linux/io.h>
/*
 * MAX_DMA_ADDRESS is ambiguous because on s390 it's completely unrelated
* to DMA. It _is_ used for the s390 memory zone split at 2GB caused
* by the 31 bit heritage.
*/
-#define MAX_DMA_ADDRESS 0x80000000
-
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
+#define MAX_DMA_ADDRESS __va(0x80000000)
#endif /* _ASM_S390_DMA_H */
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
index 06f795855af7..c4589ec4505e 100644
--- a/arch/s390/include/asm/eadm.h
+++ b/arch/s390/include/asm/eadm.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <linux/device.h>
#include <linux/blk_types.h>
+#include <asm/dma-types.h>
struct arqb {
u64 data;
@@ -45,7 +46,7 @@ struct msb {
u16:12;
u16 bs:4;
u32 blk_count;
- u64 data_addr;
+ dma64_t data_addr;
u64 scm_addr;
u64:64;
} __packed;
@@ -54,7 +55,7 @@ struct aidaw {
u8 flags;
u32 :24;
u32 :32;
- u64 data_addr;
+ dma64_t data_addr;
} __packed;
#define MSB_OC_CLEAR 0
diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
index 000de2b1e67a..7f5004065e8a 100644
--- a/arch/s390/include/asm/entry-common.h
+++ b/arch/s390/include/asm/entry-common.h
@@ -8,7 +8,7 @@
#include <linux/processor.h>
#include <linux/uaccess.h>
#include <asm/timex.h>
-#include <asm/fpu/api.h>
+#include <asm/fpu.h>
#include <asm/pai.h>
#define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_GUARDED_STORAGE | _TIF_PER_TRAP)
@@ -41,8 +41,7 @@ static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
static __always_inline void arch_exit_to_user_mode(void)
{
- if (test_cpu_flag(CIF_FPU))
- __load_fpu_regs();
+ load_user_fpu_regs();
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
debug_user_asce(1);
@@ -60,9 +59,4 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
-static inline bool on_thread_stack(void)
-{
- return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
-}
-
#endif
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 94b6919026df..796007125dff 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -111,4 +111,10 @@ static inline void stfle(u64 *stfle_fac_list, int size)
preempt_enable();
}
+/**
+ * stfle_size - Actual size of the facility list as specified by stfle
+ * (number of double words)
+ */
+unsigned int stfle_size(void);
+
#endif /* __ASM_FACILITY_H */
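
stfle_size() complements test_facility(): instead of probing a single bit it reports how many doublewords the machine actually stores. A minimal sketch, assuming process context and the stfle() helper shown above:

	/* Hypothetical example: read the facility list at its full size. */
	static u64 *facility_list_sketch(void)
	{
		unsigned int nr = stfle_size();	/* number of doublewords */
		u64 *list = kcalloc(nr, sizeof(*list), GFP_KERNEL);

		if (list)
			stfle(list, nr);
		return list;
	}
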
diff --git a/arch/s390/include/asm/fault.h b/arch/s390/include/asm/fault.h
new file mode 100644
index 000000000000..d326f56603d6
--- /dev/null
+++ b/arch/s390/include/asm/fault.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 1999, 2023
+ */
+#ifndef _ASM_S390_FAULT_H
+#define _ASM_S390_FAULT_H
+
+union teid {
+ unsigned long val;
+ struct {
+ unsigned long addr : 52; /* Translation-exception Address */
+ unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
+ unsigned long : 2;
+ unsigned long b56 : 1;
+ unsigned long : 3;
+ unsigned long b60 : 1;
+ unsigned long b61 : 1;
+ unsigned long as : 2; /* ASCE Identifier */
+ };
+};
+
+enum {
+ TEID_FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
+ TEID_FSI_STORE = 1, /* Exception was due to store operation */
+ TEID_FSI_FETCH = 2 /* Exception was due to fetch operation */
+};
+
+#endif /* _ASM_S390_FAULT_H */
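
The union makes decoding the translation-exception identification a matter of reading named bit fields instead of open-coded shifts. A minimal sketch using only the names defined above:

	/* Hypothetical example: did the access exception come from a store? */
	static bool fault_was_store(unsigned long teid_val)
	{
		union teid teid = { .val = teid_val };

		return teid.fsi == TEID_FSI_STORE;
	}
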
diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h
index b8a028a36173..80f82a739b45 100644
--- a/arch/s390/include/asm/fcx.h
+++ b/arch/s390/include/asm/fcx.h
@@ -10,6 +10,7 @@
#define _ASM_S390_FCX_H
#include <linux/types.h>
+#include <asm/dma-types.h>
#define TCW_FORMAT_DEFAULT 0
#define TCW_TIDAW_FORMAT_DEFAULT 0
@@ -43,16 +44,16 @@ struct tcw {
u32 r:1;
u32 w:1;
u32 :16;
- u64 output;
- u64 input;
- u64 tsb;
- u64 tccb;
+ dma64_t output;
+ dma64_t input;
+ dma64_t tsb;
+ dma64_t tccb;
u32 output_count;
u32 input_count;
u32 :32;
u32 :32;
u32 :32;
- u32 intrg;
+ dma32_t intrg;
} __attribute__ ((packed, aligned(64)));
#define TIDAW_FLAGS_LAST (1 << (7 - 0))
@@ -73,7 +74,7 @@ struct tidaw {
u32 flags:8;
u32 :24;
u32 count;
- u64 addr;
+ dma64_t addr;
} __attribute__ ((packed, aligned(16)));
/**
@@ -286,7 +287,7 @@ struct tccb_tcat {
*/
struct tccb {
struct tccb_tcah tcah;
- u8 tca[0];
+ u8 tca[];
} __attribute__ ((packed, aligned(8)));
struct tcw *tcw_get_intrg(struct tcw *tcw);
diff --git a/arch/s390/include/asm/vx-insn.h b/arch/s390/include/asm/fpu-insn-asm.h
index 95480ed9149e..02ccfe46050a 100644
--- a/arch/s390/include/asm/vx-insn.h
+++ b/arch/s390/include/asm/fpu-insn-asm.h
@@ -9,11 +9,14 @@
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
-#ifndef __ASM_S390_VX_INSN_H
-#define __ASM_S390_VX_INSN_H
+#ifndef __ASM_S390_FPU_INSN_ASM_H
+#define __ASM_S390_FPU_INSN_ASM_H
-#ifdef __ASSEMBLY__
+#ifndef __ASM_S390_FPU_INSN_H
+#error only <asm/fpu-insn.h> can be included directly
+#endif
+#ifdef __ASSEMBLY__
/* Macros to generate vector instruction byte code */
@@ -192,10 +195,26 @@
/* RXB - Compute most significant bit used vector registers
*
* @rxb: Operand to store computed RXB value
- * @v1: First vector register designated operand
- * @v2: Second vector register designated operand
- * @v3: Third vector register designated operand
- * @v4: Fourth vector register designated operand
+ * @v1: Vector register designated operand whose MSB is stored in
+ * RXB bit 0 (instruction bit 36) and whose remaining bits
+ * are stored in instruction bits 8-11.
+ * @v2: Vector register designated operand whose MSB is stored in
+ * RXB bit 1 (instruction bit 37) and whose remaining bits
+ * are stored in instruction bits 12-15.
+ * @v3: Vector register designated operand whose MSB is stored in
+ * RXB bit 2 (instruction bit 38) and whose remaining bits
+ * are stored in instruction bits 16-19.
+ * @v4: Vector register designated operand whose MSB is stored in
+ * RXB bit 3 (instruction bit 39) and whose remaining bits
+ * are stored in instruction bits 32-35.
+ *
+ * Note: In most vector instruction formats [1] V1, V2, V3, and V4 directly
+ * correspond to @v1, @v2, @v3, and @v4. But there are exceptions, such as but
+ * not limited to the vector instruction formats VRR-g, VRR-h, VRS-a, VRS-d,
+ * and VSI.
+ *
+ * [1] IBM z/Architecture Principles of Operation, chapter "Program
+ * Execution", section "Instructions", subsection "Instruction Formats".
*/
.macro RXB rxb v1 v2=0 v3=0 v4=0
\rxb = 0
@@ -220,6 +239,9 @@
* @v2: Second vector register designated operand (for RXB)
* @v3: Third vector register designated operand (for RXB)
* @v4: Fourth vector register designated operand (for RXB)
+ *
+ * Note: For @v1, @v2, @v3, and @v4 also refer to the RXB macro
+ * description for further details.
*/
.macro MRXB m v1 v2=0 v3=0 v4=0
rxb = 0
@@ -235,6 +257,9 @@
* @v2: Second vector register designated operand (for RXB)
* @v3: Third vector register designated operand (for RXB)
* @v4: Fourth vector register designated operand (for RXB)
+ *
+ * Note: For @v1, @v2, @v3, and @v4 also refer to the RXB macro
+ * description for further details.
*/
.macro MRXBOPC m opc v1 v2=0 v3=0 v4=0
MRXB \m, \v1, \v2, \v3, \v4
@@ -347,7 +372,7 @@
VX_NUM v3, \vr
.word 0xE700 | (r1 << 4) | (v3&15)
.word (b2 << 12) | (\disp)
- MRXBOPC \m, 0x21, v3
+ MRXBOPC \m, 0x21, 0, v3
.endm
.macro VLGVB gr, vr, disp, base="%r0"
VLGV \gr, \vr, \disp, \base, 0
@@ -496,6 +521,25 @@
VMRL \vr1, \vr2, \vr3, 3
.endm
+/* VECTOR LOAD WITH LENGTH */
+.macro VLL v, gr, disp, base
+ VX_NUM v1, \v
+ GR_NUM b2, \base
+ GR_NUM r3, \gr
+ .word 0xE700 | ((v1&15) << 4) | r3
+ .word (b2 << 12) | (\disp)
+ MRXBOPC 0, 0x37, v1
+.endm
+
+/* VECTOR STORE WITH LENGTH */
+.macro VSTL v, gr, disp, base
+ VX_NUM v1, \v
+ GR_NUM b2, \base
+ GR_NUM r3, \gr
+ .word 0xE700 | ((v1&15) << 4) | r3
+ .word (b2 << 12) | (\disp)
+ MRXBOPC 0, 0x3f, v1
+.endm
/* Vector integer instructions */
@@ -509,6 +553,16 @@
MRXBOPC 0, 0x68, v1, v2, v3
.endm
+/* VECTOR CHECKSUM */
+.macro VCKSM vr1, vr2, vr3
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC 0, 0x66, v1, v2, v3
+.endm
+
/* VECTOR EXCLUSIVE OR */
.macro VX vr1, vr2, vr3
VX_NUM v1, \vr1
@@ -675,4 +729,4 @@
.endm
#endif /* __ASSEMBLY__ */
-#endif /* __ASM_S390_VX_INSN_H */
+#endif /* __ASM_S390_FPU_INSN_ASM_H */
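
A worked example of the RXB split, assuming the encoding described in the macro comment: vector register numbers are five bits wide, but the instruction fields hold only four, so the most significant bit of each operand moves into RXB.

	/* VLR %v17,%v2: v1 = 17 = 0b10001, v2 = 2 = 0b00010.
	 * The low four bits of v1 (0b0001) land in instruction bits 8-11,
	 * its MSB lands in RXB bit 0 (instruction bit 36), and RXB bits
	 * 1-3 stay zero because v2 < 16; hence RXB = 0b1000 = 8.
	 */
	VLR	%v17,%v2
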
diff --git a/arch/s390/include/asm/fpu-insn.h b/arch/s390/include/asm/fpu-insn.h
new file mode 100644
index 000000000000..c1e2e521d9af
--- /dev/null
+++ b/arch/s390/include/asm/fpu-insn.h
@@ -0,0 +1,486 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Floating Point and Vector Instructions
+ *
+ */
+
+#ifndef __ASM_S390_FPU_INSN_H
+#define __ASM_S390_FPU_INSN_H
+
+#include <asm/fpu-insn-asm.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/instrumented.h>
+#include <asm/asm-extable.h>
+
+asm(".include \"asm/fpu-insn-asm.h\"\n");
+
+/*
+ * Various small helper functions, which can and should be used within
+ * kernel fpu code sections. Each function represents only one floating
+ * point or vector instruction (except for helper functions which require
+ * exception handling).
+ *
+ * This allows floating point and vector instructions to be used like C
+ * functions, which has the advantage that all supporting code, such as
+ * loops, can be written in easy-to-read C code.
+ *
+ * Each of the helper functions provides support for code instrumentation,
+ * e.g. KASAN, so instrumentation is also covered automatically
+ * when using these functions.
+ *
+ * In order to ensure that code generated with the helper functions stays
+ * within kernel fpu sections, which are guarded with kernel_fpu_begin()
+ * and kernel_fpu_end() calls, each function has a mandatory "memory"
+ * barrier.
+ */
+
+static __always_inline void fpu_cefbr(u8 f1, s32 val)
+{
+ asm volatile("cefbr %[f1],%[val]\n"
+ :
+ : [f1] "I" (f1), [val] "d" (val)
+ : "memory");
+}
+
+static __always_inline unsigned long fpu_cgebr(u8 f2, u8 mode)
+{
+ unsigned long val;
+
+ asm volatile("cgebr %[val],%[mode],%[f2]\n"
+ : [val] "=d" (val)
+ : [f2] "I" (f2), [mode] "I" (mode)
+ : "memory");
+ return val;
+}
+
+static __always_inline void fpu_debr(u8 f1, u8 f2)
+{
+ asm volatile("debr %[f1],%[f2]\n"
+ :
+ : [f1] "I" (f1), [f2] "I" (f2)
+ : "memory");
+}
+
+static __always_inline void fpu_ld(unsigned short fpr, freg_t *reg)
+{
+ instrument_read(reg, sizeof(*reg));
+ asm volatile("ld %[fpr],%[reg]\n"
+ :
+ : [fpr] "I" (fpr), [reg] "Q" (reg->ui)
+ : "memory");
+}
+
+static __always_inline void fpu_ldgr(u8 f1, u32 val)
+{
+ asm volatile("ldgr %[f1],%[val]\n"
+ :
+ : [f1] "I" (f1), [val] "d" (val)
+ : "memory");
+}
+
+static __always_inline void fpu_lfpc(unsigned int *fpc)
+{
+ instrument_read(fpc, sizeof(*fpc));
+ asm volatile("lfpc %[fpc]"
+ :
+ : [fpc] "Q" (*fpc)
+ : "memory");
+}
+
+/**
+ * fpu_lfpc_safe - Load floating point control register safely.
+ * @fpc: new value for floating point control register
+ *
+ * Load floating point control register. This may lead to an exception,
+ * since a saved value may have been modified by user space (ptrace,
+ * signal return, kvm registers) to an invalid value. In such a case
+ * set the floating point control register to zero.
+ */
+static inline void fpu_lfpc_safe(unsigned int *fpc)
+{
+ u32 tmp;
+
+ instrument_read(fpc, sizeof(*fpc));
+ asm volatile("\n"
+ "0: lfpc %[fpc]\n"
+ "1: nopr %%r7\n"
+ ".pushsection .fixup, \"ax\"\n"
+ "2: lghi %[tmp],0\n"
+ " sfpc %[tmp]\n"
+ " jg 1b\n"
+ ".popsection\n"
+ EX_TABLE(1b, 2b)
+ : [tmp] "=d" (tmp)
+ : [fpc] "Q" (*fpc)
+ : "memory");
+}
+
+static __always_inline void fpu_std(unsigned short fpr, freg_t *reg)
+{
+ instrument_write(reg, sizeof(*reg));
+ asm volatile("std %[fpr],%[reg]\n"
+ : [reg] "=Q" (reg->ui)
+ : [fpr] "I" (fpr)
+ : "memory");
+}
+
+static __always_inline void fpu_sfpc(unsigned int fpc)
+{
+ asm volatile("sfpc %[fpc]"
+ :
+ : [fpc] "d" (fpc)
+ : "memory");
+}
+
+static __always_inline void fpu_stfpc(unsigned int *fpc)
+{
+ instrument_write(fpc, sizeof(*fpc));
+ asm volatile("stfpc %[fpc]"
+ : [fpc] "=Q" (*fpc)
+ :
+ : "memory");
+}
+
+static __always_inline void fpu_vab(u8 v1, u8 v2, u8 v3)
+{
+ asm volatile("VAB %[v1],%[v2],%[v3]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+ : "memory");
+}
+
+static __always_inline void fpu_vcksm(u8 v1, u8 v2, u8 v3)
+{
+ asm volatile("VCKSM %[v1],%[v2],%[v3]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+ : "memory");
+}
+
+static __always_inline void fpu_vesravb(u8 v1, u8 v2, u8 v3)
+{
+ asm volatile("VESRAVB %[v1],%[v2],%[v3]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+ : "memory");
+}
+
+static __always_inline void fpu_vgfmag(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+ asm volatile("VGFMAG %[v1],%[v2],%[v3],%[v4]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3), [v4] "I" (v4)
+ : "memory");
+}
+
+static __always_inline void fpu_vgfmg(u8 v1, u8 v2, u8 v3)
+{
+ asm volatile("VGFMG %[v1],%[v2],%[v3]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+ : "memory");
+}
+
+#ifdef CONFIG_CC_IS_CLANG
+
+static __always_inline void fpu_vl(u8 v1, const void *vxr)
+{
+ instrument_read(vxr, sizeof(__vector128));
+ asm volatile("\n"
+ " la 1,%[vxr]\n"
+ " VL %[v1],0,,1\n"
+ :
+ : [vxr] "R" (*(__vector128 *)vxr),
+ [v1] "I" (v1)
+ : "memory", "1");
+}
+
+#else /* CONFIG_CC_IS_CLANG */
+
+static __always_inline void fpu_vl(u8 v1, const void *vxr)
+{
+ instrument_read(vxr, sizeof(__vector128));
+ asm volatile("VL %[v1],%O[vxr],,%R[vxr]\n"
+ :
+ : [vxr] "Q" (*(__vector128 *)vxr),
+ [v1] "I" (v1)
+ : "memory");
+}
+
+#endif /* CONFIG_CC_IS_CLANG */
+
+static __always_inline void fpu_vleib(u8 v, s16 val, u8 index)
+{
+ asm volatile("VLEIB %[v],%[val],%[index]"
+ :
+ : [v] "I" (v), [val] "K" (val), [index] "I" (index)
+ : "memory");
+}
+
+static __always_inline void fpu_vleig(u8 v, s16 val, u8 index)
+{
+ asm volatile("VLEIG %[v],%[val],%[index]"
+ :
+ : [v] "I" (v), [val] "K" (val), [index] "I" (index)
+ : "memory");
+}
+
+static __always_inline u64 fpu_vlgvf(u8 v, u16 index)
+{
+ u64 val;
+
+ asm volatile("VLGVF %[val],%[v],%[index]"
+ : [val] "=d" (val)
+ : [v] "I" (v), [index] "L" (index)
+ : "memory");
+ return val;
+}
+
+#ifdef CONFIG_CC_IS_CLANG
+
+static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
+{
+ unsigned int size;
+
+ size = min(index + 1, sizeof(__vector128));
+ instrument_read(vxr, size);
+ asm volatile("\n"
+ " la 1,%[vxr]\n"
+ " VLL %[v1],%[index],0,1\n"
+ :
+ : [vxr] "R" (*(u8 *)vxr),
+ [index] "d" (index),
+ [v1] "I" (v1)
+ : "memory", "1");
+}
+
+#else /* CONFIG_CC_IS_CLANG */
+
+static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
+{
+ unsigned int size;
+
+ size = min(index + 1, sizeof(__vector128));
+ instrument_read(vxr, size);
+ asm volatile("VLL %[v1],%[index],%O[vxr],%R[vxr]\n"
+ :
+ : [vxr] "Q" (*(u8 *)vxr),
+ [index] "d" (index),
+ [v1] "I" (v1)
+ : "memory");
+}
+
+#endif /* CONFIG_CC_IS_CLANG */
+
+#ifdef CONFIG_CC_IS_CLANG
+
+#define fpu_vlm(_v1, _v3, _vxrs) \
+({ \
+ unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \
+ struct { \
+ __vector128 _v[(_v3) - (_v1) + 1]; \
+ } *_v = (void *)(_vxrs); \
+ \
+ instrument_read(_v, size); \
+ asm volatile("\n" \
+ " la 1,%[vxrs]\n" \
+ " VLM %[v1],%[v3],0,1\n" \
+ : \
+ : [vxrs] "R" (*_v), \
+ [v1] "I" (_v1), [v3] "I" (_v3) \
+ : "memory", "1"); \
+ (_v3) - (_v1) + 1; \
+})
+
+#else /* CONFIG_CC_IS_CLANG */
+
+#define fpu_vlm(_v1, _v3, _vxrs) \
+({ \
+ unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \
+ struct { \
+ __vector128 _v[(_v3) - (_v1) + 1]; \
+ } *_v = (void *)(_vxrs); \
+ \
+ instrument_read(_v, size); \
+ asm volatile("VLM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
+ : \
+ : [vxrs] "Q" (*_v), \
+ [v1] "I" (_v1), [v3] "I" (_v3) \
+ : "memory"); \
+ (_v3) - (_v1) + 1; \
+})
+
+#endif /* CONFIG_CC_IS_CLANG */
+
+static __always_inline void fpu_vlr(u8 v1, u8 v2)
+{
+ asm volatile("VLR %[v1],%[v2]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2)
+ : "memory");
+}
+
+static __always_inline void fpu_vlvgf(u8 v, u32 val, u16 index)
+{
+ asm volatile("VLVGF %[v],%[val],%[index]"
+ :
+ : [v] "I" (v), [val] "d" (val), [index] "L" (index)
+ : "memory");
+}
+
+static __always_inline void fpu_vn(u8 v1, u8 v2, u8 v3)
+{
+ asm volatile("VN %[v1],%[v2],%[v3]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+ : "memory");
+}
+
+static __always_inline void fpu_vperm(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+ asm volatile("VPERM %[v1],%[v2],%[v3],%[v4]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3), [v4] "I" (v4)
+ : "memory");
+}
+
+static __always_inline void fpu_vrepib(u8 v1, s16 i2)
+{
+ asm volatile("VREPIB %[v1],%[i2]"
+ :
+ : [v1] "I" (v1), [i2] "K" (i2)
+ : "memory");
+}
+
+static __always_inline void fpu_vsrlb(u8 v1, u8 v2, u8 v3)
+{
+ asm volatile("VSRLB %[v1],%[v2],%[v3]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+ : "memory");
+}
+
+#ifdef CONFIG_CC_IS_CLANG
+
+static __always_inline void fpu_vst(u8 v1, const void *vxr)
+{
+ instrument_write(vxr, sizeof(__vector128));
+ asm volatile("\n"
+ " la 1,%[vxr]\n"
+ " VST %[v1],0,,1\n"
+ : [vxr] "=R" (*(__vector128 *)vxr)
+ : [v1] "I" (v1)
+ : "memory", "1");
+}
+
+#else /* CONFIG_CC_IS_CLANG */
+
+static __always_inline void fpu_vst(u8 v1, const void *vxr)
+{
+ instrument_write(vxr, sizeof(__vector128));
+ asm volatile("VST %[v1],%O[vxr],,%R[vxr]\n"
+ : [vxr] "=Q" (*(__vector128 *)vxr)
+ : [v1] "I" (v1)
+ : "memory");
+}
+
+#endif /* CONFIG_CC_IS_CLANG */
+
+#ifdef CONFIG_CC_IS_CLANG
+
+static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
+{
+ unsigned int size;
+
+ size = min(index + 1, sizeof(__vector128));
+ instrument_write(vxr, size);
+ asm volatile("\n"
+ " la 1,%[vxr]\n"
+ " VSTL %[v1],%[index],0,1\n"
+ : [vxr] "=R" (*(u8 *)vxr)
+ : [index] "d" (index), [v1] "I" (v1)
+ : "memory", "1");
+}
+
+#else /* CONFIG_CC_IS_CLANG */
+
+static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
+{
+ unsigned int size;
+
+ size = min(index + 1, sizeof(__vector128));
+ instrument_write(vxr, size);
+ asm volatile("VSTL %[v1],%[index],%O[vxr],%R[vxr]\n"
+ : [vxr] "=Q" (*(u8 *)vxr)
+ : [index] "d" (index), [v1] "I" (v1)
+ : "memory");
+}
+
+#endif /* CONFIG_CC_IS_CLANG */
+
+#ifdef CONFIG_CC_IS_CLANG
+
+#define fpu_vstm(_v1, _v3, _vxrs) \
+({ \
+ unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \
+ struct { \
+ __vector128 _v[(_v3) - (_v1) + 1]; \
+ } *_v = (void *)(_vxrs); \
+ \
+ instrument_write(_v, size); \
+ asm volatile("\n" \
+ " la 1,%[vxrs]\n" \
+ " VSTM %[v1],%[v3],0,1\n" \
+ : [vxrs] "=R" (*_v) \
+ : [v1] "I" (_v1), [v3] "I" (_v3) \
+ : "memory", "1"); \
+ (_v3) - (_v1) + 1; \
+})
+
+#else /* CONFIG_CC_IS_CLANG */
+
+#define fpu_vstm(_v1, _v3, _vxrs) \
+({ \
+ unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \
+ struct { \
+ __vector128 _v[(_v3) - (_v1) + 1]; \
+ } *_v = (void *)(_vxrs); \
+ \
+ instrument_write(_v, size); \
+ asm volatile("VSTM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
+ : [vxrs] "=Q" (*_v) \
+ : [v1] "I" (_v1), [v3] "I" (_v3) \
+ : "memory"); \
+ (_v3) - (_v1) + 1; \
+})
+
+#endif /* CONFIG_CC_IS_CLANG */
+
+static __always_inline void fpu_vupllf(u8 v1, u8 v2)
+{
+ asm volatile("VUPLLF %[v1],%[v2]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2)
+ : "memory");
+}
+
+static __always_inline void fpu_vx(u8 v1, u8 v2, u8 v3)
+{
+ asm volatile("VX %[v1],%[v2],%[v3]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+ : "memory");
+}
+
+static __always_inline void fpu_vzero(u8 v)
+{
+ asm volatile("VZERO %[v]"
+ :
+ : [v] "I" (v)
+ : "memory");
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_S390_FPU_INSN_H */
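
Because every helper wraps a single instruction and carries a "memory" clobber, the helpers compose into plain C inside a kernel FPU section. A minimal sketch, assuming the caller already owns V0-V7 via kernel_fpu_begin(..., KERNEL_VXR_V0V7):

	/* Hypothetical example: XOR two 16-byte blocks using V0-V2. */
	static void vx_xor_sketch(void *dst, const void *a, const void *b)
	{
		fpu_vl(0, a);		/* V0 = *a */
		fpu_vl(1, b);		/* V1 = *b */
		fpu_vx(2, 0, 1);	/* V2 = V0 ^ V1 */
		fpu_vst(2, dst);	/* *dst = V2 */
	}
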
diff --git a/arch/s390/include/asm/fpu-types.h b/arch/s390/include/asm/fpu-types.h
new file mode 100644
index 000000000000..8d58d5a95399
--- /dev/null
+++ b/arch/s390/include/asm/fpu-types.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * FPU data structures
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+
+#ifndef _ASM_S390_FPU_TYPES_H
+#define _ASM_S390_FPU_TYPES_H
+
+#include <asm/sigcontext.h>
+
+struct fpu {
+ u32 fpc;
+ __vector128 vxrs[__NUM_VXRS] __aligned(8);
+};
+
+struct kernel_fpu_hdr {
+ int mask;
+ u32 fpc;
+};
+
+struct kernel_fpu {
+ struct kernel_fpu_hdr hdr;
+ __vector128 vxrs[] __aligned(8);
+};
+
+#define KERNEL_FPU_STRUCT(vxr_size) \
+struct kernel_fpu_##vxr_size { \
+ struct kernel_fpu_hdr hdr; \
+ __vector128 vxrs[vxr_size] __aligned(8); \
+}
+
+KERNEL_FPU_STRUCT(8);
+KERNEL_FPU_STRUCT(16);
+KERNEL_FPU_STRUCT(32);
+
+#define DECLARE_KERNEL_FPU_ONSTACK(vxr_size, name) \
+ struct kernel_fpu_##vxr_size name __uninitialized
+
+#define DECLARE_KERNEL_FPU_ONSTACK8(name) \
+ DECLARE_KERNEL_FPU_ONSTACK(8, name)
+
+#define DECLARE_KERNEL_FPU_ONSTACK16(name) \
+ DECLARE_KERNEL_FPU_ONSTACK(16, name)
+
+#define DECLARE_KERNEL_FPU_ONSTACK32(name) \
+ DECLARE_KERNEL_FPU_ONSTACK(32, name)
+
+#endif /* _ASM_S390_FPU_TYPES_H */
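
The size-specific structures let callers declare an on-stack save area that is only as large as the register range they use; kernel_fpu_begin() checks the two against each other. Roughly, KERNEL_FPU_STRUCT(16) expands to:

	struct kernel_fpu_16 {
		struct kernel_fpu_hdr hdr;
		__vector128 vxrs[16] __aligned(8);
	};

	/* ...and DECLARE_KERNEL_FPU_ONSTACK16(vxstate) declares: */
	struct kernel_fpu_16 vxstate __uninitialized;
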
diff --git a/arch/s390/include/asm/fpu.h b/arch/s390/include/asm/fpu.h
new file mode 100644
index 000000000000..c84cb33913e2
--- /dev/null
+++ b/arch/s390/include/asm/fpu.h
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * In-kernel FPU support functions
+ *
+ *
+ * Consider these guidelines before using in-kernel FPU functions:
+ *
+ * 1. Use kernel_fpu_begin() and kernel_fpu_end() to enclose all in-kernel
+ * use of floating-point or vector registers and instructions.
+ *
+ * 2. For kernel_fpu_begin(), specify the vector register range you want to
+ * use with the KERNEL_VXR_* constants. Consider these usage guidelines:
+ *
+ * a) If your function typically runs in process-context, use the lower
+ * half of the vector registers, for example, specify KERNEL_VXR_LOW.
+ * b) If your function typically runs in soft-irq or hard-irq context,
+ * prefer using the upper half of the vector registers, for example,
+ * specify KERNEL_VXR_HIGH.
+ *
+ * If you adhere to these guidelines, an interrupted process context
+ * does not need to save and restore vector registers because of the
+ * disjoint register ranges.
+ *
+ * Also note that the __kernel_fpu_begin()/__kernel_fpu_end() functions
+ * include logic to save and restore up to 16 vector registers at once.
+ *
+ * 3. You can nest kernel_fpu_begin()/kernel_fpu_end() by using different
+ * struct kernel_fpu states. Vector registers that are in use by outer
+ * levels are saved and restored. You can minimize the save and restore
+ * effort by choosing disjoint vector register ranges.
+ *
+ * 4. To use vector floating-point instructions, specify the KERNEL_FPC
+ * flag to save and restore floating-point controls in addition to any
+ * vector register range.
+ *
+ * 5. To use floating-point registers and instructions only, specify the
+ * KERNEL_FPR flag. This flag triggers a save and restore of vector
+ * registers V0 to V15 and floating-point controls.
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+
+#ifndef _ASM_S390_FPU_H
+#define _ASM_S390_FPU_H
+
+#include <linux/processor.h>
+#include <linux/preempt.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <asm/sigcontext.h>
+#include <asm/fpu-types.h>
+#include <asm/fpu-insn.h>
+#include <asm/facility.h>
+
+static inline bool cpu_has_vx(void)
+{
+ return likely(test_facility(129));
+}
+
+enum {
+ KERNEL_FPC_BIT = 0,
+ KERNEL_VXR_V0V7_BIT,
+ KERNEL_VXR_V8V15_BIT,
+ KERNEL_VXR_V16V23_BIT,
+ KERNEL_VXR_V24V31_BIT,
+};
+
+#define KERNEL_FPC BIT(KERNEL_FPC_BIT)
+#define KERNEL_VXR_V0V7 BIT(KERNEL_VXR_V0V7_BIT)
+#define KERNEL_VXR_V8V15 BIT(KERNEL_VXR_V8V15_BIT)
+#define KERNEL_VXR_V16V23 BIT(KERNEL_VXR_V16V23_BIT)
+#define KERNEL_VXR_V24V31 BIT(KERNEL_VXR_V24V31_BIT)
+
+#define KERNEL_VXR_LOW (KERNEL_VXR_V0V7 | KERNEL_VXR_V8V15)
+#define KERNEL_VXR_MID (KERNEL_VXR_V8V15 | KERNEL_VXR_V16V23)
+#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23 | KERNEL_VXR_V24V31)
+
+#define KERNEL_VXR (KERNEL_VXR_LOW | KERNEL_VXR_HIGH)
+#define KERNEL_FPR (KERNEL_FPC | KERNEL_VXR_LOW)
+
+void load_fpu_state(struct fpu *state, int flags);
+void save_fpu_state(struct fpu *state, int flags);
+void __kernel_fpu_begin(struct kernel_fpu *state, int flags);
+void __kernel_fpu_end(struct kernel_fpu *state, int flags);
+
+static __always_inline void save_vx_regs(__vector128 *vxrs)
+{
+ fpu_vstm(0, 15, &vxrs[0]);
+ fpu_vstm(16, 31, &vxrs[16]);
+}
+
+static __always_inline void load_vx_regs(__vector128 *vxrs)
+{
+ fpu_vlm(0, 15, &vxrs[0]);
+ fpu_vlm(16, 31, &vxrs[16]);
+}
+
+static __always_inline void __save_fp_regs(freg_t *fprs, unsigned int offset)
+{
+ fpu_std(0, &fprs[0 * offset]);
+ fpu_std(1, &fprs[1 * offset]);
+ fpu_std(2, &fprs[2 * offset]);
+ fpu_std(3, &fprs[3 * offset]);
+ fpu_std(4, &fprs[4 * offset]);
+ fpu_std(5, &fprs[5 * offset]);
+ fpu_std(6, &fprs[6 * offset]);
+ fpu_std(7, &fprs[7 * offset]);
+ fpu_std(8, &fprs[8 * offset]);
+ fpu_std(9, &fprs[9 * offset]);
+ fpu_std(10, &fprs[10 * offset]);
+ fpu_std(11, &fprs[11 * offset]);
+ fpu_std(12, &fprs[12 * offset]);
+ fpu_std(13, &fprs[13 * offset]);
+ fpu_std(14, &fprs[14 * offset]);
+ fpu_std(15, &fprs[15 * offset]);
+}
+
+static __always_inline void __load_fp_regs(freg_t *fprs, unsigned int offset)
+{
+ fpu_ld(0, &fprs[0 * offset]);
+ fpu_ld(1, &fprs[1 * offset]);
+ fpu_ld(2, &fprs[2 * offset]);
+ fpu_ld(3, &fprs[3 * offset]);
+ fpu_ld(4, &fprs[4 * offset]);
+ fpu_ld(5, &fprs[5 * offset]);
+ fpu_ld(6, &fprs[6 * offset]);
+ fpu_ld(7, &fprs[7 * offset]);
+ fpu_ld(8, &fprs[8 * offset]);
+ fpu_ld(9, &fprs[9 * offset]);
+ fpu_ld(10, &fprs[10 * offset]);
+ fpu_ld(11, &fprs[11 * offset]);
+ fpu_ld(12, &fprs[12 * offset]);
+ fpu_ld(13, &fprs[13 * offset]);
+ fpu_ld(14, &fprs[14 * offset]);
+ fpu_ld(15, &fprs[15 * offset]);
+}
+
+static __always_inline void save_fp_regs(freg_t *fprs)
+{
+ __save_fp_regs(fprs, sizeof(freg_t) / sizeof(freg_t));
+}
+
+static __always_inline void load_fp_regs(freg_t *fprs)
+{
+ __load_fp_regs(fprs, sizeof(freg_t) / sizeof(freg_t));
+}
+
+static __always_inline void save_fp_regs_vx(__vector128 *vxrs)
+{
+ freg_t *fprs = (freg_t *)&vxrs[0].high;
+
+ __save_fp_regs(fprs, sizeof(__vector128) / sizeof(freg_t));
+}
+
+static __always_inline void load_fp_regs_vx(__vector128 *vxrs)
+{
+ freg_t *fprs = (freg_t *)&vxrs[0].high;
+
+ __load_fp_regs(fprs, sizeof(__vector128) / sizeof(freg_t));
+}
+
+static inline void load_user_fpu_regs(void)
+{
+ struct thread_struct *thread = &current->thread;
+
+ if (!thread->ufpu_flags)
+ return;
+ load_fpu_state(&thread->ufpu, thread->ufpu_flags);
+ thread->ufpu_flags = 0;
+}
+
+static __always_inline void __save_user_fpu_regs(struct thread_struct *thread, int flags)
+{
+ save_fpu_state(&thread->ufpu, flags);
+ __atomic_or(flags, &thread->ufpu_flags);
+}
+
+static inline void save_user_fpu_regs(void)
+{
+ struct thread_struct *thread = &current->thread;
+ int mask, flags;
+
+ mask = __atomic_or(KERNEL_FPC | KERNEL_VXR, &thread->kfpu_flags);
+ flags = ~READ_ONCE(thread->ufpu_flags) & (KERNEL_FPC | KERNEL_VXR);
+ if (flags)
+ __save_user_fpu_regs(thread, flags);
+ barrier();
+ WRITE_ONCE(thread->kfpu_flags, mask);
+}
+
+static __always_inline void _kernel_fpu_begin(struct kernel_fpu *state, int flags)
+{
+ struct thread_struct *thread = &current->thread;
+ int mask, uflags;
+
+ mask = __atomic_or(flags, &thread->kfpu_flags);
+ state->hdr.mask = mask;
+ uflags = READ_ONCE(thread->ufpu_flags);
+ if ((uflags & flags) != flags)
+ __save_user_fpu_regs(thread, ~uflags & flags);
+ if (mask & flags)
+ __kernel_fpu_begin(state, flags);
+}
+
+static __always_inline void _kernel_fpu_end(struct kernel_fpu *state, int flags)
+{
+ int mask = state->hdr.mask;
+
+ if (mask & flags)
+ __kernel_fpu_end(state, flags);
+ barrier();
+ WRITE_ONCE(current->thread.kfpu_flags, mask);
+}
+
+void __kernel_fpu_invalid_size(void);
+
+static __always_inline void kernel_fpu_check_size(int flags, unsigned int size)
+{
+ unsigned int cnt = 0;
+
+ if (flags & KERNEL_VXR_V0V7)
+ cnt += 8;
+ if (flags & KERNEL_VXR_V8V15)
+ cnt += 8;
+ if (flags & KERNEL_VXR_V16V23)
+ cnt += 8;
+ if (flags & KERNEL_VXR_V24V31)
+ cnt += 8;
+ if (cnt != size)
+ __kernel_fpu_invalid_size();
+}
+
+#define kernel_fpu_begin(state, flags) \
+{ \
+ typeof(state) s = (state); \
+ int _flags = (flags); \
+ \
+ kernel_fpu_check_size(_flags, ARRAY_SIZE(s->vxrs)); \
+ _kernel_fpu_begin((struct kernel_fpu *)s, _flags); \
+}
+
+#define kernel_fpu_end(state, flags) \
+{ \
+ typeof(state) s = (state); \
+ int _flags = (flags); \
+ \
+ kernel_fpu_check_size(_flags, ARRAY_SIZE(s->vxrs)); \
+ _kernel_fpu_end((struct kernel_fpu *)s, _flags); \
+}
+
+static inline void save_kernel_fpu_regs(struct thread_struct *thread)
+{
+ if (!thread->kfpu_flags)
+ return;
+ save_fpu_state(&thread->kfpu, thread->kfpu_flags);
+}
+
+static inline void restore_kernel_fpu_regs(struct thread_struct *thread)
+{
+ if (!thread->kfpu_flags)
+ return;
+ load_fpu_state(&thread->kfpu, thread->kfpu_flags);
+}
+
+static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs)
+{
+ int i;
+
+ for (i = 0; i < __NUM_FPRS; i++)
+ fprs[i].ui = vxrs[i].high;
+}
+
+static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
+{
+ int i;
+
+ for (i = 0; i < __NUM_FPRS; i++)
+ vxrs[i].high = fprs[i].ui;
+}
+
+static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
+{
+ fpregs->pad = 0;
+ fpregs->fpc = fpu->fpc;
+ convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
+}
+
+static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
+{
+ fpu->fpc = fpregs->fpc;
+ convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
+}
+
+#endif /* _ASM_S390_FPU_H */
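
A sketch tying the guidelines together: a hypothetical softirq helper that follows guideline 2b by using the upper register half. KERNEL_VXR_HIGH covers V16-V31, i.e. 16 registers, which matches the ONSTACK16 save area checked by kernel_fpu_begin(); the placement of the VCKSM result in word element 1 is an assumption from the instruction definition:

	/* Hypothetical example: checksum 16 bytes in softirq context. */
	static u32 vx_csum_sketch(const void *buf)
	{
		DECLARE_KERNEL_FPU_ONSTACK16(vxstate);
		u32 sum;

		kernel_fpu_begin(&vxstate, KERNEL_VXR_HIGH);
		fpu_vzero(16);			/* V16 = 0 (accumulator) */
		fpu_vl(17, buf);		/* V17 = 16 bytes of input */
		fpu_vcksm(16, 17, 16);		/* fold V17 into the V16 checksum */
		sum = fpu_vlgvf(16, 1);		/* result in word element 1 */
		kernel_fpu_end(&vxstate, KERNEL_VXR_HIGH);
		return sum;
	}
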
diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
deleted file mode 100644
index b714ed0ef688..000000000000
--- a/arch/s390/include/asm/fpu/api.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * In-kernel FPU support functions
- *
- *
- * Consider these guidelines before using in-kernel FPU functions:
- *
- * 1. Use kernel_fpu_begin() and kernel_fpu_end() to enclose all in-kernel
- * use of floating-point or vector registers and instructions.
- *
- * 2. For kernel_fpu_begin(), specify the vector register range you want to
- * use with the KERNEL_VXR_* constants. Consider these usage guidelines:
- *
- * a) If your function typically runs in process-context, use the lower
- * half of the vector registers, for example, specify KERNEL_VXR_LOW.
- * b) If your function typically runs in soft-irq or hard-irq context,
- * prefer using the upper half of the vector registers, for example,
- * specify KERNEL_VXR_HIGH.
- *
- * If you adhere to these guidelines, an interrupted process context
- * does not require to save and restore vector registers because of
- * disjoint register ranges.
- *
- * Also note that the __kernel_fpu_begin()/__kernel_fpu_end() functions
- * includes logic to save and restore up to 16 vector registers at once.
- *
- * 3. You can nest kernel_fpu_begin()/kernel_fpu_end() by using different
- * struct kernel_fpu states. Vector registers that are in use by outer
- * levels are saved and restored. You can minimize the save and restore
- * effort by choosing disjoint vector register ranges.
- *
- * 5. To use vector floating-point instructions, specify the KERNEL_FPC
- * flag to save and restore floating-point controls in addition to any
- * vector register range.
- *
- * 6. To use floating-point registers and instructions only, specify the
- * KERNEL_FPR flag. This flag triggers a save and restore of vector
- * registers V0 to V15 and floating-point controls.
- *
- * Copyright IBM Corp. 2015
- * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- */
-
-#ifndef _ASM_S390_FPU_API_H
-#define _ASM_S390_FPU_API_H
-
-#include <linux/preempt.h>
-#include <asm/asm-extable.h>
-
-void save_fpu_regs(void);
-void load_fpu_regs(void);
-void __load_fpu_regs(void);
-
-static inline int test_fp_ctl(u32 fpc)
-{
- u32 orig_fpc;
- int rc;
-
- asm volatile(
- " efpc %1\n"
- " sfpc %2\n"
- "0: sfpc %1\n"
- " la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "=d" (rc), "=&d" (orig_fpc)
- : "d" (fpc), "0" (-EINVAL));
- return rc;
-}
-
-#define KERNEL_FPC 1
-#define KERNEL_VXR_V0V7 2
-#define KERNEL_VXR_V8V15 4
-#define KERNEL_VXR_V16V23 8
-#define KERNEL_VXR_V24V31 16
-
-#define KERNEL_VXR_LOW (KERNEL_VXR_V0V7|KERNEL_VXR_V8V15)
-#define KERNEL_VXR_MID (KERNEL_VXR_V8V15|KERNEL_VXR_V16V23)
-#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)
-
-#define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
-#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_V0V7)
-
-struct kernel_fpu;
-
-/*
- * Note the functions below must be called with preemption disabled.
- * Do not enable preemption before calling __kernel_fpu_end() to prevent
- * an corruption of an existing kernel FPU state.
- *
- * Prefer using the kernel_fpu_begin()/kernel_fpu_end() pair of functions.
- */
-void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags);
-void __kernel_fpu_end(struct kernel_fpu *state, u32 flags);
-
-
-static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
-{
- preempt_disable();
- state->mask = S390_lowcore.fpu_flags;
- if (!test_cpu_flag(CIF_FPU))
- /* Save user space FPU state and register contents */
- save_fpu_regs();
- else if (state->mask & flags)
- /* Save FPU/vector register in-use by the kernel */
- __kernel_fpu_begin(state, flags);
- S390_lowcore.fpu_flags |= flags;
-}
-
-static inline void kernel_fpu_end(struct kernel_fpu *state, u32 flags)
-{
- S390_lowcore.fpu_flags = state->mask;
- if (state->mask & flags)
- /* Restore FPU/vector register in-use by the kernel */
- __kernel_fpu_end(state, flags);
- preempt_enable();
-}
-
-#endif /* _ASM_S390_FPU_API_H */
diff --git a/arch/s390/include/asm/fpu/internal.h b/arch/s390/include/asm/fpu/internal.h
deleted file mode 100644
index 4a71dbbf76fb..000000000000
--- a/arch/s390/include/asm/fpu/internal.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * FPU state and register content conversion primitives
- *
- * Copyright IBM Corp. 2015
- * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- */
-
-#ifndef _ASM_S390_FPU_INTERNAL_H
-#define _ASM_S390_FPU_INTERNAL_H
-
-#include <linux/string.h>
-#include <asm/ctl_reg.h>
-#include <asm/fpu/types.h>
-
-static inline void save_vx_regs(__vector128 *vxrs)
-{
- asm volatile(
- " la 1,%0\n"
- " .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
- " .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
- : "=Q" (*(struct vx_array *) vxrs) : : "1");
-}
-
-static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs)
-{
- int i;
-
- for (i = 0; i < __NUM_FPRS; i++)
- fprs[i] = *(freg_t *)(vxrs + i);
-}
-
-static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
-{
- int i;
-
- for (i = 0; i < __NUM_FPRS; i++)
- *(freg_t *)(vxrs + i) = fprs[i];
-}
-
-static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
-{
- fpregs->pad = 0;
- fpregs->fpc = fpu->fpc;
- if (MACHINE_HAS_VX)
- convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
- else
- memcpy((freg_t *)&fpregs->fprs, fpu->fprs,
- sizeof(fpregs->fprs));
-}
-
-static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
-{
- fpu->fpc = fpregs->fpc;
- if (MACHINE_HAS_VX)
- convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
- else
- memcpy(fpu->fprs, (freg_t *)&fpregs->fprs,
- sizeof(fpregs->fprs));
-}
-
-#endif /* _ASM_S390_FPU_INTERNAL_H */
diff --git a/arch/s390/include/asm/fpu/types.h b/arch/s390/include/asm/fpu/types.h
deleted file mode 100644
index d889e9436865..000000000000
--- a/arch/s390/include/asm/fpu/types.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * FPU data structures
- *
- * Copyright IBM Corp. 2015
- * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- */
-
-#ifndef _ASM_S390_FPU_TYPES_H
-#define _ASM_S390_FPU_TYPES_H
-
-#include <asm/sigcontext.h>
-
-struct fpu {
- __u32 fpc; /* Floating-point control */
- void *regs; /* Pointer to the current save area */
- union {
- /* Floating-point register save area */
- freg_t fprs[__NUM_FPRS];
- /* Vector register save area */
- __vector128 vxrs[__NUM_VXRS];
- };
-};
-
-/* VX array structure for address operand constraints in inline assemblies */
-struct vx_array { __vector128 _[__NUM_VXRS]; };
-
-/* In-kernel FPU state structure */
-struct kernel_fpu {
- u32 mask;
- u32 fpc;
- union {
- freg_t fprs[__NUM_FPRS];
- __vector128 vxrs[__NUM_VXRS];
- };
-};
-
-#endif /* _ASM_S390_FPU_TYPES_H */
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 6f80ec9c04be..621f23d5ae30 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -9,7 +9,7 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_CC_IS_CLANG
-/* https://bugs.llvm.org/show_bug.cgi?id=41424 */
+/* https://llvm.org/pr41424 */
#define ftrace_return_address(n) 0UL
#else
#define ftrace_return_address(n) __builtin_return_address(n)
@@ -54,12 +54,50 @@ static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *
return NULL;
}
-static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs,
- unsigned long ip)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+struct fgraph_ret_regs {
+ unsigned long gpr2;
+ unsigned long fp;
+};
+
+static __always_inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
+{
+ return ret_regs->gpr2;
+}
+
+static __always_inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
+{
+ return ret_regs->fp;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+static __always_inline unsigned long
+ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs)
+{
+ return fregs->regs.psw.addr;
+}
+
+static __always_inline void
+ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
+ unsigned long ip)
{
fregs->regs.psw.addr = ip;
}
+#define ftrace_regs_get_argument(fregs, n) \
+ regs_get_kernel_argument(&(fregs)->regs, n)
+#define ftrace_regs_get_stack_pointer(fregs) \
+ kernel_stack_pointer(&(fregs)->regs)
+#define ftrace_regs_return_value(fregs) \
+ regs_return_value(&(fregs)->regs)
+#define ftrace_regs_set_return_value(fregs, ret) \
+ regs_set_return_value(&(fregs)->regs, ret)
+#define ftrace_override_function_with_return(fregs) \
+ override_function_with_return(&(fregs)->regs)
+#define ftrace_regs_query_register_offset(name) \
+ regs_query_register_offset(name)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
* When an ftrace registered caller is tracing a function that is
* also set by a register_ftrace_direct() call, it needs to be
@@ -67,10 +105,12 @@ static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *f
* place the direct caller in the ORIG_GPR2 part of pt_regs. This
* tells the ftrace_caller that there's a direct caller.
*/
-static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
+static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr)
{
+ struct pt_regs *regs = &fregs->regs;
regs->orig_gpr2 = addr;
}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
/*
* Even though the system call numbers are identical for s390/s390x a
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index e08c882dccaa..eaeaeb3ff0be 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -17,7 +17,8 @@
"3: jl 1b\n" \
" lhi %0,0\n" \
"4: sacf 768\n" \
- EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
+ EX_TABLE(0b,4b) EX_TABLE(1b,4b) \
+ EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
: "=d" (ret), "=&d" (oldval), "=&d" (newval), \
"=m" (*uaddr) \
: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index 40264f60b0da..5cc46e0dde62 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -147,5 +147,42 @@ int gmap_mprotect_notify(struct gmap *, unsigned long start,
void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
unsigned long gaddr, unsigned long vmaddr);
int gmap_mark_unmergeable(void);
-void s390_reset_acc(struct mm_struct *mm);
+void s390_unlist_old_asce(struct gmap *gmap);
+int s390_replace_asce(struct gmap *gmap);
+void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
+int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, bool interruptible);
+
+/**
+ * s390_uv_destroy_range - Destroy a range of pages in the given mm.
+ * @mm: the mm to operate on
+ * @start: the start of the range
+ * @end: the end of the range
+ *
+ * This function will call cond_resched(), so it should not generate stalls,
+ * but it will otherwise only return when it has completed.
+ */
+static inline void s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ (void)__s390_uv_destroy_range(mm, start, end, false);
+}
+
+/**
+ * s390_uv_destroy_range_interruptible - Destroy a range of pages in the
+ * given mm, but stop when a fatal signal is received.
+ * @mm: the mm to operate on
+ * @start: the start of the range
+ * @end: the end of the range
+ *
+ * This function will call cond_resched(), so it should not generate stalls. If
+ * a fatal signal is received, it will return with -EINTR immediately,
+ * without finishing destroying the whole range. Upon successful
+ * completion, 0 is returned.
+ */
+static inline int s390_uv_destroy_range_interruptible(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ return __s390_uv_destroy_range(mm, start, end, true);
+}
#endif /* _ASM_S390_GMAP_H */
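
The two wrappers differ only in whether a fatal signal can cut the walk short. A minimal sketch of the interruptible variant, with a hypothetical caller:

	/* Hypothetical example: destroy a guest range from a killable context. */
	static int destroy_range_sketch(struct mm_struct *mm,
					unsigned long start, unsigned long end)
	{
		int rc = s390_uv_destroy_range_interruptible(mm, start, end);

		if (rc == -EINTR)
			pr_debug("range teardown interrupted by fatal signal\n");
		return rc;
	}
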
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index f22beda9e6d5..deb198a61039 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -16,6 +16,8 @@
#define hugepages_supported() (MACHINE_HAS_EDAT1)
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned long sz);
+void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
pte_t huge_ptep_get(pte_t *ptep);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
@@ -28,9 +30,11 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
- if (len & ~HPAGE_MASK)
+ struct hstate *h = hstate_file(file);
+
+ if (len & ~huge_page_mask(h))
return -EINVAL;
- if (addr & ~HPAGE_MASK)
+ if (addr & ~huge_page_mask(h))
return -EINVAL;
return 0;
}
@@ -63,7 +67,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
int changed = !pte_same(huge_ptep_get(ptep), pte);
if (changed) {
huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ __set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
return changed;
}
@@ -72,7 +76,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
- set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
+ __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
@@ -102,7 +106,7 @@ static inline int huge_pte_dirty(pte_t pte)
static inline pte_t huge_pte_mkwrite(pte_t pte)
{
- return pte_mkwrite(pte);
+ return pte_mkwrite_novma(pte);
}
static inline pte_t huge_pte_mkdirty(pte_t pte)
diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h
index 40eae2c08d61..ac68c657b28c 100644
--- a/arch/s390/include/asm/idals.h
+++ b/arch/s390/include/asm/idals.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
+/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
@@ -17,47 +17,65 @@
#include <linux/err.h>
#include <linux/types.h>
#include <linux/slab.h>
-#include <asm/cio.h>
#include <linux/uaccess.h>
+#include <asm/dma-types.h>
+#include <asm/cio.h>
+
+#define IDA_SIZE_SHIFT 12
+#define IDA_BLOCK_SIZE (1UL << IDA_SIZE_SHIFT)
-#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
-#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)
+#define IDA_2K_SIZE_SHIFT 11
+#define IDA_2K_BLOCK_SIZE (1UL << IDA_2K_SIZE_SHIFT)
/*
* Test if an address/length pair needs an idal list.
*/
-static inline int
-idal_is_needed(void *vaddr, unsigned int length)
+static inline bool idal_is_needed(void *vaddr, unsigned int length)
{
- return ((__pa(vaddr) + length - 1) >> 31) != 0;
-}
+	dma64_t paddr = virt_to_dma64(vaddr);
+
+	return (((__force unsigned long)(paddr) + length - 1) >> 31) != 0;
+}
/*
* Return the number of idal words needed for an address/length pair.
*/
static inline unsigned int idal_nr_words(void *vaddr, unsigned int length)
{
- return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length +
- (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
+ unsigned int cidaw;
+
+ cidaw = (unsigned long)vaddr & (IDA_BLOCK_SIZE - 1);
+ cidaw += length + IDA_BLOCK_SIZE - 1;
+ cidaw >>= IDA_SIZE_SHIFT;
+ return cidaw;
+}
+
+/*
+ * Return the number of 2K IDA words needed for an address/length pair.
+ */
+static inline unsigned int idal_2k_nr_words(void *vaddr, unsigned int length)
+{
+ unsigned int cidaw;
+
+ cidaw = (unsigned long)vaddr & (IDA_2K_BLOCK_SIZE - 1);
+ cidaw += length + IDA_2K_BLOCK_SIZE - 1;
+ cidaw >>= IDA_2K_SIZE_SHIFT;
+ return cidaw;
}
/*
* Create the list of idal words for an address/length pair.
*/
-static inline unsigned long *idal_create_words(unsigned long *idaws,
- void *vaddr, unsigned int length)
+static inline dma64_t *idal_create_words(dma64_t *idaws, void *vaddr, unsigned int length)
{
- unsigned long paddr;
+ dma64_t paddr = virt_to_dma64(vaddr);
unsigned int cidaw;
- paddr = __pa(vaddr);
- cidaw = ((paddr & (IDA_BLOCK_SIZE-1)) + length +
- (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
*idaws++ = paddr;
- paddr &= -IDA_BLOCK_SIZE;
+ cidaw = idal_nr_words(vaddr, length);
+ paddr = dma64_and(paddr, -IDA_BLOCK_SIZE);
while (--cidaw > 0) {
- paddr += IDA_BLOCK_SIZE;
+ paddr = dma64_add(paddr, IDA_BLOCK_SIZE);
*idaws++ = paddr;
}
return idaws;
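[Editor's example] As a sanity check of the word-count arithmetic above, here is a small stand-alone user-space illustration; it mirrors idal_nr_words() on plain addresses instead of dma64_t, and is not part of the patch:

#include <stdio.h>

#define IDA_SIZE_SHIFT	12
#define IDA_BLOCK_SIZE	(1UL << IDA_SIZE_SHIFT)

/*
 * Same arithmetic as idal_nr_words(): offset into the first block plus
 * the length, rounded up to whole 4K blocks.
 */
static unsigned int nr_words(unsigned long vaddr, unsigned int length)
{
	unsigned int cidaw;

	cidaw = vaddr & (IDA_BLOCK_SIZE - 1);
	cidaw += length + IDA_BLOCK_SIZE - 1;
	cidaw >>= IDA_SIZE_SHIFT;
	return cidaw;
}

int main(void)
{
	/* 12 KiB starting 2 KiB into a block touches 4 blocks -> prints 4 */
	printf("%u\n", nr_words(0x10800UL, 0x3000));
	return 0;
}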
@@ -67,36 +85,33 @@ static inline unsigned long *idal_create_words(unsigned long *idaws,
* Sets the address of the data in CCW.
* If necessary it allocates an IDAL and sets the appropriate flags.
*/
-static inline int
-set_normalized_cda(struct ccw1 * ccw, void *vaddr)
+static inline int set_normalized_cda(struct ccw1 *ccw, void *vaddr)
{
unsigned int nridaws;
- unsigned long *idal;
+ dma64_t *idal;
if (ccw->flags & CCW_FLAG_IDA)
return -EINVAL;
nridaws = idal_nr_words(vaddr, ccw->count);
if (nridaws > 0) {
- idal = kmalloc(nridaws * sizeof(unsigned long),
- GFP_ATOMIC | GFP_DMA );
- if (idal == NULL)
+ idal = kcalloc(nridaws, sizeof(*idal), GFP_ATOMIC | GFP_DMA);
+ if (!idal)
return -ENOMEM;
idal_create_words(idal, vaddr, ccw->count);
ccw->flags |= CCW_FLAG_IDA;
vaddr = idal;
}
- ccw->cda = (__u32)(unsigned long) vaddr;
+ ccw->cda = virt_to_dma32(vaddr);
return 0;
}
/*
* Releases any allocated IDAL related to the CCW.
*/
-static inline void
-clear_normalized_cda(struct ccw1 * ccw)
+static inline void clear_normalized_cda(struct ccw1 *ccw)
{
if (ccw->flags & CCW_FLAG_IDA) {
- kfree((void *)(unsigned long) ccw->cda);
+ kfree(dma32_to_virt(ccw->cda));
ccw->flags &= ~CCW_FLAG_IDA;
}
ccw->cda = 0;
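[Editor's example] A hedged usage sketch (the driver context, error handling and the actual I/O start are assumptions): set_normalized_cda() either stores the buffer address directly in the CCW or allocates an IDAL for it, and clear_normalized_cda() undoes both cases:

/* Hypothetical driver snippet; ccw->count must already be set. */
static int example_attach_buffer(struct ccw1 *ccw, void *buf)
{
	int rc;

	rc = set_normalized_cda(ccw, buf);	/* 0, -EINVAL or -ENOMEM */
	if (rc)
		return rc;
	/* ... start and await the channel program ... */
	clear_normalized_cda(ccw);		/* frees a possible IDAL */
	return 0;
}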
@@ -108,125 +123,138 @@ clear_normalized_cda(struct ccw1 * ccw)
struct idal_buffer {
size_t size;
size_t page_order;
- void *data[];
+ dma64_t data[];
};
/*
* Allocate an idal buffer
*/
-static inline struct idal_buffer *
-idal_buffer_alloc(size_t size, int page_order)
+static inline struct idal_buffer *idal_buffer_alloc(size_t size, int page_order)
{
- struct idal_buffer *ib;
int nr_chunks, nr_ptrs, i;
+ struct idal_buffer *ib;
+ void *vaddr;
- nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
- nr_chunks = (4096 << page_order) >> IDA_SIZE_LOG;
+ nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_SHIFT;
+ nr_chunks = (PAGE_SIZE << page_order) >> IDA_SIZE_SHIFT;
ib = kmalloc(struct_size(ib, data, nr_ptrs), GFP_DMA | GFP_KERNEL);
- if (ib == NULL)
+ if (!ib)
return ERR_PTR(-ENOMEM);
ib->size = size;
ib->page_order = page_order;
for (i = 0; i < nr_ptrs; i++) {
- if ((i & (nr_chunks - 1)) != 0) {
- ib->data[i] = ib->data[i-1] + IDA_BLOCK_SIZE;
- continue;
- }
- ib->data[i] = (void *)
- __get_free_pages(GFP_KERNEL, page_order);
- if (ib->data[i] != NULL)
+ if (i & (nr_chunks - 1)) {
+ ib->data[i] = dma64_add(ib->data[i - 1], IDA_BLOCK_SIZE);
continue;
- // Not enough memory
- while (i >= nr_chunks) {
- i -= nr_chunks;
- free_pages((unsigned long) ib->data[i],
- ib->page_order);
}
- kfree(ib);
- return ERR_PTR(-ENOMEM);
+ vaddr = (void *)__get_free_pages(GFP_KERNEL, page_order);
+ if (!vaddr)
+ goto error;
+ ib->data[i] = virt_to_dma64(vaddr);
}
return ib;
+error:
+ while (i >= nr_chunks) {
+ i -= nr_chunks;
+ vaddr = dma64_to_virt(ib->data[i]);
+ free_pages((unsigned long)vaddr, ib->page_order);
+ }
+ kfree(ib);
+ return ERR_PTR(-ENOMEM);
}
/*
* Free an idal buffer.
*/
-static inline void
-idal_buffer_free(struct idal_buffer *ib)
+static inline void idal_buffer_free(struct idal_buffer *ib)
{
int nr_chunks, nr_ptrs, i;
+ void *vaddr;
- nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
- nr_chunks = (4096 << ib->page_order) >> IDA_SIZE_LOG;
- for (i = 0; i < nr_ptrs; i += nr_chunks)
- free_pages((unsigned long) ib->data[i], ib->page_order);
+ nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_SHIFT;
+ nr_chunks = (PAGE_SIZE << ib->page_order) >> IDA_SIZE_SHIFT;
+ for (i = 0; i < nr_ptrs; i += nr_chunks) {
+ vaddr = dma64_to_virt(ib->data[i]);
+ free_pages((unsigned long)vaddr, ib->page_order);
+ }
kfree(ib);
}
/*
 * Test if an idal list is really needed.
*/
-static inline int
-__idal_buffer_is_needed(struct idal_buffer *ib)
+static inline bool __idal_buffer_is_needed(struct idal_buffer *ib)
{
- return ib->size > (4096ul << ib->page_order) ||
- idal_is_needed(ib->data[0], ib->size);
+ if (ib->size > (PAGE_SIZE << ib->page_order))
+ return true;
+ return idal_is_needed(dma64_to_virt(ib->data[0]), ib->size);
}
/*
* Set channel data address to idal buffer.
*/
-static inline void
-idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
+static inline void idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
{
+ void *vaddr;
+
if (__idal_buffer_is_needed(ib)) {
- // setup idals;
- ccw->cda = (u32)(addr_t) ib->data;
+ /* Setup idals */
+ ccw->cda = virt_to_dma32(ib->data);
ccw->flags |= CCW_FLAG_IDA;
- } else
- // we do not need idals - use direct addressing
- ccw->cda = (u32)(addr_t) ib->data[0];
+ } else {
+ /*
+ * No idals needed - use direct addressing. Convert from
+ * dma64_t to virt and then to dma32_t only because of type
+ * checking. The physical address is known to be below 2GB.
+ */
+ vaddr = dma64_to_virt(ib->data[0]);
+ ccw->cda = virt_to_dma32(vaddr);
+ }
ccw->count = ib->size;
}
/*
* Copy count bytes from an idal buffer to user memory
*/
-static inline size_t
-idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)
+static inline size_t idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)
{
size_t left;
+ void *vaddr;
int i;
BUG_ON(count > ib->size);
for (i = 0; count > IDA_BLOCK_SIZE; i++) {
- left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE);
+ vaddr = dma64_to_virt(ib->data[i]);
+ left = copy_to_user(to, vaddr, IDA_BLOCK_SIZE);
if (left)
return left + count - IDA_BLOCK_SIZE;
- to = (void __user *) to + IDA_BLOCK_SIZE;
+ to = (void __user *)to + IDA_BLOCK_SIZE;
count -= IDA_BLOCK_SIZE;
}
- return copy_to_user(to, ib->data[i], count);
+ vaddr = dma64_to_virt(ib->data[i]);
+ return copy_to_user(to, vaddr, count);
}
/*
* Copy count bytes from user memory to an idal buffer
*/
-static inline size_t
-idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count)
+static inline size_t idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count)
{
size_t left;
+ void *vaddr;
int i;
BUG_ON(count > ib->size);
for (i = 0; count > IDA_BLOCK_SIZE; i++) {
- left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE);
+ vaddr = dma64_to_virt(ib->data[i]);
+ left = copy_from_user(vaddr, from, IDA_BLOCK_SIZE);
if (left)
return left + count - IDA_BLOCK_SIZE;
- from = (void __user *) from + IDA_BLOCK_SIZE;
+ from = (void __user *)from + IDA_BLOCK_SIZE;
count -= IDA_BLOCK_SIZE;
}
- return copy_from_user(ib->data[i], from, count);
+ vaddr = dma64_to_virt(ib->data[i]);
+ return copy_from_user(vaddr, from, count);
}
#endif
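[Editor's example] Putting the idal_buffer API together, a minimal lifecycle sketch (the buffer size, CCW and user pointer are illustrative assumptions; SZ_64K is from linux/sizes.h):

/* Hypothetical sketch: read 64 KiB via an idal buffer. */
static long example_read(struct ccw1 *ccw, void __user *ubuf)
{
	struct idal_buffer *ib;
	long rc = 0;

	ib = idal_buffer_alloc(SZ_64K, 0);	/* order-0 chunks */
	if (IS_ERR(ib))
		return PTR_ERR(ib);
	idal_buffer_set_cda(ib, ccw);		/* direct or IDAL addressing */
	/* ... perform the I/O ... */
	if (idal_buffer_to_user(ib, ubuf, SZ_64K))
		rc = -EFAULT;
	idal_buffer_free(ib);
	return rc;
}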
diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
index 5cea629c548e..09f763b9eb40 100644
--- a/arch/s390/include/asm/idle.h
+++ b/arch/s390/include/asm/idle.h
@@ -10,16 +10,12 @@
#include <linux/types.h>
#include <linux/device.h>
-#include <linux/seqlock.h>
struct s390_idle_data {
- seqcount_t seqcount;
unsigned long idle_count;
unsigned long idle_time;
unsigned long clock_idle_enter;
- unsigned long clock_idle_exit;
unsigned long timer_idle_enter;
- unsigned long timer_idle_exit;
unsigned long mt_cycles_enter[8];
};
@@ -27,6 +23,5 @@ extern struct device_attribute dev_attr_idle_count;
extern struct device_attribute dev_attr_idle_time_us;
void psw_idle(struct s390_idle_data *data, unsigned long psw_mask);
-void psw_idle_exit(void);
#endif /* _S390_IDLE_H */
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index e3882b012bfa..4453ad7c11ac 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -22,11 +22,18 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
#define IO_SPACE_LIMIT 0
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
-void __iomem *ioremap(phys_addr_t addr, size_t size);
-void __iomem *ioremap_wc(phys_addr_t addr, size_t size);
-void __iomem *ioremap_wt(phys_addr_t addr, size_t size);
-void iounmap(volatile void __iomem *addr);
+/*
+ * I/O memory mapping functions.
+ */
+#define ioremap_prot ioremap_prot
+#define iounmap iounmap
+
+#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL)
+
+#define ioremap_wc(addr, size) \
+ ioremap_prot((addr), (size), pgprot_val(pgprot_writecombine(PAGE_KERNEL)))
+#define ioremap_wt(addr, size) \
+ ioremap_prot((addr), (size), pgprot_val(pgprot_writethrough(PAGE_KERNEL)))
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
@@ -51,10 +58,6 @@ static inline void ioport_unmap(void __iomem *p)
#define pci_iomap_wc pci_iomap_wc
#define pci_iomap_wc_range pci_iomap_wc_range
-#define ioremap ioremap
-#define ioremap_wt ioremap_wt
-#define ioremap_wc ioremap_wc
-
#define memcpy_fromio(dst, src, count) zpci_memcpy_fromio(dst, src, count)
#define memcpy_toio(dst, src, count) zpci_memcpy_toio(dst, src, count)
#define memset_io(dst, val, count) zpci_memset_io(dst, val, count)
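[Editor's example] A brief usage sketch of the macro forms above (the BAR address, length and caller are assumptions):

/* Hypothetical sketch: map a PCI BAR write-combined and release it. */
static int example_map_bar(phys_addr_t start, size_t len)
{
	void __iomem *regs = ioremap_wc(start, len);

	if (!regs)
		return -ENOMEM;
	/* ... access via memcpy_toio()/memset_io() ... */
	iounmap(regs);
	return 0;
}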
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index a405b6bb89fb..b0d00032479d 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -22,6 +22,7 @@ struct ipl_parameter_block {
struct ipl_pb0_common common;
struct ipl_pb0_fcp fcp;
struct ipl_pb0_ccw ccw;
+ struct ipl_pb0_eckd eckd;
struct ipl_pb0_nvme nvme;
char raw[PAGE_SIZE - sizeof(struct ipl_pl_hdr)];
};
@@ -41,6 +42,10 @@ struct ipl_parameter_block {
sizeof(struct ipl_pb0_ccw))
#define IPL_BP0_CCW_LEN (sizeof(struct ipl_pb0_ccw))
+#define IPL_BP_ECKD_LEN (sizeof(struct ipl_pl_hdr) + \
+ sizeof(struct ipl_pb0_eckd))
+#define IPL_BP0_ECKD_LEN (sizeof(struct ipl_pb0_eckd))
+
#define IPL_MAX_SUPPORTED_VERSION (0)
#define IPL_RB_CERT_UNKNOWN ((unsigned short)-1)
@@ -68,6 +73,8 @@ enum ipl_type {
IPL_TYPE_NSS = 16,
IPL_TYPE_NVME = 32,
IPL_TYPE_NVME_DUMP = 64,
+ IPL_TYPE_ECKD = 128,
+ IPL_TYPE_ECKD_DUMP = 256,
};
struct ipl_info
@@ -79,6 +86,9 @@ struct ipl_info
} ccw;
struct {
struct ccw_dev_id dev_id;
+ } eckd;
+ struct {
+ struct ccw_dev_id dev_id;
u64 wwpn;
u64 lun;
} fcp;
@@ -99,6 +109,7 @@ extern void set_os_info_reipl_block(void);
static inline bool is_ipl_type_dump(void)
{
return (ipl_info.type == IPL_TYPE_FCP_DUMP) ||
+ (ipl_info.type == IPL_TYPE_ECKD_DUMP) ||
(ipl_info.type == IPL_TYPE_NVME_DUMP);
}
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 89902f754740..54b42817f70a 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -31,6 +31,7 @@
#include <linux/percpu.h>
#include <linux/cache.h>
#include <linux/types.h>
+#include <asm/ctlreg.h>
enum interruption_class {
IRQEXT_CLK,
@@ -101,17 +102,17 @@ enum irq_subclass {
};
#define CR0_IRQ_SUBCLASS_MASK \
- ((1UL << (63 - 30)) /* Warning Track */ | \
- (1UL << (63 - 48)) /* Malfunction Alert */ | \
- (1UL << (63 - 49)) /* Emergency Signal */ | \
- (1UL << (63 - 50)) /* External Call */ | \
- (1UL << (63 - 52)) /* Clock Comparator */ | \
- (1UL << (63 - 53)) /* CPU Timer */ | \
- (1UL << (63 - 54)) /* Service Signal */ | \
- (1UL << (63 - 57)) /* Interrupt Key */ | \
- (1UL << (63 - 58)) /* Measurement Alert */ | \
- (1UL << (63 - 59)) /* Timing Alert */ | \
- (1UL << (63 - 62))) /* IUCV */
+ (CR0_WARNING_TRACK | \
+ CR0_MALFUNCTION_ALERT_SUBMASK | \
+ CR0_EMERGENCY_SIGNAL_SUBMASK | \
+ CR0_EXTERNAL_CALL_SUBMASK | \
+ CR0_CLOCK_COMPARATOR_SUBMASK | \
+ CR0_CPU_TIMER_SUBMASK | \
+ CR0_SERVICE_SIGNAL_SUBMASK | \
+ CR0_INTERRUPT_KEY_SUBMASK | \
+ CR0_MEASUREMENT_ALERT_SUBMASK | \
+ CR0_ETR_SUBMASK | \
+ CR0_IUCV)
void irq_subclass_register(enum irq_subclass subclass);
void irq_subclass_unregister(enum irq_subclass subclass);
diff --git a/arch/s390/include/asm/irq_work.h b/arch/s390/include/asm/irq_work.h
index 603783766d0a..f00c9f610d5a 100644
--- a/arch/s390/include/asm/irq_work.h
+++ b/arch/s390/include/asm/irq_work.h
@@ -7,6 +7,4 @@ static inline bool arch_irq_work_has_interrupt(void)
return true;
}
-void arch_irq_work_raise(void);
-
#endif /* _ASM_S390_IRQ_WORK_H */
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 895f774bbcc5..bf78cf381dfc 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -25,7 +25,7 @@
*/
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("0: brcl 0,%l[label]\n"
+ asm goto("0: brcl 0,%l[label]\n"
".pushsection __jump_table,\"aw\"\n"
".balign 8\n"
".long 0b-.,%l[label]-.\n"
@@ -39,7 +39,7 @@ label:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm_volatile_goto("0: brcl 15,%l[label]\n"
+ asm goto("0: brcl 15,%l[label]\n"
".pushsection __jump_table,\"aw\"\n"
".balign 8\n"
".long 0b-.,%l[label]-.\n"
diff --git a/arch/s390/include/asm/kasan.h b/arch/s390/include/asm/kasan.h
index 2768d5db181f..0cffead0f2f2 100644
--- a/arch/s390/include/asm/kasan.h
+++ b/arch/s390/include/asm/kasan.h
@@ -2,7 +2,7 @@
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
-#include <asm/pgtable.h>
+#include <linux/const.h>
#ifdef CONFIG_KASAN
@@ -13,39 +13,6 @@
#define KASAN_SHADOW_START KASAN_SHADOW_OFFSET
#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
-extern void kasan_early_init(void);
-extern void kasan_copy_shadow_mapping(void);
-extern void kasan_free_early_identity(void);
-
-/*
- * Estimate kasan memory requirements, which it will reserve
- * at the very end of available physical memory. To estimate
- * that, we take into account that kasan would require
- * 1/8 of available physical memory (for shadow memory) +
- * creating page tables for the whole memory + shadow memory
- * region (1 + 1/8). To keep page tables estimates simple take
- * the double of combined ptes size.
- *
- * physmem parameter has to be already adjusted if not entire physical memory
- * would be used (e.g. due to effect of "mem=" option).
- */
-static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem)
-{
- unsigned long kasan_needs;
- unsigned long pages;
- /* for shadow memory */
- kasan_needs = round_up(physmem / 8, PAGE_SIZE);
- /* for paging structures */
- pages = DIV_ROUND_UP(physmem + kasan_needs, PAGE_SIZE);
- kasan_needs += DIV_ROUND_UP(pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;
-
- return kasan_needs;
-}
-#else
-static inline void kasan_early_init(void) { }
-static inline void kasan_copy_shadow_mapping(void) { }
-static inline void kasan_free_early_identity(void) { }
-static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem) { return 0; }
#endif
#endif
diff --git a/arch/s390/include/asm/kfence.h b/arch/s390/include/asm/kfence.h
index d55ba878378b..e47fd8cbe701 100644
--- a/arch/s390/include/asm/kfence.h
+++ b/arch/s390/include/asm/kfence.h
@@ -35,7 +35,7 @@ static __always_inline void kfence_split_mapping(void)
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
- __kernel_map_pages(virt_to_page(addr), 1, !protect);
+ __kernel_map_pages(virt_to_page((void *)addr), 1, !protect);
return true;
}
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 598095f4b924..01f1682a73b7 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -15,6 +15,7 @@
* <grundym@us.ibm.com>
*/
#include <linux/types.h>
+#include <asm/ctlreg.h>
#include <asm-generic/kprobes.h>
#define BREAKPOINT_INSTRUCTION 0x0002
@@ -65,17 +66,13 @@ struct prev_kprobe {
struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_saved_imask;
- unsigned long kprobe_saved_ctl[3];
+ struct ctlreg kprobe_saved_ctl[3];
struct prev_kprobe prev_kprobe;
};
void arch_remove_kprobe(struct kprobe *p);
-void __kretprobe_trampoline(void);
-void trampoline_probe_handler(struct pt_regs *regs);
int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
-int kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data);
#define flush_insn_slot(p) do { } while (0)
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 766028d54a3e..95990461888f 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -19,9 +19,11 @@
#include <linux/kvm.h>
#include <linux/seqlock.h>
#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mmu_notifier.h>
#include <asm/debug.h>
#include <asm/cpu.h>
-#include <asm/fpu/api.h>
+#include <asm/fpu.h>
#include <asm/isc.h>
#include <asm/guarded_storage.h>
@@ -93,19 +95,30 @@ union ipte_control {
};
};
+union sca_utility {
+ __u16 val;
+ struct {
+ __u16 mtcr : 1;
+ __u16 reserved : 15;
+ };
+};
+
struct bsca_block {
union ipte_control ipte_control;
__u64 reserved[5];
__u64 mcn;
- __u64 reserved2;
+ union sca_utility utility;
+ __u8 reserved2[6];
struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
};
struct esca_block {
union ipte_control ipte_control;
- __u64 reserved1[7];
+ __u64 reserved1[6];
+ union sca_utility utility;
+ __u8 reserved2[6];
__u64 mcn[4];
- __u64 reserved2[20];
+ __u64 reserved3[20];
struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
};
@@ -129,8 +142,7 @@ struct mcck_volatile_info {
CR14_EXTERNAL_DAMAGE_SUBMASK)
#define SIDAD_SIZE_MASK 0xff
-#define sida_origin(sie_block) \
- ((sie_block)->sidad & PAGE_MASK)
+#define sida_addr(sie_block) phys_to_virt((sie_block)->sidad & PAGE_MASK)
#define sida_size(sie_block) \
((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE)
@@ -249,16 +261,21 @@ struct kvm_s390_sie_block {
#define ECB_SPECI 0x08
#define ECB_SRSI 0x04
#define ECB_HOSTPROTINT 0x02
+#define ECB_PTF 0x01
__u8 ecb; /* 0x0061 */
#define ECB2_CMMA 0x80
#define ECB2_IEP 0x20
#define ECB2_PFMFI 0x08
#define ECB2_ESCA 0x04
+#define ECB2_ZPCI_LSI 0x02
__u8 ecb2; /* 0x0062 */
+#define ECB3_AISI 0x20
+#define ECB3_AISII 0x10
#define ECB3_DEA 0x08
#define ECB3_AES 0x04
#define ECB3_RI 0x01
__u8 ecb3; /* 0x0063 */
+#define ESCA_SCAOL_MASK ~0x3fU
__u32 scaol; /* 0x0064 */
__u8 sdf; /* 0x0068 */
__u8 epdx; /* 0x0069 */
@@ -726,7 +743,6 @@ struct kvm_vcpu_arch {
struct kvm_s390_sie_block *vsie_block;
unsigned int host_acrs[NUM_ACRS];
struct gs_cb *host_gscb;
- struct fpu host_fpregs;
struct kvm_s390_local_interrupt local_int;
struct hrtimer ckc_timer;
struct kvm_s390_pgm_info pgm;
@@ -748,6 +764,8 @@ struct kvm_vcpu_arch {
__u64 cputm_start;
bool gs_enabled;
bool skey_enabled;
+	/* Indicates whether the access registers have been loaded from the guest */
+ bool acrs_loaded;
struct kvm_s390_pv_vcpu pv;
union diag318_info diag318_info;
};
@@ -759,6 +777,14 @@ struct kvm_vm_stat {
u64 inject_pfault_done;
u64 inject_service_signal;
u64 inject_virtio;
+ u64 aen_forward;
+ u64 gmap_shadow_create;
+ u64 gmap_shadow_reuse;
+ u64 gmap_shadow_r1_entry;
+ u64 gmap_shadow_r2_entry;
+ u64 gmap_shadow_r3_entry;
+ u64 gmap_shadow_sg_entry;
+ u64 gmap_shadow_pg_entry;
};
struct kvm_arch_memory_slot {
@@ -793,12 +819,14 @@ struct s390_io_adapter {
struct kvm_s390_cpu_model {
/* facility mask supported by kvm & hosting machine */
- __u64 fac_mask[S390_ARCH_FAC_LIST_SIZE_U64];
+ __u64 fac_mask[S390_ARCH_FAC_MASK_SIZE_U64];
struct kvm_s390_vm_cpu_subfunc subfuncs;
/* facility list requested by guest (in dma page) */
__u64 *fac_list;
u64 cpuid;
unsigned short ibc;
+	/* subset of available UV features for PV guests, enabled by user space */
+ struct kvm_s390_vm_cpu_uv_feat uv_feat_guest;
};
typedef int (*crypto_hook)(struct kvm_vcpu *vcpu);
@@ -923,6 +951,10 @@ struct kvm_s390_pv {
u64 guest_len;
unsigned long stor_base;
void *stor_var;
+ bool dumping;
+ void *set_aside;
+ struct list_head need_cleanup;
+ struct mmu_notifier mmu_notifier;
};
struct kvm_arch{
@@ -939,6 +971,7 @@ struct kvm_arch{
int use_cmma;
int use_pfmfi;
int use_skf;
+ int use_zpci_interp;
int user_cpu_state_ctrl;
int user_sigp;
int user_stsi;
@@ -962,6 +995,8 @@ struct kvm_arch{
DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
struct kvm_s390_gisa_interrupt gisa_int;
struct kvm_s390_pv pv;
+ struct list_head kzdev_list;
+ spinlock_t kzdev_list_lock;
};
#define KVM_HVA_ERR_BAD (-1UL)
@@ -994,13 +1029,21 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm);
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
unsigned long *aqm, unsigned long *adm);
-extern int sie64a(struct kvm_s390_sie_block *, u64 *);
+int __sie64a(phys_addr_t sie_block_phys, struct kvm_s390_sie_block *sie_block, u64 *rsa);
+
+static inline int sie64a(struct kvm_s390_sie_block *sie_block, u64 *rsa)
+{
+ return __sie64a(virt_to_phys(sie_block), sie_block, rsa);
+}
+
extern char sie_exit;
+bool kvm_s390_pv_is_protected(struct kvm *kvm);
+bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu);
+
extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
-static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
@@ -1012,4 +1055,14 @@ static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+#define __KVM_HAVE_ARCH_VM_FREE
+void kvm_arch_free_vm(struct kvm *kvm);
+
+struct zpci_kvm_hook {
+ int (*kvm_register)(void *opaque, struct kvm *kvm);
+ void (*kvm_unregister)(void *opaque);
+};
+
+extern struct zpci_kvm_hook zpci_kvm_hook;
+
#endif
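[Editor's example] For context, a hedged sketch of how the new sie64a() wrapper might be invoked from a VCPU run loop (the vcpu fields mirror the KVM code but are not defined in this header):

/* Hypothetical sketch: enter SIE for one guest execution slice. */
static int example_enter_sie(struct kvm_vcpu *vcpu, u64 *gprs)
{
	/* The wrapper derives the physical block address itself. */
	return sie64a(vcpu->arch.sie_block, gprs);
}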
diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h
index c76777b15fec..df3fb7d8227b 100644
--- a/arch/s390/include/asm/linkage.h
+++ b/arch/s390/include/asm/linkage.h
@@ -4,7 +4,7 @@
#include <linux/stringify.h>
-#define __ALIGN .align 16, 0x07
+#define __ALIGN .balign CONFIG_FUNCTION_ALIGNMENT, 0x07
#define __ALIGN_STR __stringify(__ALIGN)
#endif
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 26fe5e535728..8c5f16857539 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <asm/ptrace.h>
+#include <asm/ctlreg.h>
#include <asm/cpu.h>
#include <asm/types.h>
@@ -118,8 +119,8 @@ struct lowcore {
__u64 avg_steal_timer; /* 0x0300 */
__u64 last_update_timer; /* 0x0308 */
__u64 last_update_clock; /* 0x0310 */
- __u64 int_clock; /* 0x0318*/
- __u64 mcck_clock; /* 0x0320 */
+ __u64 int_clock; /* 0x0318 */
+ __u8 pad_0x0320[0x0328-0x0320]; /* 0x0320 */
__u64 clock_comparator; /* 0x0328 */
__u64 boot_clock[2]; /* 0x0330 */
@@ -139,8 +140,8 @@ struct lowcore {
__u32 restart_flags; /* 0x0384 */
/* Address space pointer. */
- __u64 kernel_asce; /* 0x0388 */
- __u64 user_asce; /* 0x0390 */
+ struct ctlreg kernel_asce; /* 0x0388 */
+ struct ctlreg user_asce; /* 0x0390 */
/*
* The lpp and current_pid fields form a
@@ -156,7 +157,7 @@ struct lowcore {
__s32 preempt_count; /* 0x03a8 */
__u32 spinlock_lockval; /* 0x03ac */
__u32 spinlock_index; /* 0x03b0 */
- __u32 fpu_flags; /* 0x03b4 */
+ __u8 pad_0x03b4[0x03b8-0x03b4]; /* 0x03b4 */
__u64 percpu_offset; /* 0x03b8 */
__u8 pad_0x03c0[0x03c8-0x03c0]; /* 0x03c0 */
__u64 machine_flags; /* 0x03c8 */
@@ -199,11 +200,13 @@ struct lowcore {
__u32 clock_comp_save_area[2]; /* 0x1330 */
__u64 last_break_save_area; /* 0x1338 */
__u32 access_regs_save_area[16]; /* 0x1340 */
- __u64 cregs_save_area[16]; /* 0x1380 */
+ struct ctlreg cregs_save_area[16]; /* 0x1380 */
__u8 pad_0x1400[0x1500-0x1400]; /* 0x1400 */
/* Cryptography-counter designation */
__u64 ccd; /* 0x1500 */
- __u8 pad_0x1508[0x1800-0x1508]; /* 0x1508 */
+ /* AI-extension counter designation */
+ __u64 aicd; /* 0x1508 */
+ __u8 pad_0x1510[0x1800-0x1510]; /* 0x1510 */
/* Transaction abort diagnostic block */
struct pgm_tdb pgm_tdb; /* 0x1800 */
@@ -219,12 +222,4 @@ static inline void set_prefix(__u32 address)
asm volatile("spx %0" : : "Q" (address) : "memory");
}
-static inline __u32 store_prefix(void)
-{
- __u32 address;
-
- asm volatile("stpx %0" : "=Q" (address));
- return address;
-}
-
#endif /* _ASM_S390_LOWCORE_H */
diff --git a/arch/s390/include/asm/maccess.h b/arch/s390/include/asm/maccess.h
new file mode 100644
index 000000000000..50225940d971
--- /dev/null
+++ b/arch/s390/include/asm/maccess.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_S390_MACCESS_H
+#define __ASM_S390_MACCESS_H
+
+#include <linux/types.h>
+
+#define MEMCPY_REAL_SIZE PAGE_SIZE
+#define MEMCPY_REAL_MASK PAGE_MASK
+
+struct iov_iter;
+
+extern unsigned long __memcpy_real_area;
+extern pte_t *memcpy_real_ptep;
+size_t memcpy_real_iter(struct iov_iter *iter, unsigned long src, size_t count);
+int memcpy_real(void *dest, unsigned long src, size_t count);
+#ifdef CONFIG_CRASH_DUMP
+int copy_oldmem_kernel(void *dst, unsigned long src, size_t count);
+#endif
+
+#endif /* __ASM_S390_MACCESS_H */
diff --git a/arch/s390/include/asm/mem_detect.h b/arch/s390/include/asm/mem_detect.h
deleted file mode 100644
index a7c922a69050..000000000000
--- a/arch/s390/include/asm/mem_detect.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_S390_MEM_DETECT_H
-#define _ASM_S390_MEM_DETECT_H
-
-#include <linux/types.h>
-
-enum mem_info_source {
- MEM_DETECT_NONE = 0,
- MEM_DETECT_SCLP_STOR_INFO,
- MEM_DETECT_DIAG260,
- MEM_DETECT_SCLP_READ_INFO,
- MEM_DETECT_BIN_SEARCH
-};
-
-struct mem_detect_block {
- u64 start;
- u64 end;
-};
-
-/*
- * Storage element id is defined as 1 byte (up to 256 storage elements).
- * In practise only storage element id 0 and 1 are used).
- * According to architecture one storage element could have as much as
- * 1020 subincrements. 255 mem_detect_blocks are embedded in mem_detect_info.
- * If more mem_detect_blocks are required, a block of memory from already
- * known mem_detect_block is taken (entries_extended points to it).
- */
-#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
-
-struct mem_detect_info {
- u32 count;
- u8 info_source;
- struct mem_detect_block entries[MEM_INLINED_ENTRIES];
- struct mem_detect_block *entries_extended;
-};
-extern struct mem_detect_info mem_detect;
-
-void add_mem_detect_block(u64 start, u64 end);
-
-static inline int __get_mem_detect_block(u32 n, unsigned long *start,
- unsigned long *end)
-{
- if (n >= mem_detect.count) {
- *start = 0;
- *end = 0;
- return -1;
- }
-
- if (n < MEM_INLINED_ENTRIES) {
- *start = (unsigned long)mem_detect.entries[n].start;
- *end = (unsigned long)mem_detect.entries[n].end;
- } else {
- *start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start;
- *end = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].end;
- }
- return 0;
-}
-
-/**
- * for_each_mem_detect_block - early online memory range iterator
- * @i: an integer used as loop variable
- * @p_start: ptr to unsigned long for start address of the range
- * @p_end: ptr to unsigned long for end address of the range
- *
- * Walks over detected online memory ranges.
- */
-#define for_each_mem_detect_block(i, p_start, p_end) \
- for (i = 0, __get_mem_detect_block(i, p_start, p_end); \
- i < mem_detect.count; \
- i++, __get_mem_detect_block(i, p_start, p_end))
-
-static inline void get_mem_detect_reserved(unsigned long *start,
- unsigned long *size)
-{
- *start = (unsigned long)mem_detect.entries_extended;
- if (mem_detect.count > MEM_INLINED_ENTRIES)
- *size = (mem_detect.count - MEM_INLINED_ENTRIES) * sizeof(struct mem_detect_block);
- else
- *size = 0;
-}
-
-static inline unsigned long get_mem_detect_end(void)
-{
- unsigned long start;
- unsigned long end;
-
- if (mem_detect.count) {
- __get_mem_detect_block(mem_detect.count - 1, &start, &end);
- return end;
- }
- return 0;
-}
-
-#endif
diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h
index 08a8b96606d7..b85e13505a0f 100644
--- a/arch/s390/include/asm/mem_encrypt.h
+++ b/arch/s390/include/asm/mem_encrypt.h
@@ -4,8 +4,8 @@
#ifndef __ASSEMBLY__
-int set_memory_encrypted(unsigned long addr, int numpages);
-int set_memory_decrypted(unsigned long addr, int numpages);
+int set_memory_encrypted(unsigned long vaddr, int numpages);
+int set_memory_decrypted(unsigned long vaddr, int numpages);
#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 82aae78e1315..bb1b4bef1878 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -11,14 +11,13 @@ typedef struct {
cpumask_t cpu_attach_mask;
atomic_t flush_count;
unsigned int flush_mm;
- struct list_head pgtable_list;
struct list_head gmap_list;
unsigned long gmap_asce;
unsigned long asce;
unsigned long asce_limit;
unsigned long vdso_base;
/* The mmu context belongs to a secure guest. */
- atomic_t is_protected;
+ atomic_t protected_count;
/*
* The following bitfields need a down_write on the mm
* semaphore when they are written to. As they are only
@@ -39,21 +38,6 @@ typedef struct {
#define INIT_MM_CONTEXT(name) \
.context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock), \
- .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
-static inline int tprot(unsigned long addr)
-{
- int rc = -EFAULT;
-
- asm volatile(
- " tprot 0(%1),0\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+d" (rc) : "a" (addr) : "cc");
- return rc;
-}
-
#endif
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index c7937f369e62..929af18b0908 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -12,7 +12,7 @@
#include <linux/uaccess.h>
#include <linux/mm_types.h>
#include <asm/tlbflush.h>
-#include <asm/ctl_reg.h>
+#include <asm/ctlreg.h>
#include <asm-generic/mm_hooks.h>
#define init_new_context init_new_context
@@ -22,11 +22,10 @@ static inline int init_new_context(struct task_struct *tsk,
unsigned long asce_type, init_entry;
spin_lock_init(&mm->context.lock);
- INIT_LIST_HEAD(&mm->context.pgtable_list);
INIT_LIST_HEAD(&mm->context.gmap_list);
cpumask_clear(&mm->context.cpu_attach_mask);
atomic_set(&mm->context.flush_count, 0);
- atomic_set(&mm->context.is_protected, 0);
+ atomic_set(&mm->context.protected_count, 0);
mm->context.gmap_asce = 0;
mm->context.flush_mm = 0;
#ifdef CONFIG_PGSTE
@@ -78,10 +77,10 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *
if (next == &init_mm)
S390_lowcore.user_asce = s390_invalid_asce;
else
- S390_lowcore.user_asce = next->context.asce;
+ S390_lowcore.user_asce.val = next->context.asce;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
/* Clear previous user-ASCE from CR7 */
- __ctl_load(s390_invalid_asce, 7, 7);
+ local_ctl_load(7, &s390_invalid_asce);
if (prev != next)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
@@ -111,7 +110,7 @@ static inline void finish_arch_post_lock_switch(void)
__tlb_flush_mm_lazy(mm);
preempt_enable();
}
- __ctl_load(S390_lowcore.user_asce, 7, 7);
+ local_ctl_load(7, &S390_lowcore.user_asce);
}
#define activate_mm activate_mm
@@ -120,7 +119,7 @@ static inline void activate_mm(struct mm_struct *prev,
{
switch_mm(prev, next, current);
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
- __ctl_load(S390_lowcore.user_asce, 7, 7);
+ local_ctl_load(7, &S390_lowcore.user_asce);
}
#include <asm-generic/mmu_context.h>
diff --git a/arch/s390/include/asm/msi.h b/arch/s390/include/asm/msi.h
new file mode 100644
index 000000000000..399343ed9ffb
--- /dev/null
+++ b/arch/s390/include/asm/msi.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_MSI_H
+#define _ASM_S390_MSI_H
+#include <asm-generic/msi.h>
+
+/*
+ * Work around S390 not using irq_domain at all so we can't set
+ * IRQ_DOMAIN_FLAG_ISOLATED_MSI. See this link for an explanation of how it works:
+ *
+ * https://lore.kernel.org/r/31af8174-35e9-ebeb-b9ef-74c90d4bfd93@linux.ibm.com/
+ *
+ * Note this is less isolated than the ARM/x86 versions, as userspace can
+ * trigger MSIs belonging to kernel devices within the same GISA.
+ */
+#define arch_is_isolated_msi() true
+
+#endif
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index af1cd3a6f406..227466ce9e41 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -101,9 +101,8 @@ void nmi_alloc_mcesa_early(u64 *mcesad);
int nmi_alloc_mcesa(u64 *mcesad);
void nmi_free_mcesa(u64 *mcesad);
-void s390_handle_mcck(struct pt_regs *regs);
-void __s390_handle_mcck(void);
-int s390_do_machine_check(struct pt_regs *regs);
+void s390_handle_mcck(void);
+void s390_do_machine_check(struct pt_regs *regs);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_NMI_H */
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
index 7e9e99523e95..7a946c42ad13 100644
--- a/arch/s390/include/asm/nospec-insn.h
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -2,6 +2,7 @@
#ifndef _ASM_S390_NOSPEC_ASM_H
#define _ASM_S390_NOSPEC_ASM_H
+#include <linux/linkage.h>
#include <asm/dwarf.h>
#ifdef __ASSEMBLY__
@@ -16,7 +17,7 @@
.macro __THUNK_PROLOG_NAME name
#ifdef CONFIG_EXPOLINE_EXTERN
.pushsection .text,"ax",@progbits
- .align 16,0x07
+ __ALIGN
#else
.pushsection .text.\name,"axG",@progbits,\name,comdat
#endif
diff --git a/arch/s390/include/asm/os_info.h b/arch/s390/include/asm/os_info.h
index 147a8d547ef9..a4d2e103f116 100644
--- a/arch/s390/include/asm/os_info.h
+++ b/arch/s390/include/asm/os_info.h
@@ -8,12 +8,17 @@
#ifndef _ASM_S390_OS_INFO_H
#define _ASM_S390_OS_INFO_H
+#include <linux/uio.h>
+
#define OS_INFO_VERSION_MAJOR 1
#define OS_INFO_VERSION_MINOR 1
#define OS_INFO_MAGIC 0x4f53494e464f535aULL /* OSINFOSZ */
#define OS_INFO_VMCOREINFO 0
#define OS_INFO_REIPL_BLOCK 1
+#define OS_INFO_FLAGS_ENTRY 2
+
+#define OS_INFO_FLAG_REIPL_CLEAR (1UL << 0)
struct os_info_entry {
u64 addr;
@@ -28,8 +33,8 @@ struct os_info {
u16 version_minor;
u64 crashkernel_addr;
u64 crashkernel_size;
- struct os_info_entry entry[2];
- u8 reserved[4024];
+ struct os_info_entry entry[3];
+ u8 reserved[4004];
} __packed;
void os_info_init(void);
@@ -39,7 +44,6 @@ u32 os_info_csum(struct os_info *os_info);
#ifdef CONFIG_CRASH_DUMP
void *os_info_old_entry(int nr, unsigned long *size);
-int copy_oldmem_kernel(void *dst, unsigned long src, size_t count);
#else
static inline void *os_info_old_entry(int nr, unsigned long *size)
{
diff --git a/arch/s390/include/asm/page-states.h b/arch/s390/include/asm/page-states.h
index c33c4deb545f..08fcbd628120 100644
--- a/arch/s390/include/asm/page-states.h
+++ b/arch/s390/include/asm/page-states.h
@@ -7,6 +7,9 @@
#ifndef PAGE_STATES_H
#define PAGE_STATES_H
+#include <asm/sections.h>
+#include <asm/page.h>
+
#define ESSA_GET_STATE 0
#define ESSA_SET_STABLE 1
#define ESSA_SET_UNUSED 2
@@ -18,4 +21,60 @@
#define ESSA_MAX ESSA_SET_STABLE_NODAT
+extern int __bootdata_preserved(cmma_flag);
+
+static __always_inline unsigned long essa(unsigned long paddr, unsigned char cmd)
+{
+ unsigned long rc;
+
+ asm volatile(
+ " .insn rrf,0xb9ab0000,%[rc],%[paddr],%[cmd],0"
+ : [rc] "=d" (rc)
+ : [paddr] "d" (paddr),
+ [cmd] "i" (cmd));
+ return rc;
+}
+
+static __always_inline void __set_page_state(void *addr, unsigned long num_pages, unsigned char cmd)
+{
+ unsigned long paddr = __pa(addr) & PAGE_MASK;
+
+ while (num_pages--) {
+ essa(paddr, cmd);
+ paddr += PAGE_SIZE;
+ }
+}
+
+static inline void __set_page_unused(void *addr, unsigned long num_pages)
+{
+ __set_page_state(addr, num_pages, ESSA_SET_UNUSED);
+}
+
+static inline void __set_page_stable_dat(void *addr, unsigned long num_pages)
+{
+ __set_page_state(addr, num_pages, ESSA_SET_STABLE);
+}
+
+static inline void __set_page_stable_nodat(void *addr, unsigned long num_pages)
+{
+ __set_page_state(addr, num_pages, ESSA_SET_STABLE_NODAT);
+}
+
+static inline void __arch_set_page_nodat(void *addr, unsigned long num_pages)
+{
+ if (!cmma_flag)
+ return;
+ if (cmma_flag < 2)
+ __set_page_stable_dat(addr, num_pages);
+ else
+ __set_page_stable_nodat(addr, num_pages);
+}
+
+static inline void __arch_set_page_dat(void *addr, unsigned long num_pages)
+{
+ if (!cmma_flag)
+ return;
+ __set_page_stable_dat(addr, num_pages);
+}
+
#endif
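[Editor's example] For illustration, a hedged caller sketch (the allocation context is an assumption): __arch_set_page_dat() is the common case for pages the kernel accesses with DAT on, and quietly does nothing when CMMA is not active:

/* Hypothetical sketch: make a fresh page CMMA-stable before use. */
static void *example_alloc_dat_page(void)
{
	void *page = (void *)__get_free_page(GFP_KERNEL);

	if (page)
		__arch_set_page_dat(page, 1);	/* no-op if !cmma_flag */
	return page;
}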
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 61dea67bb9c7..9381879f7ecf 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -11,7 +11,7 @@
#include <linux/const.h>
#include <asm/types.h>
-#define _PAGE_SHIFT 12
+#define _PAGE_SHIFT CONFIG_PAGE_SHIFT
#define _PAGE_SIZE (_AC(1, UL) << _PAGE_SHIFT)
#define _PAGE_MASK (~(_PAGE_SIZE - 1))
@@ -19,7 +19,7 @@
#define PAGE_SHIFT _PAGE_SHIFT
#define PAGE_SIZE _PAGE_SIZE
#define PAGE_MASK _PAGE_MASK
-#define PAGE_DEFAULT_ACC 0
+#define PAGE_DEFAULT_ACC _AC(0, UL)
/* storage-protection override */
#define PAGE_SPO_ACC 9
#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
@@ -73,9 +73,8 @@ static inline void copy_page(void *to, void *from)
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+ vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
/*
 * These are used to make use of C type-checking.
@@ -165,7 +164,6 @@ static inline int page_reset_referenced(unsigned long addr)
struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
-void arch_set_page_dat(struct page *page, int order);
static inline int devmem_is_allowed(unsigned long pfn)
{
@@ -180,31 +178,65 @@ int arch_make_page_accessible(struct page *page);
#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
#endif
-#endif /* !__ASSEMBLY__ */
-
#define __PAGE_OFFSET 0x0UL
#define PAGE_OFFSET 0x0UL
-#define __pa(x) ((unsigned long)(x))
+#define __pa_nodebug(x) ((unsigned long)(x))
+
+#ifdef __DECOMPRESSOR
+
+#define __pa(x) __pa_nodebug(x)
+#define __pa32(x) __pa(x)
#define __va(x) ((void *)(unsigned long)(x))
+#else /* __DECOMPRESSOR */
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+
+unsigned long __phys_addr(unsigned long x, bool is_31bit);
+
+#else /* CONFIG_DEBUG_VIRTUAL */
+
+static inline unsigned long __phys_addr(unsigned long x, bool is_31bit)
+{
+ return __pa_nodebug(x);
+}
+
+#endif /* CONFIG_DEBUG_VIRTUAL */
+
+#define __pa(x) __phys_addr((unsigned long)(x), false)
+#define __pa32(x) __phys_addr((unsigned long)(x), true)
+#define __va(x) ((void *)(unsigned long)(x))
+
+#endif /* __DECOMPRESSOR */
+
#define phys_to_pfn(phys) ((phys) >> PAGE_SHIFT)
#define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
-#define pfn_to_virt(pfn) __va(pfn_to_phys(pfn))
-#define virt_to_pfn(kaddr) (phys_to_pfn(__pa(kaddr)))
+static inline void *pfn_to_virt(unsigned long pfn)
+{
+ return __va(pfn_to_phys(pfn));
+}
+
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+ return phys_to_pfn(__pa(kaddr));
+}
+
#define pfn_to_kaddr(pfn) pfn_to_virt(pfn)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
-#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
+#define virt_addr_valid(kaddr) pfn_valid(phys_to_pfn(__pa_nodebug(kaddr)))
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
+#endif /* !__ASSEMBLY__ */
+
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
diff --git a/arch/s390/include/asm/pai.h b/arch/s390/include/asm/pai.h
index 5b7e33ac6f0b..3f609565734b 100644
--- a/arch/s390/include/asm/pai.h
+++ b/arch/s390/include/asm/pai.h
@@ -16,8 +16,10 @@ struct qpaci_info_block {
u64 header;
struct {
u64 : 8;
- u64 num_cc : 8; /* # of supported crypto counters */
- u64 : 48;
+ u64 num_cc : 8; /* # of supported crypto counters */
+ u64 : 9;
+ u64 num_nnpa : 7; /* # of supported NNPA counters */
+ u64 : 32;
};
};
@@ -42,6 +44,8 @@ static inline int qpaci(struct qpaci_info_block *info)
#define PAI_CRYPTO_BASE 0x1000 /* First event number */
#define PAI_CRYPTO_MAXCTR 256 /* Max # of event counters */
#define PAI_CRYPTO_KERNEL_OFFSET 2048
+#define PAI_NNPA_BASE 0x1800 /* First event number */
+#define PAI_NNPA_MAXCTR 128 /* Max # of event counters */
DECLARE_STATIC_KEY_FALSE(pai_key);
@@ -71,4 +75,11 @@ static __always_inline void pai_kernel_exit(struct pt_regs *regs)
WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd & ~PAI_CRYPTO_KERNEL_OFFSET);
}
+enum paievt_mode {
+ PAI_MODE_NONE,
+ PAI_MODE_SAMPLING,
+ PAI_MODE_COUNTING,
+};
+
+#define PAI_SAVE_AREA(x) ((x)->hw.event_base)
#endif
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index fdb9745ee998..30820a649e6e 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -6,9 +6,9 @@
#include <linux/mutex.h>
#include <linux/iommu.h>
#include <linux/pci_hotplug.h>
-#include <asm-generic/pci.h>
#include <asm/pci_clp.h>
#include <asm/pci_debug.h>
+#include <asm/pci_insn.h>
#include <asm/sclp.h>
#define PCIBIOS_MIN_IO 0x1000
@@ -97,6 +97,7 @@ struct zpci_bar_struct {
};
struct s390_domain;
+struct kvm_zdev;
#define ZPCI_FUNCTIONS_PER_BUS 256
struct zpci_bus {
@@ -116,18 +117,23 @@ struct zpci_bus {
struct zpci_dev {
struct zpci_bus *zbus;
struct list_head entry; /* list of all zpci_devices, needed for hotplug, etc. */
- struct list_head bus_next;
+ struct list_head iommu_list;
struct kref kref;
+ struct rcu_head rcu;
struct hotplug_slot hotplug_slot;
+ struct mutex state_lock; /* protect state changes */
enum zpci_state state;
u32 fid; /* function ID, used by sclp */
u32 fh; /* function handle, used by insn's */
+ u32 gisa; /* GISA designation for passthrough */
u16 vfn; /* virtual function number */
u16 pchid; /* physical channel ID */
+ u16 maxstbl; /* Maximum store block size */
u8 pfgid; /* function group ID */
u8 pft; /* pci function type */
u8 port;
+ u8 dtsm; /* Supported DT mask */
u8 rid_available : 1;
u8 has_hp_slot : 1;
u8 has_resources : 1;
@@ -137,7 +143,6 @@ struct zpci_dev {
u8 reserved : 2;
	unsigned int	devfn;		/* DEVFN part of the RID */
- struct mutex lock;
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
u32 uid; /* user defined id */
u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
@@ -152,16 +157,8 @@ struct zpci_dev {
/* DMA stuff */
unsigned long *dma_table;
- spinlock_t dma_table_lock;
int tlb_refresh;
- spinlock_t iommu_bitmap_lock;
- unsigned long *iommu_bitmap;
- unsigned long *lazy_bitmap;
- unsigned long iommu_size;
- unsigned long iommu_pages;
- unsigned int next_bit;
-
struct iommu_device iommu_dev; /* IOMMU core handle */
char res_name[16];
@@ -173,20 +170,20 @@ struct zpci_dev {
u64 dma_mask; /* DMA address space mask */
/* Function measurement block */
+ struct mutex fmb_lock;
struct zpci_fmb *fmb;
u16 fmb_update; /* update interval */
u16 fmb_length;
- /* software counters */
- atomic64_t allocated_pages;
- atomic64_t mapped_pages;
- atomic64_t unmapped_pages;
u8 version;
enum pci_bus_speed max_bus_speed;
struct dentry *debugfs_dev;
+ /* IOMMU and passthrough */
struct s390_domain *s390_domain; /* s390 IOMMU domain data */
+ struct kvm_zdev *kzdev;
+ struct mutex kzdev_lock;
};
static inline bool zdev_enabled(struct zpci_dev *zdev)
@@ -198,6 +195,9 @@ extern const struct attribute_group *zpci_attr_groups[];
extern unsigned int s390_pci_force_floating __initdata;
extern unsigned int s390_pci_no_rid;
+extern union zpci_sic_iib *zpci_aipb;
+extern struct airq_iv *zpci_aif_sbv;
+
/* -----------------------------------------------------------------------------
Prototypes
----------------------------------------------------------------------------- */
@@ -211,7 +211,7 @@ void zpci_device_reserved(struct zpci_dev *zdev);
bool zpci_is_device_configured(struct zpci_dev *zdev);
int zpci_hot_reset_device(struct zpci_dev *zdev);
-int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
+int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64, u8 *);
int zpci_unregister_ioat(struct zpci_dev *, u8);
void zpci_remove_reserved_devices(void);
void zpci_update_fh(struct zpci_dev *zdev, u32 fh);
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index 1f4b666e85ee..f0c677ddd270 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -50,6 +50,9 @@ struct clp_fh_list_entry {
#define CLP_UTIL_STR_LEN 64
#define CLP_PFIP_NR_SEGMENTS 4
+/* PCI function type numbers */
+#define PCI_FUNC_TYPE_ISM 0x5 /* ISM device */
+
extern bool zpci_unique_uid;
struct clp_rsp_slpc_pci {
@@ -153,9 +156,11 @@ struct clp_rsp_query_pci_grp {
u8 : 6;
u8 frame : 1;
u8 refresh : 1; /* TLB refresh mode */
- u16 reserved2;
+ u16 : 3;
+ u16 maxstbl : 13; /* Maximum store block size */
u16 mui;
- u16 : 16;
+ u8 dtsm; /* Supported DT mask */
+ u8 reserved3;
u16 maxfaal;
u16 : 4;
u16 dnoi : 12;
@@ -173,7 +178,8 @@ struct clp_req_set_pci {
u16 reserved2;
u8 oc; /* operation controls */
u8 ndas; /* number of dma spaces */
- u64 reserved3;
+ u32 reserved3;
+ u32 gisa; /* GISA designation */
} __packed;
/* Set PCI function response */
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
index 91e63426bdc5..42d7cc4262ca 100644
--- a/arch/s390/include/asm/pci_dma.h
+++ b/arch/s390/include/asm/pci_dma.h
@@ -82,116 +82,16 @@ enum zpci_ioat_dtype {
#define ZPCI_TABLE_VALID_MASK 0x20
#define ZPCI_TABLE_PROT_MASK 0x200
-static inline unsigned int calc_rtx(dma_addr_t ptr)
-{
- return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
-}
-
-static inline unsigned int calc_sx(dma_addr_t ptr)
-{
- return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
-}
-
-static inline unsigned int calc_px(dma_addr_t ptr)
-{
- return ((unsigned long) ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
-}
-
-static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
-{
- *entry &= ZPCI_PTE_FLAG_MASK;
- *entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
-}
-
-static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
-{
- *entry &= ZPCI_RTE_FLAG_MASK;
- *entry |= (sto & ZPCI_RTE_ADDR_MASK);
- *entry |= ZPCI_TABLE_TYPE_RTX;
-}
-
-static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
-{
- *entry &= ZPCI_STE_FLAG_MASK;
- *entry |= (pto & ZPCI_STE_ADDR_MASK);
- *entry |= ZPCI_TABLE_TYPE_SX;
-}
-
-static inline void validate_rt_entry(unsigned long *entry)
-{
- *entry &= ~ZPCI_TABLE_VALID_MASK;
- *entry &= ~ZPCI_TABLE_OFFSET_MASK;
- *entry |= ZPCI_TABLE_VALID;
- *entry |= ZPCI_TABLE_LEN_RTX;
-}
-
-static inline void validate_st_entry(unsigned long *entry)
-{
- *entry &= ~ZPCI_TABLE_VALID_MASK;
- *entry |= ZPCI_TABLE_VALID;
-}
-
-static inline void invalidate_pt_entry(unsigned long *entry)
-{
- WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
- *entry &= ~ZPCI_PTE_VALID_MASK;
- *entry |= ZPCI_PTE_INVALID;
-}
-
-static inline void validate_pt_entry(unsigned long *entry)
-{
- WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
- *entry &= ~ZPCI_PTE_VALID_MASK;
- *entry |= ZPCI_PTE_VALID;
-}
-
-static inline void entry_set_protected(unsigned long *entry)
-{
- *entry &= ~ZPCI_TABLE_PROT_MASK;
- *entry |= ZPCI_TABLE_PROTECTED;
-}
-
-static inline void entry_clr_protected(unsigned long *entry)
-{
- *entry &= ~ZPCI_TABLE_PROT_MASK;
- *entry |= ZPCI_TABLE_UNPROTECTED;
-}
-
-static inline int reg_entry_isvalid(unsigned long entry)
-{
- return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
-}
-
-static inline int pt_entry_isvalid(unsigned long entry)
-{
- return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
-}
-
-static inline unsigned long *get_rt_sto(unsigned long entry)
-{
- if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
- return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
- else
- return NULL;
-
-}
-
-static inline unsigned long *get_st_pto(unsigned long entry)
-{
- if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
- return phys_to_virt(entry & ZPCI_STE_ADDR_MASK);
- else
- return NULL;
-}
-
-/* Prototypes */
-void dma_free_seg_table(unsigned long);
-unsigned long *dma_alloc_cpu_table(void);
-void dma_cleanup_tables(unsigned long *);
-unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr);
-void dma_update_cpu_trans(unsigned long *entry, phys_addr_t page_addr, int flags);
-
-extern const struct dma_map_ops s390_pci_dma_ops;
+struct zpci_iommu_ctrs {
+ atomic64_t mapped_pages;
+ atomic64_t unmapped_pages;
+ atomic64_t global_rpcits;
+ atomic64_t sync_map_rpcits;
+ atomic64_t sync_rpcits;
+};
+
+struct zpci_dev;
+struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev);
#endif
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 61cf9531f68f..e5f57cfe1d45 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -98,6 +98,15 @@ struct zpci_fib {
u32 gd;
} __packed __aligned(8);
+/* Set Interruption Controls Operation Controls */
+#define SIC_IRQ_MODE_ALL 0
+#define SIC_IRQ_MODE_SINGLE 1
+#define SIC_SET_AENI_CONTROLS 2
+#define SIC_IRQ_MODE_DIRECT 4
+#define SIC_IRQ_MODE_D_ALL 16
+#define SIC_IRQ_MODE_D_SINGLE 17
+#define SIC_IRQ_MODE_SET_CPU 18
+
/* directed interruption information block */
struct zpci_diib {
u32 : 1;
@@ -119,9 +128,20 @@ struct zpci_cdiib {
u64 : 64;
} __packed __aligned(8);
+/* adapter interruption parameters block */
+struct zpci_aipb {
+ u64 faisb;
+ u64 gait;
+ u16 : 13;
+ u16 afi : 3;
+ u32 : 32;
+ u16 faal;
+} __packed __aligned(8);
+
union zpci_sic_iib {
struct zpci_diib diib;
struct zpci_cdiib cdiib;
+ struct zpci_aipb aipb;
};
DECLARE_STATIC_KEY_FALSE(have_mio);
@@ -134,13 +154,6 @@ int __zpci_store(u64 data, u64 req, u64 offset);
int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len);
int __zpci_store_block(const u64 *data, u64 req, u64 offset);
void zpci_barrier(void);
-int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib);
-
-static inline int zpci_set_irq_ctrl(u16 ctl, u8 isc)
-{
- union zpci_sic_iib iib = {{0}};
-
- return __zpci_set_irq_ctrl(ctl, isc, &iib);
-}
+int zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib);
#endif
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index 287bb88f7698..2686bee800e3 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -11,6 +11,8 @@
/* I/O size constraints */
#define ZPCI_MAX_READ_SIZE 8
#define ZPCI_MAX_WRITE_SIZE 128
+#define ZPCI_BOUNDARY_SIZE (1 << 12)
+#define ZPCI_BOUNDARY_MASK (ZPCI_BOUNDARY_SIZE - 1)
/* I/O Map */
#define ZPCI_IOMAP_SHIFT 48
@@ -125,16 +127,18 @@ out:
int zpci_write_block(volatile void __iomem *dst, const void *src,
unsigned long len);
-static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
+static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max)
{
- int count = len > max ? max : len, size = 1;
+ int offset = dst & ZPCI_BOUNDARY_MASK;
+ int size;
- while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
- dst = dst >> 1;
- src = src >> 1;
- size = size << 1;
- }
- return size;
+ size = min3(len, ZPCI_BOUNDARY_SIZE - offset, max);
+ if (IS_ALIGNED(src, 8) && IS_ALIGNED(dst, 8) && IS_ALIGNED(size, 8))
+ return size;
+
+ if (size >= 8)
+ return 8;
+ return rounddown_pow_of_two(size);
}
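[Editor's example] To see how the rewritten helper chunks a copy, here is a small stand-alone user-space illustration; min3(), IS_ALIGNED() and rounddown_pow_of_two() are reimplemented locally, so this only mirrors the arithmetic and is not the kernel helper itself:

#include <stdio.h>
#include <stdint.h>

#define ZPCI_BOUNDARY_SIZE	(1 << 12)
#define ZPCI_BOUNDARY_MASK	(ZPCI_BOUNDARY_SIZE - 1)

static int min3i(int a, int b, int c)
{
	int m = a < b ? a : b;

	return m < c ? m : c;
}

static int aligned8(uint64_t v)
{
	return (v & 7) == 0;
}

static int rounddown_pow2(int v)
{
	int p = 1;

	while (p * 2 <= v)
		p *= 2;
	return p;
}

/* Mirrors zpci_get_max_io_size() for illustration. */
static int max_io_size(uint64_t src, uint64_t dst, int len, int max)
{
	int offset = dst & ZPCI_BOUNDARY_MASK;
	int size = min3i(len, ZPCI_BOUNDARY_SIZE - offset, max);

	if (aligned8(src) && aligned8(dst) && size % 8 == 0)
		return size;
	if (size >= 8)
		return 8;
	return rounddown_pow2(size);
}

int main(void)
{
	/* 8 bytes left before the 4K boundary: chunk capped at 8 */
	printf("%d\n", max_io_size(0x1000, 0x2ff8, 100, 128));
	/* fully aligned and within one block: full 128-byte chunk */
	printf("%d\n", max_io_size(0x1000, 0x2000, 512, 128));
	return 0;
}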
static inline int zpci_memcpy_fromio(void *dst,
@@ -144,9 +148,9 @@ static inline int zpci_memcpy_fromio(void *dst,
int size, rc = 0;
while (n > 0) {
- size = zpci_get_max_write_size((u64 __force) src,
- (u64) dst, n,
- ZPCI_MAX_READ_SIZE);
+ size = zpci_get_max_io_size((u64 __force) src,
+ (u64) dst, n,
+ ZPCI_MAX_READ_SIZE);
rc = zpci_read_single(dst, src, size);
if (rc)
break;
@@ -166,9 +170,9 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
return -EINVAL;
while (n > 0) {
- size = zpci_get_max_write_size((u64 __force) dst,
- (u64) src, n,
- ZPCI_MAX_WRITE_SIZE);
+ size = zpci_get_max_io_size((u64 __force) dst,
+ (u64) src, n,
+ ZPCI_MAX_WRITE_SIZE);
if (size > 8) /* main path */
rc = zpci_write_block(dst, src, size);
else
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index cb5fc0690435..264095dd84bc 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -31,7 +31,7 @@
pcp_op_T__ *ptr__; \
preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
- prev__ = *ptr__; \
+ prev__ = READ_ONCE(*ptr__); \
do { \
old__ = prev__; \
new__ = old__ op (val); \
@@ -148,6 +148,22 @@
#define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg64(pcp, o, n) this_cpu_cmpxchg_8(pcp, o, n)
+
+#define this_cpu_cmpxchg128(pcp, oval, nval) \
+({ \
+ typedef typeof(pcp) pcp_op_T__; \
+ u128 old__, new__, ret__; \
+ pcp_op_T__ *ptr__; \
+ old__ = oval; \
+ new__ = nval; \
+ preempt_disable_notrace(); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
+ ret__ = cmpxchg128((void *)ptr__, old__, new__); \
+ preempt_enable_notrace(); \
+ ret__; \
+})
+
#define arch_this_cpu_xchg(pcp, nval) \
({ \
typeof(pcp) *ptr__; \
@@ -164,24 +180,6 @@
#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
-({ \
- typeof(pcp1) *p1__; \
- typeof(pcp2) *p2__; \
- int ret__; \
- \
- preempt_disable_notrace(); \
- p1__ = raw_cpu_ptr(&(pcp1)); \
- p2__ = raw_cpu_ptr(&(pcp2)); \
- ret__ = __cmpxchg_double((unsigned long)p1__, (unsigned long)p2__, \
- (unsigned long)(o1), (unsigned long)(o2), \
- (unsigned long)(n1), (unsigned long)(n2)); \
- preempt_enable_notrace(); \
- ret__; \
-})
-
-#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
-
#include <asm-generic/percpu.h>
#endif /* __ARCH_S390_PERCPU__ */
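[Editor's example] A hedged sketch of how the new this_cpu_cmpxchg128() above might be used (the variable name and the snapshot trick are illustrative; there is no 128-bit this_cpu_read(), so the initial read is done with a no-op cmpxchg):

static DEFINE_PER_CPU_ALIGNED(u128, example_state);

static void example_publish(u128 new)
{
	u128 old, prev;

	prev = this_cpu_cmpxchg128(example_state, 0, 0);  /* atomic snapshot */
	do {
		old = prev;
		prev = this_cpu_cmpxchg128(example_state, old, new);
	} while (prev != old);
}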
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index b9da71632827..9917e2717b2b 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -60,7 +60,6 @@ struct perf_sf_sde_regs {
#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \
PERF_CPUM_SF_DIAG_MODE)
-#define PERF_CPUM_SF_FULL_BLOCKS 0x0004 /* Process full SDBs only */
#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */
#define REG_NONE 0
@@ -71,7 +70,6 @@ struct perf_sf_sde_regs {
#define SAMPL_RATE(hwc) ((hwc)->event_base)
#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
-#define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS)
#define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
#define perf_arch_fetch_caller_regs(regs, __ip) do { \
diff --git a/arch/s390/include/asm/pfault.h b/arch/s390/include/asm/pfault.h
new file mode 100644
index 000000000000..a1bee4a1e470
--- /dev/null
+++ b/arch/s390/include/asm/pfault.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 1999, 2023
+ */
+#ifndef _ASM_S390_PFAULT_H
+#define _ASM_S390_PFAULT_H
+
+#include <linux/errno.h>
+
+int __pfault_init(void);
+void __pfault_fini(void);
+
+static inline int pfault_init(void)
+{
+ if (IS_ENABLED(CONFIG_PFAULT))
+ return __pfault_init();
+ return -EOPNOTSUPP;
+}
+
+static inline void pfault_fini(void)
+{
+ if (IS_ENABLED(CONFIG_PFAULT))
+ __pfault_fini();
+}
+
+#endif /* _ASM_S390_PFAULT_H */
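The wrappers rely on IS_ENABLED() so that callers need no #ifdef: with the option off, the branch is compile-time dead and the out-of-line call is optimized away. A toy stand-alone sketch of the pattern, with the config value hardcoded for demonstration:

#include <stdio.h>
#include <errno.h>

/* stand-in for the kernel's IS_ENABLED(); 0 simulates CONFIG_PFAULT=n */
#define IS_ENABLED_PFAULT 0

static int __pfault_init(void)
{
        return 0;
}

static inline int pfault_init(void)
{
        if (IS_ENABLED_PFAULT)
                return __pfault_init(); /* dead code when the option is off */
        return -EOPNOTSUPP;
}

int main(void)
{
        printf("pfault_init() = %d\n", pfault_init());
        return 0;
}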
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 17eb618f1348..7b84ef6dc4b6 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -23,10 +23,9 @@ unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);
unsigned long *page_table_alloc(struct mm_struct *);
-struct page *page_table_alloc_pgste(struct mm_struct *mm);
+struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
-void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
-void page_table_free_pgste(struct page *page);
+void page_table_free_pgste(struct ptdesc *ptdesc);
extern int page_table_allocate_pgste;
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
@@ -86,7 +85,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
if (!table)
return NULL;
crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
- if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
+ if (!pagetable_pmd_ctor(virt_to_ptdesc(table))) {
crst_table_free(mm, table);
return NULL;
}
@@ -97,7 +96,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
if (mm_pmd_folded(mm))
return;
- pgtable_pmd_page_dtor(virt_to_page(pmd));
+ pagetable_pmd_dtor(virt_to_ptdesc(pmd));
crst_table_free(mm, (unsigned long *) pmd);
}
@@ -143,6 +142,10 @@ static inline void pmd_populate(struct mm_struct *mm,
#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
+/* The arch provides its own pte_free_defer() implementation in arch/s390/mm/pgalloc.c */
+#define pte_free_defer pte_free_defer
+void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);
+
void vmem_map_init(void);
void *vmem_crst_alloc(unsigned long val);
pte_t *vmem_pte_alloc(void);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index a397b072a580..60950e7a25f5 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -18,13 +18,15 @@
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/sections.h>
+#include <asm/ctlreg.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>
extern pgd_t swapper_pg_dir[];
+extern pgd_t invalid_pg_dir[];
extern void paging_init(void);
-extern unsigned long s390_invalid_asce;
+extern struct ctlreg s390_invalid_asce;
enum {
PG_DIRECT_MAP_4K = 0,
@@ -33,7 +35,7 @@ enum {
PG_DIRECT_MAP_MAX
};
-extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
+extern atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
static inline void update_page_count(int level, long count)
{
@@ -41,14 +43,12 @@ static inline void update_page_count(int level, long count)
atomic_long_add(count, &direct_pages_count[level]);
}
-struct seq_file;
-void arch_report_meminfo(struct seq_file *m);
-
/*
* The S390 doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
*/
#define update_mmu_cache(vma, address, ptep) do { } while (0)
+#define update_mmu_cache_range(vmf, vma, addr, ptep, nr) do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
/*
@@ -91,8 +91,6 @@ extern unsigned long __bootdata_preserved(VMALLOC_END);
extern struct page *__bootdata_preserved(vmemmap);
extern unsigned long __bootdata_preserved(vmemmap_size);
-#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
-
extern unsigned long __bootdata_preserved(MODULES_VADDR);
extern unsigned long __bootdata_preserved(MODULES_END);
#define MODULES_VADDR MODULES_VADDR
@@ -181,6 +179,8 @@ static inline int is_module_addr(void *addr)
#define _PAGE_SOFT_DIRTY 0x000
#endif
+#define _PAGE_SW_BITS 0xffUL /* All SW bits */
+
#define _PAGE_SWP_EXCLUSIVE _PAGE_LARGE /* SW pte exclusive swap bit */
/* Set of bits not changed in pte_modify */
@@ -188,6 +188,12 @@ static inline int is_module_addr(void *addr)
_PAGE_YOUNG | _PAGE_SOFT_DIRTY)
/*
+ * Mask of bits that must not be changed with RDP. Only the _PAGE_PROTECT
+ * HW bit and all SW bits are allowed to differ.
+ */
+#define _PAGE_RDP_MASK ~(_PAGE_PROTECT | _PAGE_SW_BITS)
+
+/*
* handle_pte_fault uses pte_present and pte_none to find out the pte type
* WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
* distinguish present from not-present ptes. It is changed only with the page
@@ -424,23 +430,6 @@ static inline int is_module_addr(void *addr)
* implies read permission.
*/
/*xwr*/
-#define __P000 PAGE_NONE
-#define __P001 PAGE_RO
-#define __P010 PAGE_RO
-#define __P011 PAGE_RO
-#define __P100 PAGE_RX
-#define __P101 PAGE_RX
-#define __P110 PAGE_RX
-#define __P111 PAGE_RX
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_RO
-#define __S010 PAGE_RW
-#define __S011 PAGE_RW
-#define __S100 PAGE_RX
-#define __S101 PAGE_RX
-#define __S110 PAGE_RWX
-#define __S111 PAGE_RWX
/*
* Segment entry (large page) protection definitions.
@@ -494,6 +483,12 @@ static inline int is_module_addr(void *addr)
_REGION3_ENTRY_YOUNG | \
_REGION_ENTRY_PROTECT | \
_REGION_ENTRY_NOEXEC)
+#define REGION3_KERNEL_EXEC __pgprot(_REGION_ENTRY_TYPE_R3 | \
+ _REGION3_ENTRY_LARGE | \
+ _REGION3_ENTRY_READ | \
+ _REGION3_ENTRY_WRITE | \
+ _REGION3_ENTRY_YOUNG | \
+ _REGION3_ENTRY_DIRTY)
static inline bool mm_p4d_folded(struct mm_struct *mm)
{
@@ -525,7 +520,7 @@ static inline int mm_has_pgste(struct mm_struct *mm)
static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
- if (unlikely(atomic_read(&mm->context.is_protected)))
+ if (unlikely(atomic_read(&mm->context.protected_count)))
return 1;
#endif
return 0;
@@ -710,23 +705,23 @@ static inline int pud_none(pud_t pud)
return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}
-#define pud_leaf pud_large
-static inline int pud_large(pud_t pud)
+#define pud_leaf pud_leaf
+static inline bool pud_leaf(pud_t pud)
{
if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
return 0;
return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}
-#define pmd_leaf pmd_large
-static inline int pmd_large(pmd_t pmd)
+#define pmd_leaf pmd_leaf
+static inline bool pmd_leaf(pmd_t pmd)
{
return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}
static inline int pmd_bad(pmd_t pmd)
{
- if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
+ if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_leaf(pmd))
return 1;
return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}
@@ -735,7 +730,7 @@ static inline int pud_bad(pud_t pud)
{
unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
- if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
+ if (type > _REGION_ENTRY_TYPE_R3 || pud_leaf(pud))
return 1;
if (type < _REGION_ENTRY_TYPE_R3)
return 0;
@@ -775,11 +770,13 @@ static inline int pud_write(pud_t pud)
return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}
+#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}
+#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
@@ -823,12 +820,11 @@ static inline int pte_protnone(pte_t pte)
static inline int pmd_protnone(pmd_t pmd)
{
- /* pmd_large(pmd) implies pmd_present(pmd) */
- return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
+ /* pmd_leaf(pmd) implies pmd_present(pmd) */
+ return pmd_leaf(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
static inline int pte_swp_exclusive(pte_t pte)
{
return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
@@ -1007,7 +1003,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
{
pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE));
if (pte_val(pte) & _PAGE_DIRTY)
@@ -1061,6 +1057,19 @@ static inline pte_t pte_mkhuge(pte_t pte)
#define IPTE_NODAT 0x400
#define IPTE_GUEST_ASCE 0x800
+static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep,
+ unsigned long opt, unsigned long asce,
+ int local)
+{
+ unsigned long pto;
+
+ pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
+ asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%[asce],%[m4]"
+ : "+m" (*ptep)
+ : [r1] "a" (pto), [r2] "a" ((addr & PAGE_MASK) | opt),
+ [asce] "a" (asce), [m4] "i" (local));
+}
+
static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
unsigned long opt, unsigned long asce,
int local)
@@ -1182,9 +1191,22 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
} else {
res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}
- /* At this point the reference through the mapping is still present */
- if (mm_is_protected(mm) && pte_present(res))
- uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
+ /* Nothing to do */
+ if (!mm_is_protected(mm) || !pte_present(res))
+ return res;
+ /*
+ * At this point the reference through the mapping is still present.
+ * The notifier should have destroyed all protected vCPUs at this
+ * point, so the destroy should be successful.
+ */
+ if (full && !uv_destroy_owned_page(pte_val(res) & PAGE_MASK))
+ return res;
+ /*
+ * If something went wrong and the page could not be destroyed, or
+ * if this is not a mm teardown, the slower export is used as
+ * fallback instead.
+ */
+ uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
return res;
}
@@ -1198,6 +1220,44 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}
+/*
+ * Check if the PTEs differ only in the _PAGE_PROTECT HW bit. SW PTE bits
+ * are also allowed to differ in the comparison, since those might change
+ * e.g. because of dirty and young tracking.
+ */
+static inline int pte_allow_rdp(pte_t old, pte_t new)
+{
+ /*
+ * Only allow changes from RO to RW
+ */
+ if (!(pte_val(old) & _PAGE_PROTECT) || pte_val(new) & _PAGE_PROTECT)
+ return 0;
+
+ return (pte_val(old) & _PAGE_RDP_MASK) == (pte_val(new) & _PAGE_RDP_MASK);
+}
+
+static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ /*
+ * RDP might not have propagated the PTE protection reset to all CPUs,
+ * so there could be spurious TLB protection faults.
+ * NOTE: This will also be called when a racing pagetable update on
+ * another thread already installed the correct PTE. Both cases cannot
+ * really be distinguished.
+ * Therefore, only do the local TLB flush when RDP can be used, and the
+ * PTE does not have _PAGE_PROTECT set, to avoid unnecessary overhead.
+ * A local RDP can be used to do the flush.
+ */
+ if (MACHINE_HAS_RDP && !(pte_val(*ptep) & _PAGE_PROTECT))
+ __ptep_rdp(address, ptep, 0, 0, 1);
+}
+#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
+
+void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+ pte_t new);
+
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
@@ -1205,7 +1265,10 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
{
if (pte_same(*ptep, entry))
return 0;
- ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
+ if (MACHINE_HAS_RDP && !mm_has_pgste(vma->vm_mm) && pte_allow_rdp(*ptep, entry))
+ ptep_reset_dat_prot(vma->vm_mm, addr, ptep, entry);
+ else
+ ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
return 1;
}
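A small user-space sketch of the pte_allow_rdp() decision above: RDP is attempted only for an RO to RW transition where nothing outside the _PAGE_PROTECT HW bit and the SW bits differs. The _PAGE_PROTECT value below is illustrative, not necessarily the real s390 layout:

#include <stdio.h>

/* illustrative bit values; _PAGE_SW_BITS matches the definition above */
#define _PAGE_PROTECT   0x200UL
#define _PAGE_SW_BITS   0xffUL
#define _PAGE_RDP_MASK  (~(_PAGE_PROTECT | _PAGE_SW_BITS))

static int pte_allow_rdp(unsigned long old, unsigned long new)
{
        /* only RO -> RW transitions qualify */
        if (!(old & _PAGE_PROTECT) || (new & _PAGE_PROTECT))
                return 0;
        return (old & _PAGE_RDP_MASK) == (new & _PAGE_RDP_MASK);
}

int main(void)
{
        unsigned long ro = 0x1000UL | _PAGE_PROTECT | 0x10UL; /* RO, one SW bit */
        unsigned long rw = 0x1000UL;                          /* RW, SW bits clear */

        printf("RO->RW, same frame: %d\n", pte_allow_rdp(ro, rw)); /* 1 */
        printf("RW->RW:             %d\n", pte_allow_rdp(rw, rw)); /* 0 */
        return 0;
}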
@@ -1253,21 +1316,37 @@ pgprot_t pgprot_writecombine(pgprot_t prot);
#define pgprot_writethrough pgprot_writethrough
pgprot_t pgprot_writethrough(pgprot_t prot);
+#define PFN_PTE_SHIFT PAGE_SHIFT
+
/*
- * Certain architectures need to do special things when PTEs
- * within a page table are directly modified. Thus, the following
- * hook is made available.
+ * Set multiple PTEs to consecutive pages with a single call. All PTEs
+ * are within the same folio, PMD and VMA.
*/
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t entry)
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t entry, unsigned int nr)
{
if (pte_present(entry))
entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
- if (mm_has_pgste(mm))
- ptep_set_pte_at(mm, addr, ptep, entry);
- else
- set_pte(ptep, entry);
+ if (mm_has_pgste(mm)) {
+ for (;;) {
+ ptep_set_pte_at(mm, addr, ptep, entry);
+ if (--nr == 0)
+ break;
+ ptep++;
+ entry = __pte(pte_val(entry) + PAGE_SIZE);
+ addr += PAGE_SIZE;
+ }
+ } else {
+ for (;;) {
+ set_pte(ptep, entry);
+ if (--nr == 0)
+ break;
+ ptep++;
+ entry = __pte(pte_val(entry) + PAGE_SIZE);
+ }
+ }
}
+#define set_ptes set_ptes
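Because PFN_PTE_SHIFT equals PAGE_SHIFT, adding PAGE_SIZE to the PTE value advances the encoded page frame by exactly one page, which is what the loops above exploit. A toy model with illustrative bit values:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        unsigned long ptes[4];
        /* illustrative: page frame origin in the high bits, HW bits low */
        unsigned long entry = 0x80000UL | 0x3UL;
        unsigned int i, nr = 4;

        for (i = 0; i < nr; i++, entry += PAGE_SIZE)
                ptes[i] = entry;        /* analogue of set_pte(ptep++, entry) */
        for (i = 0; i < nr; i++)
                printf("pte[%u] = 0x%lx\n", i, ptes[i]);
        return 0;
}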
/*
* Conversion functions: convert a page and protection to a page entry,
@@ -1306,7 +1385,7 @@ static inline unsigned long pmd_deref(pmd_t pmd)
unsigned long origin_mask;
origin_mask = _SEGMENT_ENTRY_ORIGIN;
- if (pmd_large(pmd))
+ if (pmd_leaf(pmd))
origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
return (unsigned long)__va(pmd_val(pmd) & origin_mask);
}
@@ -1321,7 +1400,7 @@ static inline unsigned long pud_deref(pud_t pud)
unsigned long origin_mask;
origin_mask = _REGION_ENTRY_ORIGIN;
- if (pud_large(pud))
+ if (pud_leaf(pud))
origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
return (unsigned long)__va(pud_val(pud) & origin_mask);
}
@@ -1423,7 +1502,7 @@ static inline pmd_t pmd_wrprotect(pmd_t pmd)
return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
+static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
@@ -1777,10 +1856,12 @@ static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#define kern_addr_valid(addr) (1)
-
extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
+extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc);
+extern int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot);
+extern void vmem_unmap_4k_page(unsigned long addr);
+extern pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);
diff --git a/arch/s390/include/asm/physmem_info.h b/arch/s390/include/asm/physmem_info.h
new file mode 100644
index 000000000000..e747b067f8db
--- /dev/null
+++ b/arch/s390/include/asm/physmem_info.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_MEM_DETECT_H
+#define _ASM_S390_MEM_DETECT_H
+
+#include <linux/types.h>
+#include <asm/page.h>
+
+enum physmem_info_source {
+ MEM_DETECT_NONE = 0,
+ MEM_DETECT_SCLP_STOR_INFO,
+ MEM_DETECT_DIAG260,
+ MEM_DETECT_SCLP_READ_INFO,
+ MEM_DETECT_BIN_SEARCH
+};
+
+struct physmem_range {
+ u64 start;
+ u64 end;
+};
+
+enum reserved_range_type {
+ RR_DECOMPRESSOR,
+ RR_INITRD,
+ RR_VMLINUX,
+ RR_RELOC,
+ RR_AMODE31,
+ RR_IPLREPORT,
+ RR_CERT_COMP_LIST,
+ RR_MEM_DETECT_EXTENDED,
+ RR_VMEM,
+ RR_MAX
+};
+
+struct reserved_range {
+ unsigned long start;
+ unsigned long end;
+ struct reserved_range *chain;
+};
+
+/*
+ * Storage element id is defined as 1 byte (up to 256 storage elements).
+ * In practise only storage element id 0 and 1 are used).
+ * According to architecture one storage element could have as much as
+ * 1020 subincrements. 255 physmem_ranges are embedded in physmem_info.
+ * If more physmem_ranges are required, a block of memory from already
+ * known physmem_range is taken (online_extended points to it).
+ */
+#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
+
+struct physmem_info {
+ u32 range_count;
+ u8 info_source;
+ unsigned long usable;
+ struct reserved_range reserved[RR_MAX];
+ struct physmem_range online[MEM_INLINED_ENTRIES];
+ struct physmem_range *online_extended;
+};
+
+extern struct physmem_info physmem_info;
+
+void add_physmem_online_range(u64 start, u64 end);
+
+static inline int __get_physmem_range(u32 n, unsigned long *start,
+ unsigned long *end, bool respect_usable_limit)
+{
+ if (n >= physmem_info.range_count) {
+ *start = 0;
+ *end = 0;
+ return -1;
+ }
+
+ if (n < MEM_INLINED_ENTRIES) {
+ *start = (unsigned long)physmem_info.online[n].start;
+ *end = (unsigned long)physmem_info.online[n].end;
+ } else {
+ *start = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].start;
+ *end = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].end;
+ }
+
+ if (respect_usable_limit && physmem_info.usable) {
+ if (*start >= physmem_info.usable)
+ return -1;
+ if (*end > physmem_info.usable)
+ *end = physmem_info.usable;
+ }
+ return 0;
+}
+
+/**
+ * for_each_physmem_usable_range - early online memory range iterator
+ * @i: an integer used as loop variable
+ * @p_start: ptr to unsigned long for start address of the range
+ * @p_end: ptr to unsigned long for end address of the range
+ *
+ * Walks over detected online memory ranges below usable limit.
+ */
+#define for_each_physmem_usable_range(i, p_start, p_end) \
+ for (i = 0; !__get_physmem_range(i, p_start, p_end, true); i++)
+
+/* Walks over all detected online memory ranges, disregarding the usable limit. */
+#define for_each_physmem_online_range(i, p_start, p_end) \
+ for (i = 0; !__get_physmem_range(i, p_start, p_end, false); i++)
+
+static inline const char *get_physmem_info_source(void)
+{
+ switch (physmem_info.info_source) {
+ case MEM_DETECT_SCLP_STOR_INFO:
+ return "sclp storage info";
+ case MEM_DETECT_DIAG260:
+ return "diag260";
+ case MEM_DETECT_SCLP_READ_INFO:
+ return "sclp read info";
+ case MEM_DETECT_BIN_SEARCH:
+ return "binary search";
+ }
+ return "none";
+}
+
+#define RR_TYPE_NAME(t) case RR_ ## t: return #t
+static inline const char *get_rr_type_name(enum reserved_range_type t)
+{
+ switch (t) {
+ RR_TYPE_NAME(DECOMPRESSOR);
+ RR_TYPE_NAME(INITRD);
+ RR_TYPE_NAME(VMLINUX);
+ RR_TYPE_NAME(AMODE31);
+ RR_TYPE_NAME(IPLREPORT);
+ RR_TYPE_NAME(CERT_COMP_LIST);
+ RR_TYPE_NAME(MEM_DETECT_EXTENDED);
+ RR_TYPE_NAME(VMEM);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+#define for_each_physmem_reserved_type_range(t, range, p_start, p_end) \
+ for (range = &physmem_info.reserved[t], *p_start = range->start, *p_end = range->end; \
+ range && range->end; range = range->chain ? __va(range->chain) : NULL, \
+ *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
+
+static inline struct reserved_range *__physmem_reserved_next(enum reserved_range_type *t,
+ struct reserved_range *range)
+{
+ if (!range) {
+ range = &physmem_info.reserved[*t];
+ if (range->end)
+ return range;
+ }
+ if (range->chain)
+ return __va(range->chain);
+ while (++*t < RR_MAX) {
+ range = &physmem_info.reserved[*t];
+ if (range->end)
+ return range;
+ }
+ return NULL;
+}
+
+#define for_each_physmem_reserved_range(t, range, p_start, p_end) \
+ for (t = 0, range = __physmem_reserved_next(&t, NULL), \
+ *p_start = range ? range->start : 0, *p_end = range ? range->end : 0; \
+ range; range = __physmem_reserved_next(&t, range), \
+ *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
+
+static inline unsigned long get_physmem_reserved(enum reserved_range_type type,
+ unsigned long *addr, unsigned long *size)
+{
+ *addr = physmem_info.reserved[type].start;
+ *size = physmem_info.reserved[type].end - physmem_info.reserved[type].start;
+ return *size;
+}
+
+#endif
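A self-contained mock of the range iteration, showing how __get_physmem_range() clamps ranges against the usable limit; the sample ranges and limit are invented for demonstration:

#include <stdio.h>

struct range { unsigned long start, end; };

/* invented sample data: two online ranges, usable limit inside the second */
static struct range online[] = {
        { 0x00000000UL, 0x80000000UL },
        { 0x100000000UL, 0x180000000UL },
};
static unsigned long usable = 0x120000000UL;

static int get_range(unsigned int n, unsigned long *start, unsigned long *end)
{
        if (n >= sizeof(online) / sizeof(online[0]))
                return -1;
        *start = online[n].start;
        *end = online[n].end;
        if (usable) {
                if (*start >= usable)
                        return -1;              /* fully above the limit */
                if (*end > usable)
                        *end = usable;          /* clamp to the limit */
        }
        return 0;
}

#define for_each_usable_range(i, s, e) \
        for (i = 0; !get_range(i, s, e); i++)

int main(void)
{
        unsigned long start, end;
        unsigned int i;

        for_each_usable_range(i, &start, &end)
                printf("online: 0x%lx-0x%lx\n", start, end);
        return 0;
}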
diff --git a/arch/s390/include/asm/pkey.h b/arch/s390/include/asm/pkey.h
index dd3d20c332ac..47d80a7451a6 100644
--- a/arch/s390/include/asm/pkey.h
+++ b/arch/s390/include/asm/pkey.h
@@ -2,7 +2,7 @@
/*
* Kernelspace interface to the pkey device driver
*
- * Copyright IBM Corp. 2016,2019
+ * Copyright IBM Corp. 2016, 2023
*
* Author: Harald Freudenberger <freude@de.ibm.com>
*
@@ -23,6 +23,6 @@
* @return 0 on success, negative errno value on failure
*/
int pkey_keyblob2pkey(const u8 *key, u32 keylen,
- struct pkey_protkey *protkey);
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
#endif /* _KAPI_PKEY_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index bd66f8e34949..db9982f0e8cd 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -14,14 +14,14 @@
#include <linux/bits.h>
+#define CIF_SIE 0 /* CPU needs SIE exit cleanup */
#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
-#define CIF_FPU 3 /* restore FPU registers */
#define CIF_ENABLED_WAIT 5 /* in enabled wait state */
#define CIF_MCCK_GUEST 6 /* machine check happening in guest */
#define CIF_DEDICATED_CPU 7 /* this CPU is dedicated */
+#define _CIF_SIE BIT(CIF_SIE)
#define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY)
-#define _CIF_FPU BIT(CIF_FPU)
#define _CIF_ENABLED_WAIT BIT(CIF_ENABLED_WAIT)
#define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST)
#define _CIF_DEDICATED_CPU BIT(CIF_DEDICATED_CPU)
@@ -33,40 +33,56 @@
#include <linux/cpumask.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
+#include <asm/fpu-types.h>
#include <asm/cpu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/runtime_instr.h>
-#include <asm/fpu/types.h>
-#include <asm/fpu/internal.h>
#include <asm/irqflags.h>
typedef long (*sys_call_ptr_t)(struct pt_regs *regs);
-static inline void set_cpu_flag(int flag)
+static __always_inline void set_cpu_flag(int flag)
{
S390_lowcore.cpu_flags |= (1UL << flag);
}
-static inline void clear_cpu_flag(int flag)
+static __always_inline void clear_cpu_flag(int flag)
{
S390_lowcore.cpu_flags &= ~(1UL << flag);
}
-static inline int test_cpu_flag(int flag)
+static __always_inline bool test_cpu_flag(int flag)
{
- return !!(S390_lowcore.cpu_flags & (1UL << flag));
+ return S390_lowcore.cpu_flags & (1UL << flag);
+}
+
+static __always_inline bool test_and_set_cpu_flag(int flag)
+{
+ if (test_cpu_flag(flag))
+ return true;
+ set_cpu_flag(flag);
+ return false;
+}
+
+static __always_inline bool test_and_clear_cpu_flag(int flag)
+{
+ if (!test_cpu_flag(flag))
+ return false;
+ clear_cpu_flag(flag);
+ return true;
}
/*
* Test CIF flag of another CPU. The caller needs to ensure that
* CPU hotplug cannot happen, e.g. by disabling preemption.
*/
-static inline int test_cpu_flag_of(int flag, int cpu)
+static __always_inline bool test_cpu_flag_of(int flag, int cpu)
{
struct lowcore *lc = lowcore_ptr[cpu];
- return !!(lc->cpu_flags & (1UL << flag));
+
+ return lc->cpu_flags & (1UL << flag);
}
#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
@@ -82,7 +98,6 @@ void cpu_detect_mhz_feature(void);
extern const struct seq_operations cpuinfo_op;
extern void execve_tail(void);
-extern void __bpon(void);
unsigned long vdso_size(void);
/*
@@ -102,6 +117,41 @@ unsigned long vdso_size(void);
#define HAVE_ARCH_PICK_MMAP_LAYOUT
+#define __stackleak_poison __stackleak_poison
+static __always_inline void __stackleak_poison(unsigned long erase_low,
+ unsigned long erase_high,
+ unsigned long poison)
+{
+ unsigned long tmp, count;
+
+ count = erase_high - erase_low;
+ if (!count)
+ return;
+ asm volatile(
+ " cghi %[count],8\n"
+ " je 2f\n"
+ " aghi %[count],-(8+1)\n"
+ " srlg %[tmp],%[count],8\n"
+ " ltgr %[tmp],%[tmp]\n"
+ " jz 1f\n"
+ "0: stg %[poison],0(%[addr])\n"
+ " mvc 8(256-8,%[addr]),0(%[addr])\n"
+ " la %[addr],256(%[addr])\n"
+ " brctg %[tmp],0b\n"
+ "1: stg %[poison],0(%[addr])\n"
+ " larl %[tmp],3f\n"
+ " ex %[count],0(%[tmp])\n"
+ " j 4f\n"
+ "2: stg %[poison],0(%[addr])\n"
+ " j 4f\n"
+ "3: mvc 8(1,%[addr]),0(%[addr])\n"
+ "4:\n"
+ : [addr] "+&a" (erase_low), [count] "+&d" (count), [tmp] "=&a" (tmp)
+ : [poison] "d" (poison)
+ : "memory", "cc"
+ );
+}
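A plain C model of the poison loop's effect: store one doubleword of poison, then replicate it through the rest of the range by copying from the already-poisoned prefix, which is what the overlapping MVC achieves 256 bytes at a time. The model is illustrative; the real code is written in assembly precisely to avoid calling instrumented C helpers:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void poison_model(unsigned char *low, unsigned char *high,
                         uint64_t poison)
{
        size_t count = high - low;

        if (!count)
                return;
        /* seed: one (possibly partial) doubleword of poison */
        memcpy(low, &poison, count < 8 ? count : 8);
        while (count > 8) {
                /* copy from the poisoned prefix, like MVC does */
                size_t chunk = count - 8 < 8 ? count - 8 : 8;

                memcpy(low + 8, low, chunk);
                low += chunk;
                count -= chunk;
        }
}

int main(void)
{
        unsigned char buf[20];
        size_t i;

        poison_model(buf, buf + sizeof(buf), 0xAAAAAAAAAAAAAAAAULL);
        for (i = 0; i < sizeof(buf); i++)
                printf("%02x", buf[i]);
        printf("\n");
        return 0;
}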
+
/*
* Thread structure
*/
@@ -118,6 +168,8 @@ struct thread_struct {
unsigned int gmap_write_flag; /* gmap fault write indication */
unsigned int gmap_int_code; /* int code of last gmap fault */
unsigned int gmap_pfault; /* signal of a pending guest pfault */
+ int ufpu_flags; /* user fpu flags */
+ int kfpu_flags; /* kernel fpu flags */
/* Per-thread information related to debugging */
struct per_regs per_user; /* User specified PER registers */
@@ -133,11 +185,8 @@ struct thread_struct {
struct gs_cb *gs_cb; /* Current guarded storage cb */
struct gs_cb *gs_bc_cb; /* Broadcast guarded storage cb */
struct pgm_tdb trap_tdb; /* Transaction abort diagnose block */
- /*
- * Warning: 'fpu' is dynamically-sized. It *MUST* be at
- * the end.
- */
- struct fpu fpu; /* FP and VX register save area */
+ struct fpu ufpu; /* User FP and VX register save area */
+ struct fpu kfpu; /* Kernel FP and VX register save area */
};
/* Flag to disable transactions. */
@@ -156,7 +205,6 @@ typedef struct thread_struct thread_struct;
#define INIT_THREAD { \
.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
- .fpu.regs = (void *) init_task.thread.fpu.fprs, \
.last_break = 1, \
}
@@ -177,7 +225,6 @@ typedef struct thread_struct thread_struct;
execve_tail(); \
} while (0)
-/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct seq_file;
@@ -186,9 +233,6 @@ struct pt_regs;
void show_registers(struct pt_regs *regs);
void show_cacheinfo(struct seq_file *m);
-/* Free all resources held by a thread. */
-static inline void release_thread(struct task_struct *tsk) { }
-
/* Free guarded storage control block */
void guarded_storage_release(struct task_struct *tsk);
void gs_load_bc_cb(struct pt_regs *regs);
@@ -202,7 +246,23 @@ unsigned long __get_wchan(struct task_struct *p);
/* Has task runtime instrumentation enabled ? */
#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
-register unsigned long current_stack_pointer asm("r15");
+/* avoid using a global register variable due to a gcc bug in versions < 8.4 */
+#define current_stack_pointer (__current_stack_pointer())
+
+static __always_inline unsigned long __current_stack_pointer(void)
+{
+ unsigned long sp;
+
+ asm volatile("lgr %0,15" : "=d" (sp));
+ return sp;
+}
+
+static __always_inline bool on_thread_stack(void)
+{
+ unsigned long ksp = S390_lowcore.kernel_stack;
+
+ return !((ksp ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
+}
static __always_inline unsigned short stap(void)
{
@@ -268,14 +328,36 @@ static inline unsigned long __extract_psw(void)
return (((unsigned long) reg1) << 32) | ((unsigned long) reg2);
}
-static inline void local_mcck_enable(void)
+static inline unsigned long __local_mcck_save(void)
{
- __load_psw_mask(__extract_psw() | PSW_MASK_MCHECK);
+ unsigned long mask = __extract_psw();
+
+ __load_psw_mask(mask & ~PSW_MASK_MCHECK);
+ return mask & PSW_MASK_MCHECK;
+}
+
+#define local_mcck_save(mflags) \
+do { \
+ typecheck(unsigned long, mflags); \
+ mflags = __local_mcck_save(); \
+} while (0)
+
+static inline void local_mcck_restore(unsigned long mflags)
+{
+ unsigned long mask = __extract_psw();
+
+ mask &= ~PSW_MASK_MCHECK;
+ __load_psw_mask(mask | mflags);
}
static inline void local_mcck_disable(void)
{
- __load_psw_mask(__extract_psw() & ~PSW_MASK_MCHECK);
+ __local_mcck_save();
+}
+
+static inline void local_mcck_enable(void)
+{
+ __load_psw_mask(__extract_psw() | PSW_MASK_MCHECK);
}
/*
@@ -306,26 +388,6 @@ static __always_inline void __noreturn disabled_wait(void)
#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
-extern int memcpy_real(void *, unsigned long, size_t);
-extern void memcpy_absolute(void *, void *, size_t);
-
-#define put_abs_lowcore(member, x) do { \
- unsigned long __abs_address = offsetof(struct lowcore, member); \
- __typeof__(((struct lowcore *)0)->member) __tmp = (x); \
- \
- memcpy_absolute(__va(__abs_address), &__tmp, sizeof(__tmp)); \
-} while (0)
-
-#define get_abs_lowcore(x, member) do { \
- unsigned long __abs_address = offsetof(struct lowcore, member); \
- __typeof__(((struct lowcore *)0)->member) *__ptr = &(x); \
- \
- memcpy_absolute(__ptr, __va(__abs_address), sizeof(*__ptr)); \
-} while (0)
-
-extern int s390_isolate_bp(void);
-extern int s390_isolate_bp_guest(void);
-
static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
{
return arch_irqs_disabled_flags(regs->psw.mask);
diff --git a/arch/s390/include/asm/ptdump.h b/arch/s390/include/asm/ptdump.h
deleted file mode 100644
index f960b2896606..000000000000
--- a/arch/s390/include/asm/ptdump.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _ASM_S390_PTDUMP_H
-#define _ASM_S390_PTDUMP_H
-
-void ptdump_check_wx(void);
-
-static inline void debug_checkwx(void)
-{
- if (IS_ENABLED(CONFIG_DEBUG_WX))
- ptdump_check_wx();
-}
-
-#endif /* _ASM_S390_PTDUMP_H */
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 8bae33ab320a..2ad9324f6338 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -14,23 +14,47 @@
#define PIF_SYSCALL 0 /* inside a system call */
#define PIF_EXECVE_PGSTE_RESTART 1 /* restart execve for PGSTE binaries */
#define PIF_SYSCALL_RET_SET 2 /* return value was set via ptrace */
-#define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */
#define PIF_FTRACE_FULL_REGS 4 /* all register contents valid (ftrace) */
#define _PIF_SYSCALL BIT(PIF_SYSCALL)
#define _PIF_EXECVE_PGSTE_RESTART BIT(PIF_EXECVE_PGSTE_RESTART)
#define _PIF_SYSCALL_RET_SET BIT(PIF_SYSCALL_RET_SET)
-#define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT)
#define _PIF_FTRACE_FULL_REGS BIT(PIF_FTRACE_FULL_REGS)
-#ifndef __ASSEMBLY__
+#define PSW32_MASK_PER _AC(0x40000000, UL)
+#define PSW32_MASK_DAT _AC(0x04000000, UL)
+#define PSW32_MASK_IO _AC(0x02000000, UL)
+#define PSW32_MASK_EXT _AC(0x01000000, UL)
+#define PSW32_MASK_KEY _AC(0x00F00000, UL)
+#define PSW32_MASK_BASE _AC(0x00080000, UL) /* Always one */
+#define PSW32_MASK_MCHECK _AC(0x00040000, UL)
+#define PSW32_MASK_WAIT _AC(0x00020000, UL)
+#define PSW32_MASK_PSTATE _AC(0x00010000, UL)
+#define PSW32_MASK_ASC _AC(0x0000C000, UL)
+#define PSW32_MASK_CC _AC(0x00003000, UL)
+#define PSW32_MASK_PM _AC(0x00000f00, UL)
+#define PSW32_MASK_RI _AC(0x00000080, UL)
+
+#define PSW32_ADDR_AMODE _AC(0x80000000, UL)
+#define PSW32_ADDR_INSN _AC(0x7FFFFFFF, UL)
+
+#define PSW32_DEFAULT_KEY ((PAGE_DEFAULT_ACC) << 20)
+
+#define PSW32_ASC_PRIMARY _AC(0x00000000, UL)
+#define PSW32_ASC_ACCREG _AC(0x00004000, UL)
+#define PSW32_ASC_SECONDARY _AC(0x00008000, UL)
+#define PSW32_ASC_HOME _AC(0x0000C000, UL)
+
+#define PSW_DEFAULT_KEY ((PAGE_DEFAULT_ACC) << 52)
#define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
- PSW_MASK_EA | PSW_MASK_BA)
+ PSW_MASK_EA | PSW_MASK_BA | PSW_MASK_DAT)
#define PSW_USER_BITS (PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
+#ifndef __ASSEMBLY__
+
struct psw_bits {
unsigned long : 1;
unsigned long per : 1; /* PER-Mask */
@@ -71,30 +95,6 @@ enum {
&(*(struct psw_bits *)(&(__psw))); \
}))
-#define PSW32_MASK_PER 0x40000000UL
-#define PSW32_MASK_DAT 0x04000000UL
-#define PSW32_MASK_IO 0x02000000UL
-#define PSW32_MASK_EXT 0x01000000UL
-#define PSW32_MASK_KEY 0x00F00000UL
-#define PSW32_MASK_BASE 0x00080000UL /* Always one */
-#define PSW32_MASK_MCHECK 0x00040000UL
-#define PSW32_MASK_WAIT 0x00020000UL
-#define PSW32_MASK_PSTATE 0x00010000UL
-#define PSW32_MASK_ASC 0x0000C000UL
-#define PSW32_MASK_CC 0x00003000UL
-#define PSW32_MASK_PM 0x00000f00UL
-#define PSW32_MASK_RI 0x00000080UL
-
-#define PSW32_ADDR_AMODE 0x80000000UL
-#define PSW32_ADDR_INSN 0x7FFFFFFFUL
-
-#define PSW32_DEFAULT_KEY (((u32)PAGE_DEFAULT_ACC) << 20)
-
-#define PSW32_ASC_PRIMARY 0x00000000UL
-#define PSW32_ASC_ACCREG 0x00004000UL
-#define PSW32_ASC_SECONDARY 0x00008000UL
-#define PSW32_ASC_HOME 0x0000C000UL
-
typedef struct {
unsigned int mask;
unsigned int addr;
@@ -201,6 +201,10 @@ static inline int test_and_clear_pt_regs_flag(struct pt_regs *regs, int flag)
return ret;
}
+struct task_struct;
+
+void update_cr_regs(struct task_struct *task);
+
/*
* These are defined as per linux/ptrace.h, which see.
*/
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 2f983e0b95e0..69c4ead0c332 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -9,8 +9,9 @@
#define __QDIO_H__
#include <linux/interrupt.h>
-#include <asm/cio.h>
+#include <asm/dma-types.h>
#include <asm/ccwdev.h>
+#include <asm/cio.h>
/* only use 4 queues to save some cachelines */
#define QDIO_MAX_QUEUES_PER_IRQ 4
@@ -34,9 +35,9 @@
* @dkey: access key for SLSB
*/
struct qdesfmt0 {
- u64 sliba;
- u64 sla;
- u64 slsba;
+ dma64_t sliba;
+ dma64_t sla;
+ dma64_t slsba;
u32 : 32;
u32 akey : 4;
u32 bkey : 4;
@@ -74,7 +75,7 @@ struct qdr {
/* private: */
u32 res[9];
/* public: */
- u64 qiba;
+ dma64_t qiba;
u32 : 32;
u32 qkey : 4;
u32 : 28;
@@ -146,7 +147,7 @@ struct qaob {
u8 flags;
u16 cbtbs;
u8 sb_count;
- u64 sba[QDIO_MAX_ELEMENTS_PER_BUFFER];
+ dma64_t sba[QDIO_MAX_ELEMENTS_PER_BUFFER];
u16 dcount[QDIO_MAX_ELEMENTS_PER_BUFFER];
u64 user0;
u64 res4[2];
@@ -208,7 +209,7 @@ struct qdio_buffer_element {
u8 scount;
u8 sflags;
u32 length;
- u64 addr;
+ dma64_t addr;
} __attribute__ ((packed, aligned(16)));
/**
@@ -224,7 +225,7 @@ struct qdio_buffer {
* @sbal: absolute SBAL address
*/
struct sl_element {
- u64 sbal;
+ dma64_t sbal;
} __attribute__ ((packed));
/**
diff --git a/arch/s390/include/asm/rwonce.h b/arch/s390/include/asm/rwonce.h
new file mode 100644
index 000000000000..91fc24520e82
--- /dev/null
+++ b/arch/s390/include/asm/rwonce.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_S390_RWONCE_H
+#define __ASM_S390_RWONCE_H
+
+#include <linux/compiler_types.h>
+
+/*
+ * Use READ_ONCE_ALIGNED_128() for concurrent (atomic) reads of 128-bit
+ * blocks. Note that x must be 128-bit aligned, otherwise a specification
+ * exception is generated.
+ */
+#define READ_ONCE_ALIGNED_128(x) \
+({ \
+ union { \
+ typeof(x) __x; \
+ __uint128_t val; \
+ } __u; \
+ \
+ BUILD_BUG_ON(sizeof(x) != 16); \
+ asm volatile( \
+ " lpq %[val],%[_x]\n" \
+ : [val] "=d" (__u.val) \
+ : [_x] "QS" (x) \
+ : "memory"); \
+ __u.__x; \
+})
+
+#include <asm-generic/rwonce.h>
+
+#endif /* __ASM_S390_RWONCE_H */
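A user-space analogue of what the LPQ-based READ_ONCE_ALIGNED_128() guarantees: both 64-bit halves are observed from a single point in time, so no torn value can be seen. Compiler builtins stand in for the instruction here; -latomic may be required:

#include <stdio.h>
#include <stdalign.h>

typedef unsigned __int128 u128;

static alignas(16) u128 val;    /* must be 16-byte aligned, as for LPQ */

int main(void)
{
        u128 snap;

        val = ((u128)0x1111 << 64) | 0x2222;
        /* both halves come from one point in time, no torn read */
        snap = __atomic_load_n(&val, __ATOMIC_RELAXED);
        printf("hi=0x%llx lo=0x%llx\n",
               (unsigned long long)(snap >> 64), (unsigned long long)snap);
        return 0;
}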
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 236b34b75ddb..5742d23bba13 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -17,6 +17,7 @@
#define EXT_SCCB_READ_CPU (3 * PAGE_SIZE)
#ifndef __ASSEMBLY__
+#include <linux/uio.h>
#include <asm/chpid.h>
#include <asm/cpu.h>
@@ -85,9 +86,15 @@ struct sclp_info {
unsigned char has_kss : 1;
unsigned char has_gisaf : 1;
unsigned char has_diag318 : 1;
+ unsigned char has_diag320 : 1;
unsigned char has_sipl : 1;
+ unsigned char has_sipl_eckd : 1;
unsigned char has_dirq : 1;
unsigned char has_iplcc : 1;
+ unsigned char has_zpci_lsi : 1;
+ unsigned char has_aisii : 1;
+ unsigned char has_aeni : 1;
+ unsigned char has_aisi : 1;
unsigned int ibc;
unsigned int mtid;
unsigned int mtid_cp;
@@ -126,6 +133,7 @@ void sclp_early_get_ipl_info(struct sclp_ipl_info *info);
void sclp_early_detect(void);
void sclp_early_printk(const char *s);
void __sclp_early_printk(const char *s, unsigned int len);
+void sclp_emergency_printk(const char *s);
int sclp_early_get_memsize(unsigned long *mem);
int sclp_early_get_hsa_size(unsigned long *hsa_size);
@@ -142,8 +150,7 @@ int sclp_pci_deconfigure(u32 fid);
int sclp_ap_configure(u32 apid);
int sclp_ap_deconfigure(u32 apid);
int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid);
-int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count);
-int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count);
+size_t memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count);
void sclp_ocf_cpc_name_copy(char *dst);
static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h
index 7ce584aff5bb..56003e26cdbf 100644
--- a/arch/s390/include/asm/scsw.h
+++ b/arch/s390/include/asm/scsw.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <asm/css_chars.h>
+#include <asm/dma-types.h>
#include <asm/cio.h>
/**
@@ -53,7 +54,7 @@ struct cmd_scsw {
__u32 fctl : 3;
__u32 actl : 7;
__u32 stctl : 5;
- __u32 cpa;
+ dma32_t cpa;
__u32 dstat : 8;
__u32 cstat : 8;
__u32 count : 16;
@@ -93,7 +94,7 @@ struct tm_scsw {
u32 fctl:3;
u32 actl:7;
u32 stctl:5;
- u32 tcw;
+ dma32_t tcw;
u32 dstat:8;
u32 cstat:8;
u32 fcxs:8;
@@ -125,7 +126,7 @@ struct eadm_scsw {
u32 fctl:3;
u32 actl:7;
u32 stctl:5;
- u32 aob;
+ dma32_t aob;
u32 dstat:8;
u32 cstat:8;
u32:16;
@@ -215,6 +216,11 @@ union scsw {
#define SNS2_ENV_DATA_PRESENT 0x10
#define SNS2_INPRECISE_END 0x04
+/*
+ * architected values for PPRC errors
+ */
+#define SNS7_INVALID_ON_SEC 0x0e
+
/**
* scsw_is_tm - check for transport mode scsw
* @scsw: pointer to scsw
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index 3fecaa4e8b74..0486e6ef62bf 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -23,7 +23,7 @@
*/
#define __bootdata_preserved(var) __section(".boot.preserved.data." #var) var
-extern unsigned long __samode31, __eamode31;
-extern unsigned long __stext_amode31, __etext_amode31;
+extern char *__samode31, *__eamode31;
+extern char *__stext_amode31, *__etext_amode31;
#endif
diff --git a/arch/s390/include/asm/serial.h b/arch/s390/include/asm/serial.h
deleted file mode 100644
index aaf85a69061c..000000000000
--- a/arch/s390/include/asm/serial.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_S390_SERIAL_H
-#define _ASM_S390_SERIAL_H
-
-#define BASE_BAUD 0
-
-#endif /* _ASM_S390_SERIAL_H */
diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h
index 950d87bd997a..06fbabe2f66c 100644
--- a/arch/s390/include/asm/set_memory.h
+++ b/arch/s390/include/asm/set_memory.h
@@ -6,37 +6,61 @@
extern struct mutex cpa_mutex;
-#define SET_MEMORY_RO 1UL
-#define SET_MEMORY_RW 2UL
-#define SET_MEMORY_NX 4UL
-#define SET_MEMORY_X 8UL
-#define SET_MEMORY_4K 16UL
+enum {
+ _SET_MEMORY_RO_BIT,
+ _SET_MEMORY_RW_BIT,
+ _SET_MEMORY_NX_BIT,
+ _SET_MEMORY_X_BIT,
+ _SET_MEMORY_4K_BIT,
+ _SET_MEMORY_INV_BIT,
+ _SET_MEMORY_DEF_BIT,
+};
-int __set_memory(unsigned long addr, int numpages, unsigned long flags);
+#define SET_MEMORY_RO BIT(_SET_MEMORY_RO_BIT)
+#define SET_MEMORY_RW BIT(_SET_MEMORY_RW_BIT)
+#define SET_MEMORY_NX BIT(_SET_MEMORY_NX_BIT)
+#define SET_MEMORY_X BIT(_SET_MEMORY_X_BIT)
+#define SET_MEMORY_4K BIT(_SET_MEMORY_4K_BIT)
+#define SET_MEMORY_INV BIT(_SET_MEMORY_INV_BIT)
+#define SET_MEMORY_DEF BIT(_SET_MEMORY_DEF_BIT)
-static inline int set_memory_ro(unsigned long addr, int numpages)
-{
- return __set_memory(addr, numpages, SET_MEMORY_RO);
-}
+int __set_memory(unsigned long addr, unsigned long numpages, unsigned long flags);
-static inline int set_memory_rw(unsigned long addr, int numpages)
-{
- return __set_memory(addr, numpages, SET_MEMORY_RW);
-}
+#define set_memory_rox set_memory_rox
-static inline int set_memory_nx(unsigned long addr, int numpages)
-{
- return __set_memory(addr, numpages, SET_MEMORY_NX);
+/*
+ * Generate two variants of each set_memory() function:
+ *
+ * set_memory_yy(unsigned long addr, int numpages);
+ * __set_memory_yy(void *start, void *end);
+ *
+ * The second variant exists for convenience, to avoid the usual
+ * (unsigned long) casts; unlike the first variant it can also be used
+ * for areas larger than 8TB, which may happen during memory initialization.
+ */
+#define __SET_MEMORY_FUNC(fname, flags) \
+static inline int fname(unsigned long addr, int numpages) \
+{ \
+ return __set_memory(addr, numpages, (flags)); \
+} \
+ \
+static inline int __##fname(void *start, void *end) \
+{ \
+ unsigned long numpages; \
+ \
+ numpages = (end - start) >> PAGE_SHIFT; \
+ return __set_memory((unsigned long)start, numpages, (flags)); \
}
-static inline int set_memory_x(unsigned long addr, int numpages)
-{
- return __set_memory(addr, numpages, SET_MEMORY_X);
-}
+__SET_MEMORY_FUNC(set_memory_ro, SET_MEMORY_RO)
+__SET_MEMORY_FUNC(set_memory_rw, SET_MEMORY_RW)
+__SET_MEMORY_FUNC(set_memory_nx, SET_MEMORY_NX)
+__SET_MEMORY_FUNC(set_memory_x, SET_MEMORY_X)
+__SET_MEMORY_FUNC(set_memory_rox, SET_MEMORY_RO | SET_MEMORY_X)
+__SET_MEMORY_FUNC(set_memory_rwnx, SET_MEMORY_RW | SET_MEMORY_NX)
+__SET_MEMORY_FUNC(set_memory_4k, SET_MEMORY_4K)
-static inline int set_memory_4k(unsigned long addr, int numpages)
-{
- return __set_memory(addr, numpages, SET_MEMORY_4K);
-}
+int set_direct_map_invalid_noflush(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
#endif
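A toy version of the __SET_MEMORY_FUNC() generation scheme: one macro invocation emits both the (addr, numpages) variant and the pointer-based variant. The names and the flags value are invented for demonstration:

#include <stdio.h>

#define PAGE_SHIFT 12

static int apply(unsigned long addr, unsigned long numpages, unsigned long flags)
{
        printf("addr=0x%lx pages=%lu flags=0x%lx\n", addr, numpages, flags);
        return 0;
}

#define GEN_FUNC(fname, flags)                                          \
static int fname(unsigned long addr, int numpages)                      \
{                                                                       \
        return apply(addr, numpages, (flags));                          \
}                                                                       \
static int __##fname(void *start, void *end)                            \
{                                                                       \
        unsigned long numpages;                                         \
                                                                        \
        numpages = (unsigned long)((char *)end - (char *)start) >> PAGE_SHIFT; \
        return apply((unsigned long)start, numpages, (flags));          \
}

GEN_FUNC(set_ro, 0x1)

int main(void)
{
        static char buf[8192];

        set_ro(0x1000, 2);
        __set_ro(buf, buf + sizeof(buf));
        return 0;
}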
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 77e6506898f5..03bcaa8effb2 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -28,12 +28,12 @@
#define MACHINE_FLAG_TOPOLOGY BIT(10)
#define MACHINE_FLAG_TE BIT(11)
#define MACHINE_FLAG_TLB_LC BIT(12)
-#define MACHINE_FLAG_VX BIT(13)
#define MACHINE_FLAG_TLB_GUEST BIT(14)
#define MACHINE_FLAG_NX BIT(15)
#define MACHINE_FLAG_GS BIT(16)
#define MACHINE_FLAG_SCC BIT(17)
#define MACHINE_FLAG_PCI_MIO BIT(18)
+#define MACHINE_FLAG_RDP BIT(19)
#define LPP_MAGIC BIT(31)
#define LPP_PID_MASK _AC(0xffffffff, UL)
@@ -71,8 +71,8 @@ extern unsigned int zlib_dfltcc_support;
#define ZLIB_DFLTCC_INFLATE_ONLY 3
#define ZLIB_DFLTCC_FULL_DEBUG 4
-extern int noexec_disabled;
extern unsigned long ident_map_size;
+extern unsigned long max_mappable;
/* The Write Back bit position in the physaddr is given by the SLPC PCI */
extern unsigned long mio_wb_bit_mask;
@@ -89,12 +89,12 @@ extern unsigned long mio_wb_bit_mask;
#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
-#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
#define MACHINE_HAS_TLB_GUEST (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_GUEST)
#define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX)
#define MACHINE_HAS_GS (S390_lowcore.machine_flags & MACHINE_FLAG_GS)
#define MACHINE_HAS_SCC (S390_lowcore.machine_flags & MACHINE_FLAG_SCC)
#define MACHINE_HAS_PCI_MIO (S390_lowcore.machine_flags & MACHINE_FLAG_PCI_MIO)
+#define MACHINE_HAS_RDP (S390_lowcore.machine_flags & MACHINE_FLAG_RDP)
/*
* Console mode. Override with conmode=
@@ -115,14 +115,6 @@ extern unsigned int console_irq;
#define SET_CONSOLE_VT220 do { console_mode = 4; } while (0)
#define SET_CONSOLE_HVC do { console_mode = 5; } while (0)
-#ifdef CONFIG_PFAULT
-extern int pfault_init(void);
-extern void pfault_fini(void);
-#else /* CONFIG_PFAULT */
-#define pfault_init() ({-1;})
-#define pfault_fini() do { } while (0)
-#endif /* CONFIG_PFAULT */
-
#ifdef CONFIG_VMCP
void vmcp_cma_reserve(void);
#else
@@ -131,9 +123,6 @@ static inline void vmcp_cma_reserve(void) { }
void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault);
-void cmma_init(void);
-void cmma_init_nodat(void);
-
extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);
@@ -144,13 +133,13 @@ static inline unsigned long kaslr_offset(void)
return __kaslr_offset;
}
-extern int is_full_image;
-
-struct initrd_data {
- unsigned long start;
- unsigned long size;
-};
-extern struct initrd_data initrd_data;
+extern int __kaslr_enabled;
+static inline int kaslr_enabled(void)
+{
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return __kaslr_enabled;
+ return 0;
+}
struct oldmem_data {
unsigned long start;
@@ -158,7 +147,7 @@ struct oldmem_data {
};
extern struct oldmem_data oldmem_data;
-static inline u32 gen_lpswe(unsigned long addr)
+static __always_inline u32 gen_lpswe(unsigned long addr)
{
BUILD_BUG_ON(addr > 0xfff);
return 0xb2b20000 | addr;
diff --git a/arch/s390/include/asm/shmparam.h b/arch/s390/include/asm/shmparam.h
deleted file mode 100644
index e75d45649c54..000000000000
--- a/arch/s390/include/asm/shmparam.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * S390 version
- *
- * Derived from "include/asm-i386/shmparam.h"
- */
-#ifndef _ASM_S390_SHMPARAM_H
-#define _ASM_S390_SHMPARAM_H
-
-#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
-
-#endif /* _ASM_S390_SHMPARAM_H */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 7f5d4763357b..6e5b1b4b19a9 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -30,7 +30,8 @@ extern void smp_emergency_stop(void);
extern int smp_find_processor_id(u16 address);
extern int smp_store_status(int cpu);
-extern void smp_save_dump_cpus(void);
+extern void smp_save_dump_ipl_cpu(void);
+extern void smp_save_dump_secondary_cpus(void);
extern void smp_yield_cpu(int cpu);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
diff --git a/arch/s390/include/asm/softirq_stack.h b/arch/s390/include/asm/softirq_stack.h
index fd17f25704bd..1ac5115d3115 100644
--- a/arch/s390/include/asm/softirq_stack.h
+++ b/arch/s390/include/asm/softirq_stack.h
@@ -5,9 +5,10 @@
#include <asm/lowcore.h>
#include <asm/stacktrace.h>
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
static inline void do_softirq_own_stack(void)
{
call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq);
}
-
+#endif
#endif /* __ASM_S390_SOFTIRQ_STACK_H */
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index b23c658dce77..433fde85b14e 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -4,7 +4,13 @@
#include <linux/uaccess.h>
#include <linux/ptrace.h>
-#include <asm/switch_to.h>
+
+struct stack_frame_user {
+ unsigned long back_chain;
+ unsigned long empty1[5];
+ unsigned long gprs[10];
+ unsigned long empty2[4];
+};
enum stack_type {
STACK_TYPE_UNKNOWN,
@@ -46,6 +52,7 @@ struct stack_frame {
unsigned long sie_savearea;
unsigned long sie_reason;
unsigned long sie_flags;
+ unsigned long sie_control_block_phys;
};
};
unsigned long gprs[10];
@@ -188,17 +195,53 @@ static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
(rettype)r2; \
})
-#define call_on_stack_noreturn(fn, stack) \
+/*
+ * Use call_nodat() to call a function with DAT disabled.
+ * Proper sign and zero extension of function arguments is done.
+ * Usage:
+ *
+ * rc = call_nodat(nr, rettype, fn, t1, a1, t2, a2, ...)
+ *
+ * - nr specifies the number of function arguments of fn.
+ * - fn is the function to be called, where fn is a physical address.
+ * - rettype is the return type of fn.
+ * - t1, a1, ... are pairs, where t1 must match the type of the first
+ * argument of fn, t2 the second, etc. a1 is the corresponding
+ * first function argument (not name), etc.
+ *
+ * fn() is called with standard C function call ABI, with the exception
+ * that no useful stackframe or stackpointer is passed via register 15.
+ * Therefore the called function must not use r15 to access the stack.
+ */
+#define call_nodat(nr, rettype, fn, ...) \
({ \
- void (*__fn)(void) = fn; \
+ rettype (*__fn)(CALL_PARM_##nr(__VA_ARGS__)) = (fn); \
+ /* aligned since psw_leave must not cross page boundary */ \
+ psw_t __aligned(16) psw_leave; \
+ psw_t psw_enter; \
+ CALL_LARGS_##nr(__VA_ARGS__); \
+ CALL_REGS_##nr; \
\
+ CALL_TYPECHECK_##nr(__VA_ARGS__); \
+ psw_enter.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT; \
+ psw_enter.addr = (unsigned long)__fn; \
asm volatile( \
- " la 15,0(%[_stack])\n" \
- " xc %[_bc](8,15),%[_bc](15)\n" \
- " brasl 14,%[_fn]\n" \
- ::[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
- [_stack] "a" (stack), [_fn] "X" (__fn)); \
- BUG(); \
+ " epsw 0,1\n" \
+ " risbg 1,0,0,31,32\n" \
+ " larl 7,1f\n" \
+ " stg 1,%[psw_leave]\n" \
+ " stg 7,8+%[psw_leave]\n" \
+ " la 7,%[psw_leave]\n" \
+ " lra 7,0(7)\n" \
+ " larl 1,0f\n" \
+ " lra 14,0(1)\n" \
+ " lpswe %[psw_enter]\n" \
+ "0: lpswe 0(7)\n" \
+ "1:\n" \
+ : CALL_FMT_##nr, [psw_leave] "=Q" (psw_leave) \
+ : [psw_enter] "Q" (psw_enter) \
+ : "7", CALL_CLOBBER_##nr); \
+ (rettype)r2; \
})
#endif /* _ASM_S390_STACKTRACE_H */
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 3fae93ddb322..351685de53d2 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -55,18 +55,6 @@ char *strstr(const char *s1, const char *s2);
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
-extern void *__memcpy(void *dest, const void *src, size_t n);
-extern void *__memset(void *s, int c, size_t n);
-extern void *__memmove(void *dest, const void *src, size_t n);
-
-/*
- * For files that are not instrumented (e.g. mm/slub.c) we
- * should use not instrumented version of mem* functions.
- */
-
-#define memcpy(dst, src, len) __memcpy(dst, src, len)
-#define memmove(dst, src, len) __memmove(dst, src, len)
-#define memset(s, c, n) __memset(s, c, n)
#define strlen(s) __strlen(s)
#define __no_sanitize_prefix_strfunc(x) __##x
@@ -79,6 +67,9 @@ extern void *__memmove(void *dest, const void *src, size_t n);
#define __no_sanitize_prefix_strfunc(x) x
#endif /* defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) */
+void *__memcpy(void *dest, const void *src, size_t n);
+void *__memset(void *s, int c, size_t n);
+void *__memmove(void *dest, const void *src, size_t n);
void *__memset16(uint16_t *s, uint16_t v, size_t count);
void *__memset32(uint32_t *s, uint32_t v, size_t count);
void *__memset64(uint64_t *s, uint64_t v, size_t count);
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
deleted file mode 100644
index c61b2cc1a8a8..000000000000
--- a/arch/s390/include/asm/switch_to.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright IBM Corp. 1999, 2009
- *
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#ifndef __ASM_SWITCH_TO_H
-#define __ASM_SWITCH_TO_H
-
-#include <linux/thread_info.h>
-#include <asm/fpu/api.h>
-#include <asm/ptrace.h>
-#include <asm/guarded_storage.h>
-
-extern struct task_struct *__switch_to(void *, void *);
-extern void update_cr_regs(struct task_struct *task);
-
-static inline void save_access_regs(unsigned int *acrs)
-{
- typedef struct { int _[NUM_ACRS]; } acrstype;
-
- asm volatile("stam 0,15,%0" : "=Q" (*(acrstype *)acrs));
-}
-
-static inline void restore_access_regs(unsigned int *acrs)
-{
- typedef struct { int _[NUM_ACRS]; } acrstype;
-
- asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
-}
-
-#define switch_to(prev, next, last) do { \
- /* save_fpu_regs() sets the CIF_FPU flag, which enforces \
- * a restore of the floating point / vector registers as \
- * soon as the next task returns to user space \
- */ \
- save_fpu_regs(); \
- save_access_regs(&prev->thread.acrs[0]); \
- save_ri_cb(prev->thread.ri_cb); \
- save_gs_cb(prev->thread.gs_cb); \
- update_cr_regs(next); \
- restore_access_regs(&next->thread.acrs[0]); \
- restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
- restore_gs_cb(next->thread.gs_cb); \
- prev = __switch_to(prev, next); \
-} while (0)
-
-#endif /* __ASM_SWITCH_TO_H */
diff --git a/arch/s390/include/asm/syscall_wrapper.h b/arch/s390/include/asm/syscall_wrapper.h
index fde7e6b1df48..35c1d1b860d8 100644
--- a/arch/s390/include/asm/syscall_wrapper.h
+++ b/arch/s390/include/asm/syscall_wrapper.h
@@ -7,36 +7,13 @@
#ifndef _ASM_S390_SYSCALL_WRAPPER_H
#define _ASM_S390_SYSCALL_WRAPPER_H
-#define __SC_TYPE(t, a) t
-
-#define SYSCALL_PT_ARG6(regs, m, t1, t2, t3, t4, t5, t6)\
- SYSCALL_PT_ARG5(regs, m, t1, t2, t3, t4, t5), \
- m(t6, (regs->gprs[7]))
-
-#define SYSCALL_PT_ARG5(regs, m, t1, t2, t3, t4, t5) \
- SYSCALL_PT_ARG4(regs, m, t1, t2, t3, t4), \
- m(t5, (regs->gprs[6]))
-
-#define SYSCALL_PT_ARG4(regs, m, t1, t2, t3, t4) \
- SYSCALL_PT_ARG3(regs, m, t1, t2, t3), \
- m(t4, (regs->gprs[5]))
-
-#define SYSCALL_PT_ARG3(regs, m, t1, t2, t3) \
- SYSCALL_PT_ARG2(regs, m, t1, t2), \
- m(t3, (regs->gprs[4]))
-
-#define SYSCALL_PT_ARG2(regs, m, t1, t2) \
- SYSCALL_PT_ARG1(regs, m, t1), \
- m(t2, (regs->gprs[3]))
-
-#define SYSCALL_PT_ARG1(regs, m, t1) \
- m(t1, (regs->orig_gpr2))
-
-#define SYSCALL_PT_ARGS(x, ...) SYSCALL_PT_ARG##x(__VA_ARGS__)
+/* Mapping of registers to parameters for syscalls */
+#define SC_S390_REGS_TO_ARGS(x, ...) \
+ __MAP(x, __SC_ARGS \
+ ,, regs->orig_gpr2,, regs->gprs[3],, regs->gprs[4] \
+ ,, regs->gprs[5],, regs->gprs[6],, regs->gprs[7])
#ifdef CONFIG_COMPAT
-#define __SC_COMPAT_TYPE(t, a) \
- __typeof(__builtin_choose_expr(sizeof(t) > 4, 0L, (t)0)) a
#define __SC_COMPAT_CAST(t, a) \
({ \
@@ -56,110 +33,108 @@
(t)__ReS; \
})
-#define __S390_SYS_STUBx(x, name, ...) \
- long __s390_sys##name(struct pt_regs *regs); \
- ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \
- long __s390_sys##name(struct pt_regs *regs) \
- { \
- long ret = __do_sys##name(SYSCALL_PT_ARGS(x, regs, \
- __SC_COMPAT_CAST, __MAP(x, __SC_TYPE, __VA_ARGS__))); \
- __MAP(x,__SC_TEST,__VA_ARGS__); \
- return ret; \
- }
-
/*
* To keep the naming coherent, re-define SYSCALL_DEFINE0 to create an alias
* named __s390x_sys_*()
*/
#define COMPAT_SYSCALL_DEFINE0(sname) \
- SYSCALL_METADATA(_##sname, 0); \
long __s390_compat_sys_##sname(void); \
ALLOW_ERROR_INJECTION(__s390_compat_sys_##sname, ERRNO); \
long __s390_compat_sys_##sname(void)
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
+ long __s390_sys_##sname(void); \
+ ALLOW_ERROR_INJECTION(__s390_sys_##sname, ERRNO); \
long __s390x_sys_##sname(void); \
ALLOW_ERROR_INJECTION(__s390x_sys_##sname, ERRNO); \
+ static inline long __do_sys_##sname(void); \
long __s390_sys_##sname(void) \
- __attribute__((alias(__stringify(__s390x_sys_##sname)))); \
- long __s390x_sys_##sname(void)
+ { \
+ return __do_sys_##sname(); \
+ } \
+ long __s390x_sys_##sname(void) \
+ { \
+ return __do_sys_##sname(); \
+ } \
+ static inline long __do_sys_##sname(void)
#define COND_SYSCALL(name) \
cond_syscall(__s390x_sys_##name); \
cond_syscall(__s390_sys_##name)
-#define SYS_NI(name) \
- SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers); \
- SYSCALL_ALIAS(__s390_sys_##name, sys_ni_posix_timers)
-
#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
- __diag_push(); \
- __diag_ignore(GCC, 8, "-Wattribute-alias", \
- "Type aliasing is used to sanitize syscall arguments"); \
long __s390_compat_sys##name(struct pt_regs *regs); \
- long __s390_compat_sys##name(struct pt_regs *regs) \
- __attribute__((alias(__stringify(__se_compat_sys##name)))); \
ALLOW_ERROR_INJECTION(__s390_compat_sys##name, ERRNO); \
- static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
- long __se_compat_sys##name(struct pt_regs *regs); \
- long __se_compat_sys##name(struct pt_regs *regs) \
+ static inline long __se_compat_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)); \
+ static inline long __do_compat_sys##name(__MAP(x, __SC_DECL, __VA_ARGS__)); \
+ long __s390_compat_sys##name(struct pt_regs *regs) \
+ { \
+ return __se_compat_sys##name(SC_S390_REGS_TO_ARGS(x, __VA_ARGS__)); \
+ } \
+ static inline long __se_compat_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)) \
{ \
- long ret = __do_compat_sys##name(SYSCALL_PT_ARGS(x, regs, __SC_DELOUSE, \
- __MAP(x, __SC_TYPE, __VA_ARGS__))); \
- __MAP(x,__SC_TEST,__VA_ARGS__); \
- return ret; \
+ __MAP(x, __SC_TEST, __VA_ARGS__); \
+ return __do_compat_sys##name(__MAP(x, __SC_DELOUSE, __VA_ARGS__)); \
} \
- __diag_pop(); \
- static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+ static inline long __do_compat_sys##name(__MAP(x, __SC_DECL, __VA_ARGS__))
/*
* As some compat syscalls may not be implemented, we need to expand
- * COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in
- * kernel/time/posix-stubs.c to cover this case as well.
+ * COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well.
*/
#define COND_SYSCALL_COMPAT(name) \
cond_syscall(__s390_compat_sys_##name)
-#define COMPAT_SYS_NI(name) \
- SYSCALL_ALIAS(__s390_compat_sys_##name, sys_ni_posix_timers)
+#define __S390_SYS_STUBx(x, name, ...) \
+ long __s390_sys##name(struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \
+ static inline long ___se_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)); \
+ long __s390_sys##name(struct pt_regs *regs) \
+ { \
+ return ___se_sys##name(SC_S390_REGS_TO_ARGS(x, __VA_ARGS__)); \
+ } \
+ static inline long ___se_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)) \
+ { \
+ __MAP(x, __SC_TEST, __VA_ARGS__); \
+ return __do_sys##name(__MAP(x, __SC_COMPAT_CAST, __VA_ARGS__)); \
+ }
#else /* CONFIG_COMPAT */
-#define __S390_SYS_STUBx(x, fullname, name, ...)
-
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
long __s390x_sys_##sname(void); \
ALLOW_ERROR_INJECTION(__s390x_sys_##sname, ERRNO); \
- long __s390x_sys_##sname(void)
+ static inline long __do_sys_##sname(void); \
+ long __s390x_sys_##sname(void) \
+ { \
+ return __do_sys_##sname(); \
+ } \
+ static inline long __do_sys_##sname(void)
#define COND_SYSCALL(name) \
cond_syscall(__s390x_sys_##name)
-#define SYS_NI(name) \
- SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers);
+#define __S390_SYS_STUBx(x, fullname, name, ...)
#endif /* CONFIG_COMPAT */
-#define __SYSCALL_DEFINEx(x, name, ...) \
- __diag_push(); \
- __diag_ignore(GCC, 8, "-Wattribute-alias", \
- "Type aliasing is used to sanitize syscall arguments"); \
- long __s390x_sys##name(struct pt_regs *regs) \
- __attribute__((alias(__stringify(__se_sys##name)))); \
- ALLOW_ERROR_INJECTION(__s390x_sys##name, ERRNO); \
- static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
- long __se_sys##name(struct pt_regs *regs); \
- __S390_SYS_STUBx(x, name, __VA_ARGS__) \
- long __se_sys##name(struct pt_regs *regs) \
- { \
- long ret = __do_sys##name(SYSCALL_PT_ARGS(x, regs, \
- __SC_CAST, __MAP(x, __SC_TYPE, __VA_ARGS__))); \
- __MAP(x,__SC_TEST,__VA_ARGS__); \
- return ret; \
- } \
- __diag_pop(); \
- static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+#define __SYSCALL_DEFINEx(x, name, ...) \
+ long __s390x_sys##name(struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__s390x_sys##name, ERRNO); \
+ static inline long __se_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)); \
+ static inline long __do_sys##name(__MAP(x, __SC_DECL, __VA_ARGS__)); \
+ __S390_SYS_STUBx(x, name, __VA_ARGS__); \
+ long __s390x_sys##name(struct pt_regs *regs) \
+ { \
+ return __se_sys##name(SC_S390_REGS_TO_ARGS(x, __VA_ARGS__)); \
+ } \
+ static inline long __se_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)) \
+ { \
+ __MAP(x, __SC_TEST, __VA_ARGS__); \
+ return __do_sys##name(__MAP(x, __SC_CAST, __VA_ARGS__)); \
+ } \
+ static inline long __do_sys##name(__MAP(x, __SC_DECL, __VA_ARGS__))
#endif /* _ASM_S390_SYSCALL_WRAPPER_H */
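The wrapper layering above is easiest to see expanded. As a rough sketch, here is what a hypothetical SYSCALL_DEFINE2(foo, int, fd, unsigned long, len) boils down to on s390x once SC_S390_REGS_TO_ARGS, __SC_LONG and __SC_CAST have done their work (simplified; forward declarations omitted, and the compat and __S390_SYS_STUBx variants follow the same shape):

	long __s390x_sys_foo(struct pt_regs *regs)
	{
		/* SC_S390_REGS_TO_ARGS(2, ...) selects orig_gpr2 and gprs[3] */
		return __se_sys_foo(regs->orig_gpr2, regs->gprs[3]);
	}

	static inline long __se_sys_foo(long fd, long len)
	{
		/* __SC_TEST type-checks, __SC_CAST truncates back to the real types */
		return __do_sys_foo((int)fd, (unsigned long)len);
	}

	static inline long __do_sys_foo(int fd, unsigned long len)
	{
		/* syscall body as written after the SYSCALL_DEFINE2() macro */
		return 0;
	}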
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index ab1c6316055c..edca5a751df4 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -40,6 +40,10 @@ struct sysinfo_1_1_1 {
unsigned int ncr;
unsigned int npr;
unsigned int ntr;
+ char reserved_3[4];
+ char model_var_cap[16];
+ unsigned int model_var_cap_rating;
+ unsigned int nvr;
};
struct sysinfo_1_2_1 {
diff --git a/arch/s390/include/asm/termios.h b/arch/s390/include/asm/termios.h
deleted file mode 100644
index 46fa3020b41e..000000000000
--- a/arch/s390/include/asm/termios.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * S390 version
- *
- * Derived from "include/asm-i386/termios.h"
- */
-#ifndef _S390_TERMIOS_H
-#define _S390_TERMIOS_H
-
-#include <uapi/asm/termios.h>
-
-
-/* intr=^C quit=^\ erase=del kill=^U
- eof=^D vtime=\0 vmin=\1 sxtc=\0
- start=^Q stop=^S susp=^Z eol=\0
- reprint=^R discard=^U werase=^W lnext=^V
- eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
-
-#include <asm-generic/termios-base.h>
-
-#endif /* _S390_TERMIOS_H */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index b2ffcb4fe000..a674c7d25da5 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -9,6 +9,9 @@
#define _ASM_THREAD_INFO_H
#include <linux/bits.h>
+#ifndef ASM_OFFSETS_C
+#include <asm/asm-offsets.h>
+#endif
/*
* General size of kernel stacks
@@ -21,13 +24,12 @@
#define BOOT_STACK_SIZE (PAGE_SIZE << 2)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+#define STACK_INIT_OFFSET (THREAD_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+
#ifndef __ASSEMBLY__
#include <asm/lowcore.h>
#include <asm/page.h>
-#define STACK_INIT_OFFSET \
- (THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs))
-
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
@@ -50,9 +52,6 @@ struct thread_info {
struct task_struct;
-void arch_release_task_struct(struct task_struct *tsk);
-int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
-
void arch_setup_new_exec(void);
#define arch_setup_new_exec arch_setup_new_exec
@@ -70,7 +69,6 @@ void arch_setup_new_exec(void);
#define TIF_PATCH_PENDING 5 /* pending live patching update */
#define TIF_PGSTE 6 /* New mm's will use 4K page tables */
#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
-#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
#define TIF_PER_TRAP 10 /* Need to handle PER trap on exit to usermode */
@@ -94,7 +92,6 @@ void arch_setup_new_exec(void);
#define _TIF_UPROBE BIT(TIF_UPROBE)
#define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE)
#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
-#define _TIF_ISOLATE_BP BIT(TIF_ISOLATE_BP)
#define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST)
#define _TIF_PER_TRAP BIT(TIF_PER_TRAP)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index ce878e85b6e4..4d646659a5f5 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -63,7 +63,7 @@ static inline int store_tod_clock_ext_cc(union tod_clock *clk)
return cc;
}
-static inline void store_tod_clock_ext(union tod_clock *tod)
+static __always_inline void store_tod_clock_ext(union tod_clock *tod)
{
asm volatile("stcke %0" : "=Q" (*tod) : : "cc");
}
@@ -177,7 +177,7 @@ static inline void local_tick_enable(unsigned long comp)
typedef unsigned long cycles_t;
-static inline unsigned long get_tod_clock(void)
+static __always_inline unsigned long get_tod_clock(void)
{
union tod_clock clk;
@@ -204,6 +204,11 @@ void init_cpu_timer(void);
extern union tod_clock tod_clock_base;
+static __always_inline unsigned long __get_tod_clock_monotonic(void)
+{
+ return get_tod_clock() - tod_clock_base.tod;
+}
+
/**
* get_clock_monotonic - returns current time in clock rate units
*
@@ -216,7 +221,7 @@ static inline unsigned long get_tod_clock_monotonic(void)
unsigned long tod;
preempt_disable_notrace();
- tod = get_tod_clock() - tod_clock_base.tod;
+ tod = __get_tod_clock_monotonic();
preempt_enable_notrace();
return tod;
}
@@ -240,7 +245,7 @@ static inline unsigned long get_tod_clock_monotonic(void)
* -> ns = (th * 125) + ((tl * 125) >> 9);
*
*/
-static inline unsigned long tod_to_ns(unsigned long todval)
+static __always_inline unsigned long tod_to_ns(unsigned long todval)
{
return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}
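Since TOD clock bit 51 has a resolution of one microsecond, the unit of a raw TOD value is 1/4096 us, and tod_to_ns() above multiplies by 125/512 (which equals 1000/4096) without overflowing. A minimal sketch combining it with the monotonic helpers (kernel context assumed; the helper name is illustrative):

	/* Monotonic nanoseconds since tod_clock_base was set. */
	static inline unsigned long monotonic_ns(void)
	{
		return tod_to_ns(get_tod_clock_monotonic());
	}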
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 3a5c8fb590e5..e95b2c8081eb 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -25,7 +25,9 @@
void __tlb_remove_table(void *_table);
static inline void tlb_flush(struct mmu_gather *tlb);
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size);
+ struct page *page, bool delay_rmap, int page_size);
+static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb,
+ struct page *page, unsigned int nr_pages, bool delay_rmap);
#define tlb_flush tlb_flush
#define pte_free_tlb pte_free_tlb
@@ -40,14 +42,33 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
* Release the page cache reference for a pte removed by
* tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
* has already been freed, so just do free_page_and_swap_cache.
+ *
+ * s390 doesn't delay rmap removal.
*/
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
+ struct page *page, bool delay_rmap, int page_size)
{
+ VM_WARN_ON_ONCE(delay_rmap);
+
free_page_and_swap_cache(page);
return false;
}
+static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb,
+ struct page *page, unsigned int nr_pages, bool delay_rmap)
+{
+ struct encoded_page *encoded_pages[] = {
+ encode_page(page, ENCODED_PAGE_BIT_NR_PAGES_NEXT),
+ encode_nr_pages(nr_pages),
+ };
+
+ VM_WARN_ON_ONCE(delay_rmap);
+ VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1));
+
+ free_pages_and_swap_cache(encoded_pages, ARRAY_SIZE(encoded_pages));
+ return false;
+}
+
static inline void tlb_flush(struct mmu_gather *tlb)
{
__tlb_flush_mm_lazy(tlb->mm);
@@ -64,12 +85,9 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
tlb->cleared_pmds = 1;
- /*
- * page_table_free_rcu takes care of the allocation bit masks
- * of the 2K table fragments in the 4K page table page,
- * then calls tlb_remove_table.
- */
- page_table_free_rcu(tlb, (unsigned long *) pte, address);
+ if (mm_alloc_pgste(tlb->mm))
+ gmap_unlink(tlb->mm, (unsigned long *)pte, address);
+ tlb_remove_ptdesc(tlb, pte);
}
/*
@@ -84,12 +102,12 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
{
if (mm_pmd_folded(tlb->mm))
return;
- pgtable_pmd_page_dtor(virt_to_page(pmd));
+ pagetable_pmd_dtor(virt_to_ptdesc(pmd));
__tlb_adjust_range(tlb, address, PAGE_SIZE);
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
tlb->cleared_puds = 1;
- tlb_remove_table(tlb, pmd);
+ tlb_remove_ptdesc(tlb, pmd);
}
/*
@@ -107,7 +125,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
__tlb_adjust_range(tlb, address, PAGE_SIZE);
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
- tlb_remove_table(tlb, p4d);
+ tlb_remove_ptdesc(tlb, p4d);
}
/*
@@ -125,7 +143,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
tlb->cleared_p4ds = 1;
- tlb_remove_table(tlb, pud);
+ tlb_remove_ptdesc(tlb, pud);
}
diff --git a/arch/s390/include/asm/tpi.h b/arch/s390/include/asm/tpi.h
index 1ac538b8cbf5..f76e5fdff23a 100644
--- a/arch/s390/include/asm/tpi.h
+++ b/arch/s390/include/asm/tpi.h
@@ -19,6 +19,19 @@ struct tpi_info {
u32 :12;
} __packed __aligned(4);
+/* I/O-Interruption Code as stored by TPI for an Adapter I/O */
+struct tpi_adapter_info {
+ u32 aism:8;
+ u32 :22;
+ u32 error:1;
+ u32 forward:1;
+ u32 reserved;
+ u32 adapter_IO:1;
+ u32 directed_irq:1;
+ u32 isc:3;
+ u32 :27;
+} __packed __aligned(4);
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_TPI_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index c2c9995466e0..81ae8a98e7ec 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -15,7 +15,6 @@
*/
#include <asm/asm-extable.h>
#include <asm/processor.h>
-#include <asm/ctl_reg.h>
#include <asm/extable.h>
#include <asm/facility.h>
#include <asm-generic/access_ok.h>
@@ -285,7 +284,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
return __clear_user(to, n);
}
-int copy_to_user_real(void __user *dest, unsigned long src, unsigned long count);
void *s390_kernel_write(void *dst, const void *src, size_t size);
int __noreturn __put_kernel_bad(void);
@@ -391,4 +389,212 @@ do { \
goto err_label; \
} while (0)
+void __cmpxchg_user_key_called_with_bad_pointer(void);
+
+#define CMPXCHG_USER_KEY_MAX_LOOPS 128
+
+static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
+ __uint128_t old, __uint128_t new,
+ unsigned long key, int size)
+{
+ int rc = 0;
+
+ switch (size) {
+ case 1: {
+ unsigned int prev, shift, mask, _old, _new;
+ unsigned long count;
+
+ shift = (3 ^ (address & 3)) << 3;
+ address ^= address & 3;
+ _old = ((unsigned int)old & 0xff) << shift;
+ _new = ((unsigned int)new & 0xff) << shift;
+ mask = ~(0xff << shift);
+ asm volatile(
+ " spka 0(%[key])\n"
+ " sacf 256\n"
+ " llill %[count],%[max_loops]\n"
+ "0: l %[prev],%[address]\n"
+ "1: nr %[prev],%[mask]\n"
+ " xilf %[mask],0xffffffff\n"
+ " or %[new],%[prev]\n"
+ " or %[prev],%[tmp]\n"
+ "2: lr %[tmp],%[prev]\n"
+ "3: cs %[prev],%[new],%[address]\n"
+ "4: jnl 5f\n"
+ " xr %[tmp],%[prev]\n"
+ " xr %[new],%[tmp]\n"
+ " nr %[tmp],%[mask]\n"
+ " jnz 5f\n"
+ " brct %[count],2b\n"
+ "5: sacf 768\n"
+ " spka %[default_key]\n"
+ EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
+ EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
+ EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
+ EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
+ : [rc] "+&d" (rc),
+ [prev] "=&d" (prev),
+ [address] "+Q" (*(int *)address),
+ [tmp] "+&d" (_old),
+ [new] "+&d" (_new),
+ [mask] "+&d" (mask),
+ [count] "=a" (count)
+ : [key] "%[count]" (key << 4),
+ [default_key] "J" (PAGE_DEFAULT_KEY),
+ [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
+ : "memory", "cc");
+ *(unsigned char *)uval = prev >> shift;
+ if (!count)
+ rc = -EAGAIN;
+ return rc;
+ }
+ case 2: {
+ unsigned int prev, shift, mask, _old, _new;
+ unsigned long count;
+
+ shift = (2 ^ (address & 2)) << 3;
+ address ^= address & 2;
+ _old = ((unsigned int)old & 0xffff) << shift;
+ _new = ((unsigned int)new & 0xffff) << shift;
+ mask = ~(0xffff << shift);
+ asm volatile(
+ " spka 0(%[key])\n"
+ " sacf 256\n"
+ " llill %[count],%[max_loops]\n"
+ "0: l %[prev],%[address]\n"
+ "1: nr %[prev],%[mask]\n"
+ " xilf %[mask],0xffffffff\n"
+ " or %[new],%[prev]\n"
+ " or %[prev],%[tmp]\n"
+ "2: lr %[tmp],%[prev]\n"
+ "3: cs %[prev],%[new],%[address]\n"
+ "4: jnl 5f\n"
+ " xr %[tmp],%[prev]\n"
+ " xr %[new],%[tmp]\n"
+ " nr %[tmp],%[mask]\n"
+ " jnz 5f\n"
+ " brct %[count],2b\n"
+ "5: sacf 768\n"
+ " spka %[default_key]\n"
+ EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
+ EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
+ EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
+ EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
+ : [rc] "+&d" (rc),
+ [prev] "=&d" (prev),
+ [address] "+Q" (*(int *)address),
+ [tmp] "+&d" (_old),
+ [new] "+&d" (_new),
+ [mask] "+&d" (mask),
+ [count] "=a" (count)
+ : [key] "%[count]" (key << 4),
+ [default_key] "J" (PAGE_DEFAULT_KEY),
+ [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
+ : "memory", "cc");
+ *(unsigned short *)uval = prev >> shift;
+ if (!count)
+ rc = -EAGAIN;
+ return rc;
+ }
+ case 4: {
+ unsigned int prev = old;
+
+ asm volatile(
+ " spka 0(%[key])\n"
+ " sacf 256\n"
+ "0: cs %[prev],%[new],%[address]\n"
+ "1: sacf 768\n"
+ " spka %[default_key]\n"
+ EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
+ EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
+ : [rc] "+&d" (rc),
+ [prev] "+&d" (prev),
+ [address] "+Q" (*(int *)address)
+ : [new] "d" ((unsigned int)new),
+ [key] "a" (key << 4),
+ [default_key] "J" (PAGE_DEFAULT_KEY)
+ : "memory", "cc");
+ *(unsigned int *)uval = prev;
+ return rc;
+ }
+ case 8: {
+ unsigned long prev = old;
+
+ asm volatile(
+ " spka 0(%[key])\n"
+ " sacf 256\n"
+ "0: csg %[prev],%[new],%[address]\n"
+ "1: sacf 768\n"
+ " spka %[default_key]\n"
+ EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
+ EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
+ : [rc] "+&d" (rc),
+ [prev] "+&d" (prev),
+ [address] "+QS" (*(long *)address)
+ : [new] "d" ((unsigned long)new),
+ [key] "a" (key << 4),
+ [default_key] "J" (PAGE_DEFAULT_KEY)
+ : "memory", "cc");
+ *(unsigned long *)uval = prev;
+ return rc;
+ }
+ case 16: {
+ __uint128_t prev = old;
+
+ asm volatile(
+ " spka 0(%[key])\n"
+ " sacf 256\n"
+ "0: cdsg %[prev],%[new],%[address]\n"
+ "1: sacf 768\n"
+ " spka %[default_key]\n"
+ EX_TABLE_UA_LOAD_REGPAIR(0b, 1b, %[rc], %[prev])
+ EX_TABLE_UA_LOAD_REGPAIR(1b, 1b, %[rc], %[prev])
+ : [rc] "+&d" (rc),
+ [prev] "+&d" (prev),
+ [address] "+QS" (*(__int128_t *)address)
+ : [new] "d" (new),
+ [key] "a" (key << 4),
+ [default_key] "J" (PAGE_DEFAULT_KEY)
+ : "memory", "cc");
+ *(__uint128_t *)uval = prev;
+ return rc;
+ }
+ }
+ __cmpxchg_user_key_called_with_bad_pointer();
+ return rc;
+}
+
+/**
+ * cmpxchg_user_key() - cmpxchg with user space target, honoring storage keys
+ * @ptr: User space address of value to compare to @old and exchange with
+ * @new. Must be aligned to sizeof(*@ptr).
+ * @uval: Address where the old value of *@ptr is written to.
+ * @old: Old value. Compared to the content pointed to by @ptr in order to
+ * determine if the exchange occurs. The old value read from *@ptr is
+ * written to *@uval.
+ * @new: New value to place at *@ptr.
+ * @key: Access key to use for checking storage key protection.
+ *
+ * Perform a cmpxchg on a user space target, honoring storage key protection.
+ * @key alone determines how key checking is performed, neither
+ * storage-protection-override nor fetch-protection-override apply.
+ * The caller must compare *@uval and @old to determine if values have been
+ * exchanged. In case of an exception *@uval is set to zero.
+ *
+ * Return: 0: cmpxchg executed
+ * -EFAULT: an exception happened when trying to access *@ptr
+ * -EAGAIN: maxed out number of retries (byte and short only)
+ */
+#define cmpxchg_user_key(ptr, uval, old, new, key) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(uval) __uval = (uval); \
+ \
+ BUILD_BUG_ON(sizeof(*(__ptr)) != sizeof(*(__uval))); \
+ might_fault(); \
+ __chk_user_ptr(__ptr); \
+ __cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval), \
+ (old), (new), (key), sizeof(*(__ptr))); \
+})
+
#endif /* __S390_UACCESS_H */
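KVM's KVM_S390_MEMOP_ABSOLUTE_CMPXCHG path is an in-tree user of this interface. As a hedged sketch, a typical caller loops until the compare succeeds, feeding the returned old value back in (kernel context assumed; the helper name is illustrative):

	/* Store 'new' into a 4-byte user word under access key 'key'. */
	static int set_user_word(unsigned int __user *ptr, unsigned int new,
				 unsigned long key)
	{
		unsigned int old, cur;
		int rc;

		if (get_user(cur, ptr))
			return -EFAULT;
		do {
			old = cur;
			/* on return 'cur' holds the value found at *ptr */
			rc = cmpxchg_user_key(ptr, &cur, old, new, key);
			if (rc)
				return rc; /* -EFAULT; -EAGAIN only for 1/2 byte sizes */
		} while (cur != old);
		return 0;
	}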
diff --git a/arch/s390/include/asm/unwind.h b/arch/s390/include/asm/unwind.h
index 0bf06f1682d8..b8ecf04e3468 100644
--- a/arch/s390/include/asm/unwind.h
+++ b/arch/s390/include/asm/unwind.h
@@ -4,7 +4,7 @@
#include <linux/sched.h>
#include <linux/ftrace.h>
-#include <linux/kprobes.h>
+#include <linux/rethook.h>
#include <linux/llist.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
@@ -43,13 +43,15 @@ struct unwind_state {
bool error;
};
-/* Recover the return address modified by kretprobe and ftrace_graph. */
+/* Recover the return address modified by rethook and ftrace_graph. */
static inline unsigned long unwind_recover_ret_addr(struct unwind_state *state,
unsigned long ip)
{
- ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL);
- if (is_kretprobe_trampoline(ip))
- ip = kretprobe_find_ret_addr(state->task, (void *)state->sp, &state->kr_cur);
+ ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, (void *)state->sp);
+#ifdef CONFIG_RETHOOK
+ if (is_rethook_trampoline(ip))
+ ip = rethook_find_ret_addr(state->task, state->sp, &state->kr_cur);
+#endif
return ip;
}
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index cfea7b77a5b8..0e7bd3873907 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -34,6 +34,7 @@
#define UVC_CMD_INIT_UV 0x000f
#define UVC_CMD_CREATE_SEC_CONF 0x0100
#define UVC_CMD_DESTROY_SEC_CONF 0x0101
+#define UVC_CMD_DESTROY_SEC_CONF_FAST 0x0102
#define UVC_CMD_CREATE_SEC_CPU 0x0120
#define UVC_CMD_DESTROY_SEC_CPU 0x0121
#define UVC_CMD_CONV_TO_SEC_STOR 0x0200
@@ -50,9 +51,16 @@
#define UVC_CMD_SET_UNSHARE_ALL 0x0340
#define UVC_CMD_PIN_PAGE_SHARED 0x0341
#define UVC_CMD_UNPIN_PAGE_SHARED 0x0342
+#define UVC_CMD_DUMP_INIT 0x0400
+#define UVC_CMD_DUMP_CONF_STOR_STATE 0x0401
+#define UVC_CMD_DUMP_CPU 0x0402
+#define UVC_CMD_DUMP_COMPLETE 0x0403
#define UVC_CMD_SET_SHARED_ACCESS 0x1000
#define UVC_CMD_REMOVE_SHARED_ACCESS 0x1001
#define UVC_CMD_RETR_ATTEST 0x1020
+#define UVC_CMD_ADD_SECRET 0x1031
+#define UVC_CMD_LIST_SECRETS 0x1033
+#define UVC_CMD_LOCK_SECRETS 0x1034
/* Bits in installed uv calls */
enum uv_cmds_inst {
@@ -77,12 +85,22 @@ enum uv_cmds_inst {
BIT_UVC_CMD_UNSHARE_ALL = 20,
BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
+ BIT_UVC_CMD_DESTROY_SEC_CONF_FAST = 23,
+ BIT_UVC_CMD_DUMP_INIT = 24,
+ BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE = 25,
+ BIT_UVC_CMD_DUMP_CPU = 26,
+ BIT_UVC_CMD_DUMP_COMPLETE = 27,
BIT_UVC_CMD_RETR_ATTEST = 28,
+ BIT_UVC_CMD_ADD_SECRET = 29,
+ BIT_UVC_CMD_LIST_SECRETS = 30,
+ BIT_UVC_CMD_LOCK_SECRETS = 31,
};
enum uv_feat_ind {
BIT_UV_FEAT_MISC = 0,
BIT_UV_FEAT_AIV = 1,
+ BIT_UV_FEAT_AP = 4,
+ BIT_UV_FEAT_AP_INTR = 5,
};
struct uv_cb_header {
@@ -107,10 +125,24 @@ struct uv_cb_qui {
u32 reserved70[3]; /* 0x0070 */
u32 max_num_sec_conf; /* 0x007c */
u64 max_guest_stor_addr; /* 0x0080 */
- u8 reserved88[158 - 136]; /* 0x0088 */
+ u8 reserved88[0x9e - 0x88]; /* 0x0088 */
u16 max_guest_cpu_id; /* 0x009e */
u64 uv_feature_indications; /* 0x00a0 */
- u8 reserveda8[200 - 168]; /* 0x00a8 */
+ u64 reserveda8; /* 0x00a8 */
+ u64 supp_se_hdr_versions; /* 0x00b0 */
+ u64 supp_se_hdr_pcf; /* 0x00b8 */
+ u64 reservedc0; /* 0x00c0 */
+ u64 conf_dump_storage_state_len; /* 0x00c8 */
+ u64 conf_dump_finalize_len; /* 0x00d0 */
+ u64 reservedd8; /* 0x00d8 */
+ u64 supp_att_req_hdr_ver; /* 0x00e0 */
+ u64 supp_att_pflags; /* 0x00e8 */
+ u64 reservedf0; /* 0x00f0 */
+ u64 supp_add_secret_req_ver; /* 0x00f8 */
+ u64 supp_add_secret_pcf; /* 0x0100 */
+ u64 supp_secret_types; /* 0x0108 */
+ u16 max_secrets; /* 0x0110 */
+ u8 reserved112[0x120 - 0x112]; /* 0x0112 */
} __packed __aligned(8);
/* Initialize Ultravisor */
@@ -129,7 +161,15 @@ struct uv_cb_cgc {
u64 guest_handle;
u64 conf_base_stor_origin;
u64 conf_virt_stor_origin;
- u64 reserved30;
+ u8 reserved30[6];
+ union {
+ struct {
+ u16 : 14;
+ u16 ap_instr_intr : 1;
+ u16 ap_allow_instr : 1;
+ };
+ u16 raw;
+ } flags;
u64 guest_stor_origin;
u64 guest_stor_len;
u64 guest_sca;
@@ -213,6 +253,14 @@ struct uv_cb_nodata {
u64 reserved20[4];
} __packed __aligned(8);
+/* Destroy Configuration Fast */
+struct uv_cb_destroy_fast {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u64 handle;
+ u64 reserved20[5];
+} __packed __aligned(8);
+
/* Set Shared Access */
struct uv_cb_share {
struct uv_cb_header header;
@@ -240,6 +288,44 @@ struct uv_cb_attest {
u64 reserved168[4]; /* 0x0168 */
} __packed __aligned(8);
+struct uv_cb_dump_cpu {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u64 cpu_handle;
+ u64 dump_area_origin;
+ u64 reserved28[5];
+} __packed __aligned(8);
+
+struct uv_cb_dump_stor_state {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u64 config_handle;
+ u64 dump_area_origin;
+ u64 gaddr;
+ u64 reserved28[4];
+} __packed __aligned(8);
+
+struct uv_cb_dump_complete {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u64 config_handle;
+ u64 dump_area_origin;
+ u64 reserved30[5];
+} __packed __aligned(8);
+
+/*
+ * A common UV call struct for pv guests that contains a single address
+ * Examples:
+ * Add Secret
+ * List Secrets
+ */
+struct uv_cb_guest_addr {
+ struct uv_cb_header header;
+ u64 reserved08[3];
+ u64 addr;
+ u64 reserved28[4];
+} __packed __aligned(8);
+
static inline int __uv_call(unsigned long r1, unsigned long r2)
{
int cc;
@@ -307,10 +393,27 @@ struct uv_info {
unsigned int max_num_sec_conf;
unsigned short max_guest_cpu_id;
unsigned long uv_feature_indications;
+ unsigned long supp_se_hdr_ver;
+ unsigned long supp_se_hdr_pcf;
+ unsigned long conf_dump_storage_state_len;
+ unsigned long conf_dump_finalize_len;
+ unsigned long supp_att_req_hdr_ver;
+ unsigned long supp_att_pflags;
+ unsigned long supp_add_secret_req_ver;
+ unsigned long supp_add_secret_pcf;
+ unsigned long supp_secret_types;
+ unsigned short max_secrets;
};
extern struct uv_info uv_info;
+static inline bool uv_has_feature(u8 feature_bit)
+{
+ if (feature_bit >= sizeof(uv_info.uv_feature_indications) * 8)
+ return false;
+ return test_bit_inv(feature_bit, &uv_info.uv_feature_indications);
+}
+
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
extern int prot_virt_guest;
@@ -377,7 +480,9 @@ static inline int is_prot_virt_host(void)
return prot_virt_host;
}
+int uv_pin_shared(unsigned long paddr);
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
+int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
int uv_destroy_owned_page(unsigned long paddr);
int uv_convert_from_secure(unsigned long paddr);
int uv_convert_owned_from_secure(unsigned long paddr);
@@ -388,6 +493,11 @@ void setup_uv(void);
#define is_prot_virt_host() 0
static inline void setup_uv(void) {}
+static inline int uv_pin_shared(unsigned long paddr)
+{
+ return 0;
+}
+
static inline int uv_destroy_owned_page(unsigned long paddr)
{
return 0;
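All of the UVC control blocks above are driven through the same pattern: fill in header.cmd and header.len, then hand the block to uv_call()/uv_call_sched(). A sketch for Destroy Configuration Fast, assuming the cmd/len layout of struct uv_cb_header from the non-quoted part of this header (rc/rrc handling elided; the helper name is illustrative):

	static int destroy_config_fast(u64 handle)
	{
		struct uv_cb_destroy_fast uvcb = {
			.header.cmd = UVC_CMD_DESTROY_SEC_CONF_FAST,
			.header.len = sizeof(uvcb),
			.handle = handle,
		};

		/* a nonzero condition code means the UVC was rejected */
		return uv_call_sched(0, (u64)&uvcb) ? -EIO : 0;
	}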
diff --git a/arch/s390/include/asm/vdso/data.h b/arch/s390/include/asm/vdso/data.h
index 73ee89142666..0e2b40ef69b0 100644
--- a/arch/s390/include/asm/vdso/data.h
+++ b/arch/s390/include/asm/vdso/data.h
@@ -3,7 +3,6 @@
#define __S390_ASM_VDSO_DATA_H
#include <linux/types.h>
-#include <vdso/datapage.h>
struct arch_vdso_data {
__s64 tod_steering_delta;
diff --git a/arch/s390/include/asm/vga.h b/arch/s390/include/asm/vga.h
deleted file mode 100644
index 605dc46bac5e..000000000000
--- a/arch/s390/include/asm/vga.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_S390_VGA_H
-#define _ASM_S390_VGA_H
-
-/* Avoid compile errors due to missing asm/vga.h */
-
-#endif /* _ASM_S390_VGA_H */
diff --git a/arch/s390/include/asm/word-at-a-time.h b/arch/s390/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..203acd6e431b
--- /dev/null
+++ b/arch/s390/include/asm/word-at-a-time.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+#include <linux/bitops.h>
+#include <linux/wordpart.h>
+#include <asm/asm-extable.h>
+#include <asm/bitsperlong.h>
+
+struct word_at_a_time {
+ const unsigned long bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x7f) }
+
+static inline unsigned long prep_zero_mask(unsigned long val, unsigned long data, const struct word_at_a_time *c)
+{
+ return data;
+}
+
+static inline unsigned long create_zero_mask(unsigned long data)
+{
+ return __fls(data);
+}
+
+static inline unsigned long find_zero(unsigned long data)
+{
+ return (data ^ (BITS_PER_LONG - 1)) >> 3;
+}
+
+static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+ unsigned long mask = (val & c->bits) + c->bits;
+
+ *data = ~(mask | val | c->bits);
+ return *data;
+}
+
+static inline unsigned long zero_bytemask(unsigned long data)
+{
+ return ~1UL << data;
+}
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+ unsigned long data;
+
+ asm volatile(
+ "0: lg %[data],0(%[addr])\n"
+ "1: nopr %%r7\n"
+ EX_TABLE_ZEROPAD(0b, 1b, %[data], %[addr])
+ EX_TABLE_ZEROPAD(1b, 1b, %[data], %[addr])
+ : [data] "=d" (data)
+ : [addr] "a" (addr), "m" (*(unsigned long *)addr));
+ return data;
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
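The helpers compose in the canonical word-at-a-time sequence: has_zero() flags a word containing a NUL byte, prep_zero_mask() and create_zero_mask() narrow it down, and find_zero() yields the byte index. A strlen-style sketch for a long-aligned string (illustrative only):

	static inline unsigned long waat_strlen(const char *s)
	{
		const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
		const unsigned long *p = (const unsigned long *)s;
		unsigned long data, mask, len = 0;

		for (;; len += sizeof(unsigned long)) {
			data = *p++;
			if (has_zero(data, &mask, &constants)) {
				mask = prep_zero_mask(data, mask, &constants);
				mask = create_zero_mask(mask);
				return len + find_zero(mask);
			}
		}
	}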
diff --git a/arch/s390/include/uapi/asm/cmb.h b/arch/s390/include/uapi/asm/cmb.h
index ecbe94941403..115434ab98fb 100644
--- a/arch/s390/include/uapi/asm/cmb.h
+++ b/arch/s390/include/uapi/asm/cmb.h
@@ -31,7 +31,7 @@
struct cmbdata {
__u64 size;
__u64 elapsed_time;
- /* basic and exended format: */
+ /* basic and extended format: */
__u64 ssch_rsch_count;
__u64 sample_count;
__u64 device_connect_time;
diff --git a/arch/s390/include/uapi/asm/dasd.h b/arch/s390/include/uapi/asm/dasd.h
index 9ec86fae9980..b11d98800458 100644
--- a/arch/s390/include/uapi/asm/dasd.h
+++ b/arch/s390/include/uapi/asm/dasd.h
@@ -24,7 +24,7 @@
/*
* struct dasd_information2_t
* represents any data about the device, which is visible to userspace.
- * including foramt and featueres.
+ * including format and features.
*/
typedef struct dasd_information2_t {
unsigned int devno; /* S/390 devno */
@@ -78,6 +78,7 @@ typedef struct dasd_information2_t {
* 0x040: give access to raw eckd data
* 0x080: enable discard support
* 0x100: enable autodisable for IFCC errors (default)
+ * 0x200: enable requeue of all requests on autoquiesce
*/
#define DASD_FEATURE_READONLY 0x001
#define DASD_FEATURE_USEDIAG 0x002
@@ -88,6 +89,7 @@ typedef struct dasd_information2_t {
#define DASD_FEATURE_USERAW 0x040
#define DASD_FEATURE_DISCARD 0x080
#define DASD_FEATURE_PATH_AUTODISABLE 0x100
+#define DASD_FEATURE_REQUEUEQUIESCE 0x200
#define DASD_FEATURE_DEFAULT DASD_FEATURE_PATH_AUTODISABLE
#define DASD_PARTN_BITS 2
@@ -183,6 +185,18 @@ typedef struct format_data_t {
} format_data_t;
/*
+ * struct dasd_copypair_swap_data_t
+ * represents all data necessary to issue a swap of the copy pair relation
+ */
+struct dasd_copypair_swap_data_t {
+ char primary[20]; /* BUSID of primary */
+ char secondary[20]; /* BUSID of secondary */
+
+ /* Reserved for future updates. */
+ __u8 reserved[64];
+};
+
+/*
* values to be used for format_data_t.intensity
* 0/8: normal format
* 1/9: also write record zero
@@ -326,6 +340,8 @@ struct dasd_snid_ioctl_data {
#define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t)
/* Release Allocated Space */
#define BIODASDRAS _IOW(DASD_IOCTL_LETTER, 3, format_data_t)
+/* Swap copy pair relation */
+#define BIODASDCOPYPAIRSWAP _IOW(DASD_IOCTL_LETTER, 4, struct dasd_copypair_swap_data_t)
/* Get Sense Path Group ID (SNID) data */
#define BIODASDSNID _IOWR(DASD_IOCTL_LETTER, 1, struct dasd_snid_ioctl_data)
diff --git a/arch/s390/include/uapi/asm/fs3270.h b/arch/s390/include/uapi/asm/fs3270.h
new file mode 100644
index 000000000000..c4bc1108af6a
--- /dev/null
+++ b/arch/s390/include/uapi/asm/fs3270.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_S390_UAPI_FS3270_H
+#define __ASM_S390_UAPI_FS3270_H
+
+#include <linux/types.h>
+#include <asm/ioctl.h>
+
+/* ioctls for fullscreen 3270 */
+#define TUBICMD _IO('3', 3) /* set ccw command for fs reads. */
+#define TUBOCMD _IO('3', 4) /* set ccw command for fs writes. */
+#define TUBGETI _IO('3', 7) /* get ccw command for fs reads. */
+#define TUBGETO _IO('3', 8) /* get ccw command for fs writes. */
+#define TUBGETMOD _IO('3', 13) /* get characteristics like model, cols, rows */
+
+/* For TUBGETMOD */
+struct raw3270_iocb {
+ __u16 model;
+ __u16 line_cnt;
+ __u16 col_cnt;
+ __u16 pf_cnt;
+ __u16 re_cnt;
+ __u16 map;
+};
+
+#endif /* __ASM_S390_UAPI_FS3270_H */
diff --git a/arch/s390/include/uapi/asm/ipl.h b/arch/s390/include/uapi/asm/ipl.h
index d1ecd5d722a0..2cd28af50dd4 100644
--- a/arch/s390/include/uapi/asm/ipl.h
+++ b/arch/s390/include/uapi/asm/ipl.h
@@ -27,6 +27,7 @@ enum ipl_pbt {
IPL_PBT_FCP = 0,
IPL_PBT_SCP_DATA = 1,
IPL_PBT_CCW = 2,
+ IPL_PBT_ECKD = 3,
IPL_PBT_NVME = 4,
};
@@ -111,6 +112,34 @@ struct ipl_pb0_ccw {
__u8 reserved5[8];
} __packed;
+/* IPL Parameter Block 0 for ECKD */
+struct ipl_pb0_eckd {
+ __u32 len;
+ __u8 pbt;
+ __u8 reserved1[3];
+ __u32 reserved2[78];
+ __u8 opt;
+ __u8 reserved4[4];
+ __u8 reserved5:5;
+ __u8 ssid:3;
+ __u16 devno;
+ __u32 reserved6[5];
+ __u32 bootprog;
+ __u8 reserved7[12];
+ struct {
+ __u16 cyl;
+ __u8 head;
+ __u8 record;
+ __u32 reserved;
+ } br_chr __packed;
+ __u32 scp_data_len;
+ __u8 reserved8[260];
+ __u8 scp_data[];
+} __packed;
+
+#define IPL_PB0_ECKD_OPT_IPL 0x10
+#define IPL_PB0_ECKD_OPT_DUMP 0x20
+
#define IPL_PB0_CCW_VM_FLAG_NSS 0x80
#define IPL_PB0_CCW_VM_FLAG_VP 0x40
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 7a6b14874d65..05eaf6db3ad4 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -12,7 +12,320 @@
#include <linux/types.h>
#define __KVM_S390
-#define __KVM_HAVE_GUEST_DEBUG
+
+struct kvm_s390_skeys {
+ __u64 start_gfn;
+ __u64 count;
+ __u64 skeydata_addr;
+ __u32 flags;
+ __u32 reserved[9];
+};
+
+#define KVM_S390_CMMA_PEEK (1 << 0)
+
+/**
+ * kvm_s390_cmma_log - Used for CMMA migration.
+ *
+ * Used both for input and output.
+ *
+ * @start_gfn: Guest page number to start from.
+ * @count: Size of the result buffer.
+ * @flags: Control operation mode via KVM_S390_CMMA_* flags
+ * @remaining: Used with KVM_S390_GET_CMMA_BITS. Indicates how many dirty
+ * pages are still remaining.
+ * @mask: Used with KVM_S390_SET_CMMA_BITS. Bitmap of bits to actually set
+ * in the PGSTE.
+ * @values: Pointer to the values buffer.
+ *
+ * Used in KVM_S390_{G,S}ET_CMMA_BITS ioctls.
+ */
+struct kvm_s390_cmma_log {
+ __u64 start_gfn;
+ __u32 count;
+ __u32 flags;
+ union {
+ __u64 remaining;
+ __u64 mask;
+ };
+ __u64 values;
+};
+
+#define KVM_S390_RESET_POR 1
+#define KVM_S390_RESET_CLEAR 2
+#define KVM_S390_RESET_SUBSYSTEM 4
+#define KVM_S390_RESET_CPU_INIT 8
+#define KVM_S390_RESET_IPL 16
+
+/* for KVM_S390_MEM_OP */
+struct kvm_s390_mem_op {
+ /* in */
+ __u64 gaddr; /* the guest address */
+ __u64 flags; /* flags */
+ __u32 size; /* amount of bytes */
+ __u32 op; /* type of operation */
+ __u64 buf; /* buffer in userspace */
+ union {
+ struct {
+ __u8 ar; /* the access register number */
+ __u8 key; /* access key, ignored if flag unset */
+ __u8 pad1[6]; /* ignored */
+ __u64 old_addr; /* ignored if cmpxchg flag unset */
+ };
+ __u32 sida_offset; /* offset into the sida */
+ __u8 reserved[32]; /* ignored */
+ };
+};
+/* types for kvm_s390_mem_op->op */
+#define KVM_S390_MEMOP_LOGICAL_READ 0
+#define KVM_S390_MEMOP_LOGICAL_WRITE 1
+#define KVM_S390_MEMOP_SIDA_READ 2
+#define KVM_S390_MEMOP_SIDA_WRITE 3
+#define KVM_S390_MEMOP_ABSOLUTE_READ 4
+#define KVM_S390_MEMOP_ABSOLUTE_WRITE 5
+#define KVM_S390_MEMOP_ABSOLUTE_CMPXCHG 6
+
+/* flags for kvm_s390_mem_op->flags */
+#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0)
+#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1)
+#define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2)
+
+/* flags specifying extension support via KVM_CAP_S390_MEM_OP_EXTENSION */
+#define KVM_S390_MEMOP_EXTENSION_CAP_BASE (1 << 0)
+#define KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG (1 << 1)
+
+struct kvm_s390_psw {
+ __u64 mask;
+ __u64 addr;
+};
+
+/* valid values for type in kvm_s390_interrupt */
+#define KVM_S390_SIGP_STOP 0xfffe0000u
+#define KVM_S390_PROGRAM_INT 0xfffe0001u
+#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u
+#define KVM_S390_RESTART 0xfffe0003u
+#define KVM_S390_INT_PFAULT_INIT 0xfffe0004u
+#define KVM_S390_INT_PFAULT_DONE 0xfffe0005u
+#define KVM_S390_MCHK 0xfffe1000u
+#define KVM_S390_INT_CLOCK_COMP 0xffff1004u
+#define KVM_S390_INT_CPU_TIMER 0xffff1005u
+#define KVM_S390_INT_VIRTIO 0xffff2603u
+#define KVM_S390_INT_SERVICE 0xffff2401u
+#define KVM_S390_INT_EMERGENCY 0xffff1201u
+#define KVM_S390_INT_EXTERNAL_CALL 0xffff1202u
+/* Anything below 0xfffe0000u is taken by INT_IO */
+#define KVM_S390_INT_IO(ai,cssid,ssid,schid) \
+ (((schid)) | \
+ ((ssid) << 16) | \
+ ((cssid) << 18) | \
+ ((ai) << 26))
+#define KVM_S390_INT_IO_MIN 0x00000000u
+#define KVM_S390_INT_IO_MAX 0xfffdffffu
+#define KVM_S390_INT_IO_AI_MASK 0x04000000u
+
+
+struct kvm_s390_interrupt {
+ __u32 type;
+ __u32 parm;
+ __u64 parm64;
+};
+
+struct kvm_s390_io_info {
+ __u16 subchannel_id;
+ __u16 subchannel_nr;
+ __u32 io_int_parm;
+ __u32 io_int_word;
+};
+
+struct kvm_s390_ext_info {
+ __u32 ext_params;
+ __u32 pad;
+ __u64 ext_params2;
+};
+
+struct kvm_s390_pgm_info {
+ __u64 trans_exc_code;
+ __u64 mon_code;
+ __u64 per_address;
+ __u32 data_exc_code;
+ __u16 code;
+ __u16 mon_class_nr;
+ __u8 per_code;
+ __u8 per_atmid;
+ __u8 exc_access_id;
+ __u8 per_access_id;
+ __u8 op_access_id;
+#define KVM_S390_PGM_FLAGS_ILC_VALID 0x01
+#define KVM_S390_PGM_FLAGS_ILC_0 0x02
+#define KVM_S390_PGM_FLAGS_ILC_1 0x04
+#define KVM_S390_PGM_FLAGS_ILC_MASK 0x06
+#define KVM_S390_PGM_FLAGS_NO_REWIND 0x08
+ __u8 flags;
+ __u8 pad[2];
+};
+
+struct kvm_s390_prefix_info {
+ __u32 address;
+};
+
+struct kvm_s390_extcall_info {
+ __u16 code;
+};
+
+struct kvm_s390_emerg_info {
+ __u16 code;
+};
+
+#define KVM_S390_STOP_FLAG_STORE_STATUS 0x01
+struct kvm_s390_stop_info {
+ __u32 flags;
+};
+
+struct kvm_s390_mchk_info {
+ __u64 cr14;
+ __u64 mcic;
+ __u64 failing_storage_address;
+ __u32 ext_damage_code;
+ __u32 pad;
+ __u8 fixed_logout[16];
+};
+
+struct kvm_s390_irq {
+ __u64 type;
+ union {
+ struct kvm_s390_io_info io;
+ struct kvm_s390_ext_info ext;
+ struct kvm_s390_pgm_info pgm;
+ struct kvm_s390_emerg_info emerg;
+ struct kvm_s390_extcall_info extcall;
+ struct kvm_s390_prefix_info prefix;
+ struct kvm_s390_stop_info stop;
+ struct kvm_s390_mchk_info mchk;
+ char reserved[64];
+ } u;
+};
+
+struct kvm_s390_irq_state {
+ __u64 buf;
+ __u32 flags; /* will stay unused for compatibility reasons */
+ __u32 len;
+ __u32 reserved[4]; /* will stay unused for compatibility reasons */
+};
+
+struct kvm_s390_ucas_mapping {
+ __u64 user_addr;
+ __u64 vcpu_addr;
+ __u64 length;
+};
+
+struct kvm_s390_pv_sec_parm {
+ __u64 origin;
+ __u64 length;
+};
+
+struct kvm_s390_pv_unp {
+ __u64 addr;
+ __u64 size;
+ __u64 tweak;
+};
+
+enum pv_cmd_dmp_id {
+ KVM_PV_DUMP_INIT,
+ KVM_PV_DUMP_CONFIG_STOR_STATE,
+ KVM_PV_DUMP_COMPLETE,
+ KVM_PV_DUMP_CPU,
+};
+
+struct kvm_s390_pv_dmp {
+ __u64 subcmd;
+ __u64 buff_addr;
+ __u64 buff_len;
+ __u64 gaddr; /* For dump storage state */
+ __u64 reserved[4];
+};
+
+enum pv_cmd_info_id {
+ KVM_PV_INFO_VM,
+ KVM_PV_INFO_DUMP,
+};
+
+struct kvm_s390_pv_info_dump {
+ __u64 dump_cpu_buffer_len;
+ __u64 dump_config_mem_buffer_per_1m;
+ __u64 dump_config_finalize_len;
+};
+
+struct kvm_s390_pv_info_vm {
+ __u64 inst_calls_list[4];
+ __u64 max_cpus;
+ __u64 max_guests;
+ __u64 max_guest_addr;
+ __u64 feature_indication;
+};
+
+struct kvm_s390_pv_info_header {
+ __u32 id;
+ __u32 len_max;
+ __u32 len_written;
+ __u32 reserved;
+};
+
+struct kvm_s390_pv_info {
+ struct kvm_s390_pv_info_header header;
+ union {
+ struct kvm_s390_pv_info_dump dump;
+ struct kvm_s390_pv_info_vm vm;
+ };
+};
+
+enum pv_cmd_id {
+ KVM_PV_ENABLE,
+ KVM_PV_DISABLE,
+ KVM_PV_SET_SEC_PARMS,
+ KVM_PV_UNPACK,
+ KVM_PV_VERIFY,
+ KVM_PV_PREP_RESET,
+ KVM_PV_UNSHARE_ALL,
+ KVM_PV_INFO,
+ KVM_PV_DUMP,
+ KVM_PV_ASYNC_CLEANUP_PREPARE,
+ KVM_PV_ASYNC_CLEANUP_PERFORM,
+};
+
+struct kvm_pv_cmd {
+ __u32 cmd; /* Command to be executed */
+ __u16 rc; /* Ultravisor return code */
+ __u16 rrc; /* Ultravisor return reason code */
+ __u64 data; /* Data or address */
+ __u32 flags; /* flags for future extensions. Must be 0 for now */
+ __u32 reserved[3];
+};
+
+struct kvm_s390_zpci_op {
+ /* in */
+ __u32 fh; /* target device */
+ __u8 op; /* operation to perform */
+ __u8 pad[3];
+ union {
+ /* for KVM_S390_ZPCIOP_REG_AEN */
+ struct {
+ __u64 ibv; /* Guest addr of interrupt bit vector */
+ __u64 sb; /* Guest addr of summary bit */
+ __u32 flags;
+ __u32 noi; /* Number of interrupts */
+ __u8 isc; /* Guest interrupt subclass */
+ __u8 sbo; /* Offset of guest summary bit vector */
+ __u16 pad;
+ } reg_aen;
+ __u64 reserved[8];
+ } u;
+};
+
+/* types for kvm_s390_zpci_op->op */
+#define KVM_S390_ZPCIOP_REG_AEN 0
+#define KVM_S390_ZPCIOP_DEREG_AEN 1
+
+/* flags for kvm_s390_zpci_op->u.reg_aen.flags */
+#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0)
/* Device control API: s390-specific devices */
#define KVM_DEV_FLIC_GET_ALL_IRQS 1
@@ -74,6 +387,7 @@ struct kvm_s390_io_adapter_req {
#define KVM_S390_VM_CRYPTO 2
#define KVM_S390_VM_CPU_MODEL 3
#define KVM_S390_VM_MIGRATION 4
+#define KVM_S390_VM_CPU_TOPOLOGY 5
/* kvm attributes for mem_ctrl */
#define KVM_S390_VM_MEM_ENABLE_CMMA 0
@@ -158,6 +472,22 @@ struct kvm_s390_vm_cpu_subfunc {
__u8 reserved[1728];
};
+#define KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST 6
+#define KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST 7
+
+#define KVM_S390_VM_CPU_UV_FEAT_NR_BITS 64
+struct kvm_s390_vm_cpu_uv_feat {
+ union {
+ struct {
+ __u64 : 4;
+ __u64 ap : 1; /* bit 4 */
+ __u64 ap_intr : 1; /* bit 5 */
+ __u64 : 58;
+ };
+ __u64 feat;
+ };
+};
+
/* kvm attributes for crypto */
#define KVM_S390_VM_CRYPTO_ENABLE_AES_KW 0
#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1
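From userspace, struct kvm_s390_mem_op is passed to the KVM_S390_MEM_OP ioctl from <linux/kvm.h> on a vcpu fd (or the VM fd for the absolute operations). A minimal sketch reading guest logical memory, with error handling reduced to the ioctl return value:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int read_guest(int vcpu_fd, uint64_t gaddr, void *buf, uint32_t len)
	{
		struct kvm_s390_mem_op op;

		memset(&op, 0, sizeof(op));
		op.gaddr = gaddr;
		op.size = len;
		op.op = KVM_S390_MEMOP_LOGICAL_READ;
		op.buf = (uint64_t)(uintptr_t)buf;
		op.ar = 0; /* primary address space */

		return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
	}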
diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h
index 924b876f992c..5ad76471e73f 100644
--- a/arch/s390/include/uapi/asm/pkey.h
+++ b/arch/s390/include/uapi/asm/pkey.h
@@ -2,7 +2,7 @@
/*
* Userspace interface to the pkey device driver
*
- * Copyright IBM Corp. 2017, 2019
+ * Copyright IBM Corp. 2017, 2023
*
* Author: Harald Freudenberger <freude@de.ibm.com>
*
@@ -26,16 +26,21 @@
#define MAXCLRKEYSIZE 32 /* a clear key value may be up to 32 bytes */
#define MAXAESCIPHERKEYSIZE 136 /* our aes cipher keys have always 136 bytes */
#define MINEP11AESKEYBLOBSIZE 256 /* min EP11 AES key blob size */
-#define MAXEP11AESKEYBLOBSIZE 320 /* max EP11 AES key blob size */
+#define MAXEP11AESKEYBLOBSIZE 336 /* max EP11 AES key blob size */
/* Minimum size of a key blob */
#define MINKEYBLOBSIZE SECKEYBLOBSIZE
/* defines for the type field within the pkey_protkey struct */
-#define PKEY_KEYTYPE_AES_128 1
-#define PKEY_KEYTYPE_AES_192 2
-#define PKEY_KEYTYPE_AES_256 3
-#define PKEY_KEYTYPE_ECC 4
+#define PKEY_KEYTYPE_AES_128 1
+#define PKEY_KEYTYPE_AES_192 2
+#define PKEY_KEYTYPE_AES_256 3
+#define PKEY_KEYTYPE_ECC 4
+#define PKEY_KEYTYPE_ECC_P256 5
+#define PKEY_KEYTYPE_ECC_P384 6
+#define PKEY_KEYTYPE_ECC_P521 7
+#define PKEY_KEYTYPE_ECC_ED25519 8
+#define PKEY_KEYTYPE_ECC_ED448 9
/* the newer ioctls use a pkey_key_type enum for type information */
enum pkey_key_type {
@@ -348,7 +353,7 @@ struct pkey_kblob2pkey2 {
* Is able to find out which type of secure key is given (CCA AES secure
* key, CCA AES cipher key, CCA ECC private key, EP11 AES key, EP11 ECC private
* key) and tries to find all matching crypto cards based on the MKVP and maybe
- * other criterias (like CCA AES cipher keys need a CEX5C or higher, EP11 keys
+ * other criteria (like CCA AES cipher keys need a CEX5C or higher, EP11 keys
* with BLOB_PKEY_EXTRACTABLE need a CEX7 and EP11 api version 4). The list of
* APQNs is further filtered by the key's mkvp which needs to match to either
* the current mkvp (CCA and EP11) or the alternate mkvp (old mkvp, CCA adapters
@@ -365,7 +370,7 @@ struct pkey_kblob2pkey2 {
* is empty (apqn_entries is 0) the apqn_entries field is updated to the number
* of apqn targets found and the ioctl returns with 0. If apqn_entries is > 0
* but the number of apqn targets does not fit into the list, the apqn_targets
- * field is updatedd with the number of reqired entries but there are no apqn
+ * field is updated with the number of required entries but there are no apqn
* values stored in the list and the ioctl returns with ENOSPC. If no matching
* APQN is found, the ioctl returns with 0 but the apqn_entries value is 0.
*/
@@ -403,7 +408,7 @@ struct pkey_apqns4key {
* is empty (apqn_entries is 0) the apqn_entries field is updated to the number
* of apqn targets found and the ioctl returns with 0. If apqn_entries is > 0
* but the number of apqn targets does not fit into the list, the apqn_targets
- * field is updatedd with the number of reqired entries but there are no apqn
+ * field is updated with the number of required entries but there are no apqn
* values stored in the list and the ioctl returns with ENOSPC. If no matching
* APQN is found, the ioctl returns with 0 but the apqn_entries value is 0.
*/
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index ad64d673b5e6..bb0826024bb9 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -8,6 +8,8 @@
#ifndef _UAPI_S390_PTRACE_H
#define _UAPI_S390_PTRACE_H
+#include <linux/const.h>
+
/*
* Offsets in the user_regs_struct. They are used for the ptrace
* system call and in entry.S
@@ -166,6 +168,64 @@
#endif /* __s390x__ */
+#ifndef __s390x__
+
+#define PSW_MASK_PER _AC(0x40000000, UL)
+#define PSW_MASK_DAT _AC(0x04000000, UL)
+#define PSW_MASK_IO _AC(0x02000000, UL)
+#define PSW_MASK_EXT _AC(0x01000000, UL)
+#define PSW_MASK_KEY _AC(0x00F00000, UL)
+#define PSW_MASK_BASE _AC(0x00080000, UL) /* always one */
+#define PSW_MASK_MCHECK _AC(0x00040000, UL)
+#define PSW_MASK_WAIT _AC(0x00020000, UL)
+#define PSW_MASK_PSTATE _AC(0x00010000, UL)
+#define PSW_MASK_ASC _AC(0x0000C000, UL)
+#define PSW_MASK_CC _AC(0x00003000, UL)
+#define PSW_MASK_PM _AC(0x00000F00, UL)
+#define PSW_MASK_RI _AC(0x00000000, UL)
+#define PSW_MASK_EA _AC(0x00000000, UL)
+#define PSW_MASK_BA _AC(0x00000000, UL)
+
+#define PSW_MASK_USER _AC(0x0000FF00, UL)
+
+#define PSW_ADDR_AMODE _AC(0x80000000, UL)
+#define PSW_ADDR_INSN _AC(0x7FFFFFFF, UL)
+
+#define PSW_ASC_PRIMARY _AC(0x00000000, UL)
+#define PSW_ASC_ACCREG _AC(0x00004000, UL)
+#define PSW_ASC_SECONDARY _AC(0x00008000, UL)
+#define PSW_ASC_HOME _AC(0x0000C000, UL)
+
+#else /* __s390x__ */
+
+#define PSW_MASK_PER _AC(0x4000000000000000, UL)
+#define PSW_MASK_DAT _AC(0x0400000000000000, UL)
+#define PSW_MASK_IO _AC(0x0200000000000000, UL)
+#define PSW_MASK_EXT _AC(0x0100000000000000, UL)
+#define PSW_MASK_BASE _AC(0x0000000000000000, UL)
+#define PSW_MASK_KEY _AC(0x00F0000000000000, UL)
+#define PSW_MASK_MCHECK _AC(0x0004000000000000, UL)
+#define PSW_MASK_WAIT _AC(0x0002000000000000, UL)
+#define PSW_MASK_PSTATE _AC(0x0001000000000000, UL)
+#define PSW_MASK_ASC _AC(0x0000C00000000000, UL)
+#define PSW_MASK_CC _AC(0x0000300000000000, UL)
+#define PSW_MASK_PM _AC(0x00000F0000000000, UL)
+#define PSW_MASK_RI _AC(0x0000008000000000, UL)
+#define PSW_MASK_EA _AC(0x0000000100000000, UL)
+#define PSW_MASK_BA _AC(0x0000000080000000, UL)
+
+#define PSW_MASK_USER _AC(0x0000FF0180000000, UL)
+
+#define PSW_ADDR_AMODE _AC(0x0000000000000000, UL)
+#define PSW_ADDR_INSN _AC(0xFFFFFFFFFFFFFFFF, UL)
+
+#define PSW_ASC_PRIMARY _AC(0x0000000000000000, UL)
+#define PSW_ASC_ACCREG _AC(0x0000400000000000, UL)
+#define PSW_ASC_SECONDARY _AC(0x0000800000000000, UL)
+#define PSW_ASC_HOME _AC(0x0000C00000000000, UL)
+
+#endif /* __s390x__ */
+
#define NUM_GPRS 16
#define NUM_FPRS 16
#define NUM_CRS 16
@@ -214,69 +274,6 @@ typedef struct {
unsigned long addr;
} __attribute__ ((aligned(8))) psw_t;
-#ifndef __s390x__
-
-#define PSW_MASK_PER 0x40000000UL
-#define PSW_MASK_DAT 0x04000000UL
-#define PSW_MASK_IO 0x02000000UL
-#define PSW_MASK_EXT 0x01000000UL
-#define PSW_MASK_KEY 0x00F00000UL
-#define PSW_MASK_BASE 0x00080000UL /* always one */
-#define PSW_MASK_MCHECK 0x00040000UL
-#define PSW_MASK_WAIT 0x00020000UL
-#define PSW_MASK_PSTATE 0x00010000UL
-#define PSW_MASK_ASC 0x0000C000UL
-#define PSW_MASK_CC 0x00003000UL
-#define PSW_MASK_PM 0x00000F00UL
-#define PSW_MASK_RI 0x00000000UL
-#define PSW_MASK_EA 0x00000000UL
-#define PSW_MASK_BA 0x00000000UL
-
-#define PSW_MASK_USER 0x0000FF00UL
-
-#define PSW_ADDR_AMODE 0x80000000UL
-#define PSW_ADDR_INSN 0x7FFFFFFFUL
-
-#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 20)
-
-#define PSW_ASC_PRIMARY 0x00000000UL
-#define PSW_ASC_ACCREG 0x00004000UL
-#define PSW_ASC_SECONDARY 0x00008000UL
-#define PSW_ASC_HOME 0x0000C000UL
-
-#else /* __s390x__ */
-
-#define PSW_MASK_PER 0x4000000000000000UL
-#define PSW_MASK_DAT 0x0400000000000000UL
-#define PSW_MASK_IO 0x0200000000000000UL
-#define PSW_MASK_EXT 0x0100000000000000UL
-#define PSW_MASK_BASE 0x0000000000000000UL
-#define PSW_MASK_KEY 0x00F0000000000000UL
-#define PSW_MASK_MCHECK 0x0004000000000000UL
-#define PSW_MASK_WAIT 0x0002000000000000UL
-#define PSW_MASK_PSTATE 0x0001000000000000UL
-#define PSW_MASK_ASC 0x0000C00000000000UL
-#define PSW_MASK_CC 0x0000300000000000UL
-#define PSW_MASK_PM 0x00000F0000000000UL
-#define PSW_MASK_RI 0x0000008000000000UL
-#define PSW_MASK_EA 0x0000000100000000UL
-#define PSW_MASK_BA 0x0000000080000000UL
-
-#define PSW_MASK_USER 0x0000FF0180000000UL
-
-#define PSW_ADDR_AMODE 0x0000000000000000UL
-#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
-
-#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 52)
-
-#define PSW_ASC_PRIMARY 0x0000000000000000UL
-#define PSW_ASC_ACCREG 0x0000400000000000UL
-#define PSW_ASC_SECONDARY 0x0000800000000000UL
-#define PSW_ASC_HOME 0x0000C00000000000UL
-
-#endif /* __s390x__ */
-
-
/*
* The s390_regs structure is used to define the elf_gregset_t.
*/
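The PSW_MASK_* constants above decompose the psw_t mask word; the problem-state bit, for example, is what separates user mode from supervisor mode. A small sketch (the helper name is illustrative):

	static inline int psw_in_user_mode(const psw_t *psw)
	{
		return (psw->mask & PSW_MASK_PSTATE) != 0;
	}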
diff --git a/arch/s390/include/uapi/asm/raw3270.h b/arch/s390/include/uapi/asm/raw3270.h
new file mode 100644
index 000000000000..6676f102bd50
--- /dev/null
+++ b/arch/s390/include/uapi/asm/raw3270.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_S390_UAPI_RAW3270_H
+#define __ASM_S390_UAPI_RAW3270_H
+
+/* Local Channel Commands */
+#define TC_WRITE 0x01 /* Write */
+#define TC_RDBUF 0x02 /* Read Buffer */
+#define TC_EWRITE 0x05 /* Erase write */
+#define TC_READMOD 0x06 /* Read modified */
+#define TC_EWRITEA 0x0d /* Erase write alternate */
+#define TC_WRITESF 0x11 /* Write structured field */
+
+/* Buffer Control Orders */
+#define TO_GE 0x08 /* Graphics Escape */
+#define TO_SF 0x1d /* Start field */
+#define TO_SBA 0x11 /* Set buffer address */
+#define TO_IC 0x13 /* Insert cursor */
+#define TO_PT 0x05 /* Program tab */
+#define TO_RA 0x3c /* Repeat to address */
+#define TO_SFE 0x29 /* Start field extended */
+#define TO_EUA 0x12 /* Erase unprotected to address */
+#define TO_MF 0x2c /* Modify field */
+#define TO_SA 0x28 /* Set attribute */
+
+/* Field Attribute Bytes */
+#define TF_INPUT 0x40 /* Visible input */
+#define TF_INPUTN 0x4c /* Invisible input */
+#define TF_INMDT 0xc1 /* Visible, Set-MDT */
+#define TF_LOG 0x60
+
+/* Character Attribute Bytes */
+#define TAT_RESET 0x00
+#define TAT_FIELD 0xc0
+#define TAT_EXTHI 0x41
+#define TAT_FGCOLOR 0x42
+#define TAT_CHARS 0x43
+#define TAT_BGCOLOR 0x45
+#define TAT_TRANS 0x46
+
+/* Extended-Highlighting Bytes */
+#define TAX_RESET 0x00
+#define TAX_BLINK 0xf1
+#define TAX_REVER 0xf2
+#define TAX_UNDER 0xf4
+
+/* Reset value */
+#define TAR_RESET 0x00
+
+/* Color values */
+#define TAC_RESET 0x00
+#define TAC_BLUE 0xf1
+#define TAC_RED 0xf2
+#define TAC_PINK 0xf3
+#define TAC_GREEN 0xf4
+#define TAC_TURQ 0xf5
+#define TAC_YELLOW 0xf6
+#define TAC_WHITE 0xf7
+#define TAC_DEFAULT 0x00
+
+/* Write Control Characters */
+#define TW_NONE 0x40 /* No particular action */
+#define TW_KR 0xc2 /* Keyboard restore */
+#define TW_PLUSALARM 0x04 /* Add this bit for alarm */
+
+#define RAW3270_FIRSTMINOR 1 /* First minor number */
+#define RAW3270_MAXDEVS 255 /* Max number of 3270 devices */
+
+#define AID_CLEAR 0x6d
+#define AID_ENTER 0x7d
+#define AID_PF3 0xf3
+#define AID_PF7 0xf7
+#define AID_PF8 0xf8
+#define AID_READ_PARTITION 0x88
+
+#endif /* __ASM_S390_UAPI_RAW3270_H */
diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h
index 72604f7792c3..f85b50723dd3 100644
--- a/arch/s390/include/uapi/asm/statfs.h
+++ b/arch/s390/include/uapi/asm/statfs.h
@@ -30,7 +30,7 @@ struct statfs {
unsigned int f_namelen;
unsigned int f_frsize;
unsigned int f_flags;
- unsigned int f_spare[4];
+ unsigned int f_spare[5];
};
struct statfs64 {
@@ -45,7 +45,7 @@ struct statfs64 {
unsigned int f_namelen;
unsigned int f_frsize;
unsigned int f_flags;
- unsigned int f_spare[4];
+ unsigned int f_spare[5];
};
#endif
diff --git a/arch/s390/include/uapi/asm/termios.h b/arch/s390/include/uapi/asm/termios.h
deleted file mode 100644
index 54223169c806..000000000000
--- a/arch/s390/include/uapi/asm/termios.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * S390 version
- *
- * Derived from "include/asm-i386/termios.h"
- */
-
-#ifndef _UAPI_S390_TERMIOS_H
-#define _UAPI_S390_TERMIOS_H
-
-#include <asm/termbits.h>
-#include <asm/ioctls.h>
-
-struct winsize {
- unsigned short ws_row;
- unsigned short ws_col;
- unsigned short ws_xpixel;
- unsigned short ws_ypixel;
-};
-
-#define NCC 8
-struct termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[NCC]; /* control characters */
-};
-
-/* modem lines */
-#define TIOCM_LE 0x001
-#define TIOCM_DTR 0x002
-#define TIOCM_RTS 0x004
-#define TIOCM_ST 0x008
-#define TIOCM_SR 0x010
-#define TIOCM_CTS 0x020
-#define TIOCM_CAR 0x040
-#define TIOCM_RNG 0x080
-#define TIOCM_DSR 0x100
-#define TIOCM_CD TIOCM_CAR
-#define TIOCM_RI TIOCM_RNG
-#define TIOCM_OUT1 0x2000
-#define TIOCM_OUT2 0x4000
-#define TIOCM_LOOP 0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-
-
-#endif /* _UAPI_S390_TERMIOS_H */
diff --git a/arch/s390/include/uapi/asm/types.h b/arch/s390/include/uapi/asm/types.h
index da034c606314..84457dbb26b4 100644
--- a/arch/s390/include/uapi/asm/types.h
+++ b/arch/s390/include/uapi/asm/types.h
@@ -12,15 +12,18 @@
#ifndef __ASSEMBLY__
-/* A address type so that arithmetic can be done on it & it can be upgraded to
- 64 bit when necessary
-*/
-typedef unsigned long addr_t;
+typedef unsigned long addr_t;
typedef __signed__ long saddr_t;
typedef struct {
- __u32 u[4];
-} __vector128;
+ union {
+ struct {
+ __u64 high;
+ __u64 low;
+ };
+ __u32 u[4];
+ };
+} __attribute__((packed, aligned(4))) __vector128;
#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/uapi/asm/uvdevice.h b/arch/s390/include/uapi/asm/uvdevice.h
index 10a5ac918e02..b9c2f14a6af3 100644
--- a/arch/s390/include/uapi/asm/uvdevice.h
+++ b/arch/s390/include/uapi/asm/uvdevice.h
@@ -32,6 +32,33 @@ struct uvio_attest {
__u16 reserved136; /* 0x0136 */
};
+/**
+ * uvio_uvdev_info - Information of supported functions
+ * @supp_uvio_cmds - supported IOCTLs by this device
+ * @supp_uv_cmds - supported UVCs corresponding to the IOCTL
+ *
+ * UVIO request to get information about supported request types by this
+ * uvdevice and the Ultravisor. Everything is output. Bits are in LSB0
+ * ordering. If the bit is set in both @supp_uvio_cmds and @supp_uv_cmds, the
+ * uvdevice and the Ultravisor support that call.
+ *
+ * Note that bit 0 (UVIO_IOCTL_UVDEV_INFO_NR) is always zero for `supp_uv_cmds`
+ * as there is no corresponding UV-call.
+ */
+struct uvio_uvdev_info {
+ /*
+ * If bit `n` is set, this device supports the IOCTL with nr `n`.
+ */
+ __u64 supp_uvio_cmds;
+ /*
+ * If bit `n` is set, the Ultravisor(UV) supports the UV-call
+ * corresponding to the IOCTL with nr `n` in the calling context (host
+ * or guest). The value is only valid if the corresponding bit in
+ * @supp_uvio_cmds is set as well.
+ */
+ __u64 supp_uv_cmds;
+};
+
/*
* The following max values define an upper length for the IOCTL in/out buffers.
* However, they do not represent the maximum the Ultravisor allows which is
@@ -42,10 +69,34 @@ struct uvio_attest {
#define UVIO_ATT_ARCB_MAX_LEN 0x100000
#define UVIO_ATT_MEASUREMENT_MAX_LEN 0x8000
#define UVIO_ATT_ADDITIONAL_MAX_LEN 0x8000
+#define UVIO_ADD_SECRET_MAX_LEN 0x100000
+#define UVIO_LIST_SECRETS_LEN 0x1000
#define UVIO_DEVICE_NAME "uv"
#define UVIO_TYPE_UVC 'u'
-#define UVIO_IOCTL_ATT _IOWR(UVIO_TYPE_UVC, 0x01, struct uvio_ioctl_cb)
+enum UVIO_IOCTL_NR {
+ UVIO_IOCTL_UVDEV_INFO_NR = 0x00,
+ UVIO_IOCTL_ATT_NR,
+ UVIO_IOCTL_ADD_SECRET_NR,
+ UVIO_IOCTL_LIST_SECRETS_NR,
+ UVIO_IOCTL_LOCK_SECRETS_NR,
+ /* must be the last entry */
+ UVIO_IOCTL_NUM_IOCTLS
+};
+
+#define UVIO_IOCTL(nr) _IOWR(UVIO_TYPE_UVC, nr, struct uvio_ioctl_cb)
+#define UVIO_IOCTL_UVDEV_INFO UVIO_IOCTL(UVIO_IOCTL_UVDEV_INFO_NR)
+#define UVIO_IOCTL_ATT UVIO_IOCTL(UVIO_IOCTL_ATT_NR)
+#define UVIO_IOCTL_ADD_SECRET UVIO_IOCTL(UVIO_IOCTL_ADD_SECRET_NR)
+#define UVIO_IOCTL_LIST_SECRETS UVIO_IOCTL(UVIO_IOCTL_LIST_SECRETS_NR)
+#define UVIO_IOCTL_LOCK_SECRETS UVIO_IOCTL(UVIO_IOCTL_LOCK_SECRETS_NR)
+
+#define UVIO_SUPP_CALL(nr) (1ULL << (nr))
+#define UVIO_SUPP_UDEV_INFO UVIO_SUPP_CALL(UVIO_IOCTL_UVDEV_INFO_NR)
+#define UVIO_SUPP_ATT UVIO_SUPP_CALL(UVIO_IOCTL_ATT_NR)
+#define UVIO_SUPP_ADD_SECRET UVIO_SUPP_CALL(UVIO_IOCTL_ADD_SECRET_NR)
+#define UVIO_SUPP_LIST_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LIST_SECRETS_NR)
+#define UVIO_SUPP_LOCK_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LOCK_SECRETS_NR)
#endif /* __S390_ASM_UVDEVICE_H */
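A call is usable only when both the uvdevice and the Ultravisor advertise it. Given a struct uvio_uvdev_info filled in by UVIO_IOCTL_UVDEV_INFO, the check is two bit tests (sketch; the helper name is illustrative):

	static inline int supports_add_secret(const struct uvio_uvdev_info *info)
	{
		return (info->supp_uvio_cmds & UVIO_SUPP_ADD_SECRET) &&
		       (info->supp_uv_cmds & UVIO_SUPP_ADD_SECRET);
	}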
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index d83713f67530..f4785abe1b9f 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -85,7 +85,8 @@ struct ica_rsa_modexpo_crt {
struct CPRBX {
__u16 cprb_len; /* CPRB length 220 */
__u8 cprb_ver_id; /* CPRB version id. 0x02 */
- __u8 _pad_000[3]; /* Alignment pad bytes */
+ __u8 ctfm; /* Command Type Filtering Mask */
+ __u8 pad_000[2]; /* Alignment pad bytes */
__u8 func_id[2]; /* function id 0x5432 */
__u8 cprb_flags[4]; /* Flags */
__u32 req_parml; /* request parameter buffer len */