Diffstat (limited to 'include')
-rw-r--r-- include/acpi/acpi_bus.h | 2
-rw-r--r-- include/acpi/nfit.h | 18
-rw-r--r-- include/asm-generic/io.h | 4
-rw-r--r-- include/asm-generic/kvm_para.h | 5
-rw-r--r-- include/clocksource/timer-ti-dm.h | 394
-rw-r--r-- include/dt-bindings/clock/imx7d-clock.h | 5
-rw-r--r-- include/dt-bindings/clock/tegra194-clock.h | 321
-rw-r--r-- include/dt-bindings/gpio/tegra194-gpio.h | 61
-rw-r--r-- include/dt-bindings/input/gpio-keys.h | 13
-rw-r--r-- include/dt-bindings/mfd/stm32f7-rcc.h | 1
-rw-r--r-- include/dt-bindings/power/mt2712-power.h | 3
-rw-r--r-- include/dt-bindings/power/mt7623a-power.h | 10
-rw-r--r-- include/dt-bindings/power/r8a77965-sysc.h | 30
-rw-r--r-- include/dt-bindings/power/r8a77980-sysc.h | 43
-rw-r--r-- include/dt-bindings/power/tegra194-powergate.h | 35
-rw-r--r-- include/dt-bindings/reset/stm32mp1-resets.h | 108
-rw-r--r-- include/dt-bindings/reset/tegra194-reset.h | 152
-rw-r--r-- include/dt-bindings/sound/rt5651.h | 15
-rw-r--r-- include/kvm/arm_vgic.h | 14
-rw-r--r-- include/linux/audit.h | 6
-rw-r--r-- include/linux/blk-cgroup.h | 1
-rw-r--r-- include/linux/blk-mq-pci.h | 3
-rw-r--r-- include/linux/blk_types.h | 5
-rw-r--r-- include/linux/blkdev.h | 121
-rw-r--r-- include/linux/bsg-lib.h | 7
-rw-r--r-- include/linux/bsg.h | 35
-rw-r--r-- include/linux/clk/ti.h | 1
-rw-r--r-- include/linux/compat.h | 6
-rw-r--r-- include/linux/device-mapper.h | 14
-rw-r--r-- include/linux/dm-bufio.h | 148
-rw-r--r-- include/linux/dma-mapping.h | 19
-rw-r--r-- include/linux/dmapool.h | 30
-rw-r--r-- include/linux/dmi.h | 2
-rw-r--r-- include/linux/edac.h | 3
-rw-r--r-- include/linux/f2fs_fs.h | 20
-rw-r--r-- include/linux/fault-inject.h | 5
-rw-r--r-- include/linux/fs.h | 13
-rw-r--r-- include/linux/fscache-cache.h | 26
-rw-r--r-- include/linux/fscache.h | 142
-rw-r--r-- include/linux/fsnotify_backend.h | 6
-rw-r--r-- include/linux/gpio/driver.h | 16
-rw-r--r-- include/linux/gpio_keys.h | 2
-rw-r--r-- include/linux/hid.h | 75
-rw-r--r-- include/linux/hwmon.h | 1
-rw-r--r-- include/linux/hyperv.h | 1
-rw-r--r-- include/linux/i2c-pca-platform.h | 3
-rw-r--r-- include/linux/i2c.h | 30
-rw-r--r-- include/linux/ide.h | 1
-rw-r--r-- include/linux/inet.h | 1
-rw-r--r-- include/linux/kasan.h | 4
-rw-r--r-- include/linux/kernel.h | 72
-rw-r--r-- include/linux/kvm_para.h | 5
-rw-r--r-- include/linux/libfdt_env.h | 6
-rw-r--r-- include/linux/libps2.h | 38
-rw-r--r-- include/linux/lightnvm.h | 334
-rw-r--r-- include/linux/list_lru.h | 3
-rw-r--r-- include/linux/logic_pio.h | 123
-rw-r--r-- include/linux/lsm_hooks.h | 475
-rw-r--r-- include/linux/memblock.h | 13
-rw-r--r-- include/linux/memory.h | 3
-rw-r--r-- include/linux/memory_hotplug.h | 53
-rw-r--r-- include/linux/migrate.h | 2
-rw-r--r-- include/linux/mlx4/device.h | 4
-rw-r--r-- include/linux/mlx5/device.h | 12
-rw-r--r-- include/linux/mlx5/driver.h | 15
-rw-r--r-- include/linux/mlx5/fs_helpers.h | 8
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 102
-rw-r--r-- include/linux/mm.h | 56
-rw-r--r-- include/linux/mm_types.h | 2
-rw-r--r-- include/linux/mmdebug.h | 8
-rw-r--r-- include/linux/mmzone.h | 8
-rw-r--r-- include/linux/mtd/bbm.h | 2
-rw-r--r-- include/linux/mtd/mtd.h | 19
-rw-r--r-- include/linux/mtd/nand.h | 731
-rw-r--r-- include/linux/mtd/nand_ecc.h | 2
-rw-r--r-- include/linux/mtd/ndfc.h | 2
-rw-r--r-- include/linux/mtd/partitions.h | 1
-rw-r--r-- include/linux/mtd/rawnand.h | 106
-rw-r--r-- include/linux/node.h | 4
-rw-r--r-- include/linux/of.h | 12
-rw-r--r-- include/linux/page-flags.h | 22
-rw-r--r-- include/linux/page_ref.h | 3
-rw-r--r-- include/linux/pci-epc.h | 11
-rw-r--r-- include/linux/pci-epf.h | 2
-rw-r--r-- include/linux/pci.h | 94
-rw-r--r-- include/linux/pci_ids.h | 1
-rw-r--r-- include/linux/pcieport_if.h | 71
-rw-r--r-- include/linux/platform_data/asoc-ti-mcbsp.h | 12
-rw-r--r-- include/linux/platform_data/dmtimer-omap.h | 38
-rw-r--r-- include/linux/platform_data/gpio-htc-egpio.h | 2
-rw-r--r-- include/linux/platform_data/gpio-omap.h | 5
-rw-r--r-- include/linux/platform_data/mtd-nand-pxa3xx.h | 43
-rw-r--r-- include/linux/platform_data/phy-da8xx-usb.h | 21
-rw-r--r-- include/linux/platform_data/pm33xx.h | 42
-rw-r--r-- include/linux/platform_data/spi-omap2-mcspi.h | 8
-rw-r--r-- include/linux/platform_data/ti-sysc.h | 50
-rw-r--r-- include/linux/power/smartreflex.h | 10
-rw-r--r-- include/linux/printk.h | 7
-rw-r--r-- include/linux/pstore_ram.h | 1
-rw-r--r-- include/linux/quota.h | 1
-rw-r--r-- include/linux/raid/pq.h | 4
-rw-r--r-- include/linux/raid_class.h | 1
-rw-r--r-- include/linux/reset-controller.h | 30
-rw-r--r-- include/linux/rtsx_pci.h | 12
-rw-r--r-- include/linux/sbitmap.h | 8
-rw-r--r-- include/linux/scatterlist.h | 23
-rw-r--r-- include/linux/sched/signal.h | 2
-rw-r--r-- include/linux/scmi_protocol.h | 277
-rw-r--r-- include/linux/security.h | 30
-rw-r--r-- include/linux/serial_s3c.h | 17
-rw-r--r-- include/linux/slab.h | 20
-rw-r--r-- include/linux/slab_def.h | 4
-rw-r--r-- include/linux/slub_def.h | 28
-rw-r--r-- include/linux/soc/mediatek/infracfg.h | 4
-rw-r--r-- include/linux/soc/samsung/exynos-pmu.h | 5
-rw-r--r-- include/linux/soc/samsung/exynos-regs-pmu.h | 6
-rw-r--r-- include/linux/sunrpc/svc.h | 6
-rw-r--r-- include/linux/sunrpc/svc_rdma.h | 3
-rw-r--r-- include/linux/sunrpc/svc_xprt.h | 6
-rw-r--r-- include/linux/swap.h | 38
-rw-r--r-- include/linux/tpm.h | 2
-rw-r--r-- include/linux/usb/audio-v2.h | 8
-rw-r--r-- include/linux/usb/audio-v3.h | 395
-rw-r--r-- include/linux/usb/gadget.h | 12
-rw-r--r-- include/linux/zsmalloc.h | 2
-rw-r--r-- include/media/i2c/saa6588.h | 1
-rw-r--r-- include/net/iw_handler.h | 2
-rw-r--r-- include/net/sctp/sctp.h | 4
-rw-r--r-- include/net/sctp/structs.h | 12
-rw-r--r-- include/net/sock.h | 4
-rw-r--r-- include/rdma/ib_addr.h | 9
-rw-r--r-- include/rdma/ib_cache.h | 29
-rw-r--r-- include/rdma/ib_sa.h | 13
-rw-r--r-- include/rdma/ib_verbs.h | 212
-rw-r--r-- include/rdma/rdma_cm.h | 44
-rw-r--r-- include/rdma/rdma_vt.h | 2
-rw-r--r-- include/rdma/restrack.h | 26
-rw-r--r-- include/rdma/uverbs_ioctl.h | 153
-rw-r--r-- include/rdma/uverbs_named_ioctl.h | 90
-rw-r--r-- include/rdma/uverbs_std_types.h | 34
-rw-r--r-- include/scsi/scsi.h | 2
-rw-r--r-- include/scsi/scsi_cmnd.h | 5
-rw-r--r-- include/scsi/scsi_host.h | 36
-rw-r--r-- include/soc/bcm2835/raspberrypi-firmware.h | 18
-rw-r--r-- include/soc/tegra/bpmp.h | 4
-rw-r--r-- include/sound/da7219.h | 2
-rw-r--r-- include/sound/dmaengine_pcm.h | 7
-rw-r--r-- include/sound/emu10k1.h | 4
-rw-r--r-- include/sound/hdaudio.h | 2
-rw-r--r-- include/sound/pcm_oss.h | 1
-rw-r--r-- include/sound/rt5651.h | 29
-rw-r--r-- include/sound/rt5659.h | 1
-rw-r--r-- include/sound/soc-dapm.h | 16
-rw-r--r-- include/sound/soc.h | 14
-rw-r--r-- include/trace/events/cachefiles.h | 325
-rw-r--r-- include/trace/events/fscache.h | 537
-rw-r--r-- include/trace/events/migrate.h | 2
-rw-r--r-- include/trace/events/sunrpc.h | 204
-rw-r--r-- include/trace/events/vmscan.h | 17
-rw-r--r-- include/uapi/asm-generic/siginfo.h | 7
-rw-r--r-- include/uapi/linux/blktrace_api.h | 2
-rw-r--r-- include/uapi/linux/dm-ioctl.h | 4
-rw-r--r-- include/uapi/linux/inotify.h | 8
-rw-r--r-- include/uapi/linux/kvm.h | 23
-rw-r--r-- include/uapi/linux/msdos_fs.h | 2
-rw-r--r-- include/uapi/linux/pci_regs.h | 7
-rw-r--r-- include/uapi/linux/qemu_fw_cfg.h | 97
-rw-r--r-- include/uapi/linux/sctp.h | 1
-rw-r--r-- include/uapi/linux/usb/audio.h | 1
-rw-r--r-- include/uapi/linux/vfio.h | 27
-rw-r--r-- include/uapi/rdma/bnxt_re-abi.h | 21
-rw-r--r-- include/uapi/rdma/cxgb3-abi.h | 17
-rw-r--r-- include/uapi/rdma/cxgb4-abi.h | 29
-rw-r--r-- include/uapi/rdma/hfi/hfi1_ioctl.h | 32
-rw-r--r-- include/uapi/rdma/hfi/hfi1_user.h | 10
-rw-r--r-- include/uapi/rdma/hns-abi.h | 22
-rw-r--r-- include/uapi/rdma/i40iw-abi.h | 107
-rw-r--r-- include/uapi/rdma/ib_user_cm.h | 48
-rw-r--r-- include/uapi/rdma/ib_user_ioctl_cmds.h | 134
-rw-r--r-- include/uapi/rdma/ib_user_ioctl_verbs.h | 96
-rw-r--r-- include/uapi/rdma/ib_user_mad.h | 4
-rw-r--r-- include/uapi/rdma/ib_user_verbs.h | 195
-rw-r--r-- include/uapi/rdma/mlx4-abi.h | 52
-rw-r--r-- include/uapi/rdma/mlx5-abi.h | 72
-rw-r--r-- include/uapi/rdma/mlx5_user_ioctl_cmds.h | 48
-rw-r--r-- include/uapi/rdma/mlx5_user_ioctl_verbs.h | 43
-rw-r--r-- include/uapi/rdma/mthca-abi.h | 10
-rw-r--r-- include/uapi/rdma/nes-abi.h | 6
-rw-r--r-- include/uapi/rdma/ocrdma-abi.h | 36
-rw-r--r-- include/uapi/rdma/qedr-abi.h | 20
-rw-r--r-- include/uapi/rdma/rdma_netlink.h | 51
-rw-r--r-- include/uapi/rdma/rdma_user_cm.h | 49
-rw-r--r-- include/uapi/rdma/rdma_user_ioctl.h | 38
-rw-r--r-- include/uapi/rdma/rdma_user_ioctl_cmds.h | 99
-rw-r--r-- include/uapi/rdma/rdma_user_rxe.h | 58
-rw-r--r-- include/uapi/rdma/vmw_pvrdma-abi.h | 49
-rw-r--r-- include/uapi/sound/asound.h | 1
197 files changed, 7197 insertions, 1779 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index c9608b0b80c6..ba4dd54f2c82 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -215,7 +215,7 @@ struct acpi_device_flags {
u32 of_compatible_ok:1;
u32 coherent_dma:1;
u32 cca_seen:1;
- u32 serial_bus_slave:1;
+ u32 enumeration_by_parent:1;
u32 reserved:19;
};
diff --git a/include/acpi/nfit.h b/include/acpi/nfit.h
new file mode 100644
index 000000000000..86ed07c1200d
--- /dev/null
+++ b/include/acpi/nfit.h
@@ -0,0 +1,18 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef __ACPI_NFIT_H
+#define __ACPI_NFIT_H
+
+#if IS_ENABLED(CONFIG_ACPI_NFIT)
+int nfit_get_smbios_id(u32 device_handle, u16 *flags);
+#else
+static inline int nfit_get_smbios_id(u32 device_handle, u16 *flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif /* __ACPI_NFIT_H */
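
A minimal consumer sketch of the new helper, assuming the return value carries the SMBIOS handle on success; the caller below is hypothetical, only the prototype comes from the header above.

#include <linux/printk.h>
#include <acpi/nfit.h>

/* Hypothetical caller: translate an NFIT device handle to its SMBIOS handle. */
static int example_report_dimm(u32 device_handle)
{
	u16 flags = 0;
	int id = nfit_get_smbios_id(device_handle, &flags);

	if (id < 0)	/* e.g. -EOPNOTSUPP when CONFIG_ACPI_NFIT is disabled */
		return id;

	pr_info("NFIT handle %#x -> SMBIOS handle %#x (flags %#x)\n",
		device_handle, id, flags);
	return 0;
}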
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 8de9cc251286..04c4cc6fd820 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -351,6 +351,8 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
#define IO_SPACE_LIMIT 0xffff
#endif
+#include <linux/logic_pio.h>
+
/*
* {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
* implemented on hardware that needs an additional delay for I/O accesses to
@@ -899,7 +901,7 @@ static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
- return PCI_IOBASE + (port & IO_SPACE_LIMIT);
+ return PCI_IOBASE + (port & MMIO_UPPER_LIMIT);
}
#endif
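
A small illustration of the hunk above: ioport_map() now masks the port with MMIO_UPPER_LIMIT (defined in the new <linux/logic_pio.h>, not shown here), keeping the PCI_IOBASE translation within the MMIO part of the I/O port space while the range above it is reserved for logical/indirect PIO. Sketch only; the port number is made up.

static void __iomem *example_map_legacy_uart(void)
{
	/* A hypothetical legacy UART port, well inside the MMIO port range */
	return ioport_map(0x3f8, 8);
}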
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
index 18c6abe81fbd..728e5c5706c4 100644
--- a/include/asm-generic/kvm_para.h
+++ b/include/asm-generic/kvm_para.h
@@ -19,6 +19,11 @@ static inline unsigned int kvm_arch_para_features(void)
return 0;
}
+static inline unsigned int kvm_arch_para_hints(void)
+{
+ return 0;
+}
+
static inline bool kvm_para_available(void)
{
return false;
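
A hedged sketch of how the new stub fits in: generic code can query hint bits unconditionally, and architectures without a real kvm_arch_para_hints() simply report none. The wrapper name and hint bit below are illustrative, not part of this header.

/* Illustrative only: test a (hypothetical) hint bit advertised by the host. */
static inline bool example_has_kvm_hint(unsigned int hint)
{
	return kvm_arch_para_hints() & (1U << hint);
}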
diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h
new file mode 100644
index 000000000000..7d9598dc578d
--- /dev/null
+++ b/include/clocksource/timer-ti-dm.h
@@ -0,0 +1,394 @@
+/*
+ * OMAP Dual-Mode Timers
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Tarun Kanti DebBarma <tarun.kanti@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ *
+ * Platform device conversion and hwmod support.
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Lauri Leukkunen <lauri.leukkunen@nokia.com>
+ * PWM and clock framework support by Timo Teras.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#ifndef __CLOCKSOURCE_DMTIMER_H
+#define __CLOCKSOURCE_DMTIMER_H
+
+/* clock sources */
+#define OMAP_TIMER_SRC_SYS_CLK 0x00
+#define OMAP_TIMER_SRC_32_KHZ 0x01
+#define OMAP_TIMER_SRC_EXT_CLK 0x02
+
+/* timer interrupt enable bits */
+#define OMAP_TIMER_INT_CAPTURE (1 << 2)
+#define OMAP_TIMER_INT_OVERFLOW (1 << 1)
+#define OMAP_TIMER_INT_MATCH (1 << 0)
+
+/* trigger types */
+#define OMAP_TIMER_TRIGGER_NONE 0x00
+#define OMAP_TIMER_TRIGGER_OVERFLOW 0x01
+#define OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02
+
+/* posted mode types */
+#define OMAP_TIMER_NONPOSTED 0x00
+#define OMAP_TIMER_POSTED 0x01
+
+/* timer capabilities used in hwmod database */
+#define OMAP_TIMER_SECURE 0x80000000
+#define OMAP_TIMER_ALWON 0x40000000
+#define OMAP_TIMER_HAS_PWM 0x20000000
+#define OMAP_TIMER_NEEDS_RESET 0x10000000
+#define OMAP_TIMER_HAS_DSP_IRQ 0x08000000
+
+/*
+ * timer errata flags
+ *
+ * Errata i103/i767 impacts all OMAP3/4/5 devices including AM33xx. This
+ * errata prevents us from using posted mode on these devices, unless the
+ * timer counter register is never read. For more details please refer to
+ * the OMAP3/4/5 errata documents.
+ */
+#define OMAP_TIMER_ERRATA_I103_I767 0x80000000
+
+struct timer_regs {
+ u32 tidr;
+ u32 tier;
+ u32 twer;
+ u32 tclr;
+ u32 tcrr;
+ u32 tldr;
+ u32 ttrg;
+ u32 twps;
+ u32 tmar;
+ u32 tcar1;
+ u32 tsicr;
+ u32 tcar2;
+ u32 tpir;
+ u32 tnir;
+ u32 tcvr;
+ u32 tocr;
+ u32 towr;
+};
+
+struct omap_dm_timer {
+ int id;
+ int irq;
+ struct clk *fclk;
+
+ void __iomem *io_base;
+ void __iomem *irq_stat; /* TISR/IRQSTATUS interrupt status */
+ void __iomem *irq_ena; /* irq enable */
+ void __iomem *irq_dis; /* irq disable, only on v2 ip */
+ void __iomem *pend; /* write pending */
+ void __iomem *func_base; /* function register base */
+
+ unsigned long rate;
+ unsigned reserved:1;
+ unsigned posted:1;
+ struct timer_regs context;
+ int (*get_context_loss_count)(struct device *);
+ int ctx_loss_count;
+ int revision;
+ u32 capability;
+ u32 errata;
+ struct platform_device *pdev;
+ struct list_head node;
+};
+
+int omap_dm_timer_reserve_systimer(int id);
+struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap);
+
+int omap_dm_timer_get_irq(struct omap_dm_timer *timer);
+
+u32 omap_dm_timer_modify_idlect_mask(u32 inputmask);
+
+int omap_dm_timer_trigger(struct omap_dm_timer *timer);
+
+int omap_dm_timers_active(void);
+
+/*
+ * Do not use the defines below, they are not needed. They should be only
+ * used by dmtimer.c and sys_timer related code.
+ */
+
+/*
+ * The interrupt registers are different between v1 and v2 ip.
+ * These registers are offsets from timer->iobase.
+ */
+#define OMAP_TIMER_ID_OFFSET 0x00
+#define OMAP_TIMER_OCP_CFG_OFFSET 0x10
+
+#define OMAP_TIMER_V1_SYS_STAT_OFFSET 0x14
+#define OMAP_TIMER_V1_STAT_OFFSET 0x18
+#define OMAP_TIMER_V1_INT_EN_OFFSET 0x1c
+
+#define OMAP_TIMER_V2_IRQSTATUS_RAW 0x24
+#define OMAP_TIMER_V2_IRQSTATUS 0x28
+#define OMAP_TIMER_V2_IRQENABLE_SET 0x2c
+#define OMAP_TIMER_V2_IRQENABLE_CLR 0x30
+
+/*
+ * The functional registers have a different base on v1 and v2 ip.
+ * These registers are offsets from timer->func_base. The func_base
+ * is the same as io_base for v1 and io_base + 0x14 for v2 ip.
+ *
+ */
+#define OMAP_TIMER_V2_FUNC_OFFSET 0x14
+
+#define _OMAP_TIMER_WAKEUP_EN_OFFSET 0x20
+#define _OMAP_TIMER_CTRL_OFFSET 0x24
+#define OMAP_TIMER_CTRL_GPOCFG (1 << 14)
+#define OMAP_TIMER_CTRL_CAPTMODE (1 << 13)
+#define OMAP_TIMER_CTRL_PT (1 << 12)
+#define OMAP_TIMER_CTRL_TCM_LOWTOHIGH (0x1 << 8)
+#define OMAP_TIMER_CTRL_TCM_HIGHTOLOW (0x2 << 8)
+#define OMAP_TIMER_CTRL_TCM_BOTHEDGES (0x3 << 8)
+#define OMAP_TIMER_CTRL_SCPWM (1 << 7)
+#define OMAP_TIMER_CTRL_CE (1 << 6) /* compare enable */
+#define OMAP_TIMER_CTRL_PRE (1 << 5) /* prescaler enable */
+#define OMAP_TIMER_CTRL_PTV_SHIFT 2 /* prescaler value shift */
+#define OMAP_TIMER_CTRL_POSTED (1 << 2)
+#define OMAP_TIMER_CTRL_AR (1 << 1) /* auto-reload enable */
+#define OMAP_TIMER_CTRL_ST (1 << 0) /* start timer */
+#define _OMAP_TIMER_COUNTER_OFFSET 0x28
+#define _OMAP_TIMER_LOAD_OFFSET 0x2c
+#define _OMAP_TIMER_TRIGGER_OFFSET 0x30
+#define _OMAP_TIMER_WRITE_PEND_OFFSET 0x34
+#define WP_NONE 0 /* no write pending bit */
+#define WP_TCLR (1 << 0)
+#define WP_TCRR (1 << 1)
+#define WP_TLDR (1 << 2)
+#define WP_TTGR (1 << 3)
+#define WP_TMAR (1 << 4)
+#define WP_TPIR (1 << 5)
+#define WP_TNIR (1 << 6)
+#define WP_TCVR (1 << 7)
+#define WP_TOCR (1 << 8)
+#define WP_TOWR (1 << 9)
+#define _OMAP_TIMER_MATCH_OFFSET 0x38
+#define _OMAP_TIMER_CAPTURE_OFFSET 0x3c
+#define _OMAP_TIMER_IF_CTRL_OFFSET 0x40
+#define _OMAP_TIMER_CAPTURE2_OFFSET 0x44 /* TCAR2, 34xx only */
+#define _OMAP_TIMER_TICK_POS_OFFSET 0x48 /* TPIR, 34xx only */
+#define _OMAP_TIMER_TICK_NEG_OFFSET 0x4c /* TNIR, 34xx only */
+#define _OMAP_TIMER_TICK_COUNT_OFFSET 0x50 /* TCVR, 34xx only */
+#define _OMAP_TIMER_TICK_INT_MASK_SET_OFFSET 0x54 /* TOCR, 34xx only */
+#define _OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET 0x58 /* TOWR, 34xx only */
+
+/* register offsets with the write pending bit encoded */
+#define WPSHIFT 16
+
+#define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_CTRL_REG (_OMAP_TIMER_CTRL_OFFSET \
+ | (WP_TCLR << WPSHIFT))
+
+#define OMAP_TIMER_COUNTER_REG (_OMAP_TIMER_COUNTER_OFFSET \
+ | (WP_TCRR << WPSHIFT))
+
+#define OMAP_TIMER_LOAD_REG (_OMAP_TIMER_LOAD_OFFSET \
+ | (WP_TLDR << WPSHIFT))
+
+#define OMAP_TIMER_TRIGGER_REG (_OMAP_TIMER_TRIGGER_OFFSET \
+ | (WP_TTGR << WPSHIFT))
+
+#define OMAP_TIMER_WRITE_PEND_REG (_OMAP_TIMER_WRITE_PEND_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_MATCH_REG (_OMAP_TIMER_MATCH_OFFSET \
+ | (WP_TMAR << WPSHIFT))
+
+#define OMAP_TIMER_CAPTURE_REG (_OMAP_TIMER_CAPTURE_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_IF_CTRL_REG (_OMAP_TIMER_IF_CTRL_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_CAPTURE2_REG (_OMAP_TIMER_CAPTURE2_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_TICK_POS_REG (_OMAP_TIMER_TICK_POS_OFFSET \
+ | (WP_TPIR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_NEG_REG (_OMAP_TIMER_TICK_NEG_OFFSET \
+ | (WP_TNIR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_COUNT_REG (_OMAP_TIMER_TICK_COUNT_OFFSET \
+ | (WP_TCVR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_INT_MASK_SET_REG \
+ (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
+ (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
+
+/*
+ * The below are inlined to optimize code size for system timers. Other code
+ * should not need these at all, see
+ * include/linux/platform_data/pwm_omap_dmtimer.h
+ */
+#if defined(CONFIG_ARCH_OMAP1) || defined(CONFIG_ARCH_OMAP2PLUS)
+static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg,
+ int posted)
+{
+ if (posted)
+ while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
+ cpu_relax();
+
+ return readl_relaxed(timer->func_base + (reg & 0xff));
+}
+
+static inline void __omap_dm_timer_write(struct omap_dm_timer *timer,
+ u32 reg, u32 val, int posted)
+{
+ if (posted)
+ while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
+ cpu_relax();
+
+ writel_relaxed(val, timer->func_base + (reg & 0xff));
+}
+
+static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer)
+{
+ u32 tidr;
+
+ /* Assume v1 ip if bits [31:16] are zero */
+ tidr = readl_relaxed(timer->io_base);
+ if (!(tidr >> 16)) {
+ timer->revision = 1;
+ timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET;
+ timer->irq_ena = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
+ timer->irq_dis = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
+ timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET;
+ timer->func_base = timer->io_base;
+ } else {
+ timer->revision = 2;
+ timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS;
+ timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET;
+ timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR;
+ timer->pend = timer->io_base +
+ _OMAP_TIMER_WRITE_PEND_OFFSET +
+ OMAP_TIMER_V2_FUNC_OFFSET;
+ timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET;
+ }
+}
+
+/*
+ * __omap_dm_timer_enable_posted - enables write posted mode
+ * @timer: pointer to timer instance handle
+ *
+ * Enables the write posted mode for the timer. When posted mode is enabled
+ * writes to certain timer registers are immediately acknowledged by the
+ * internal bus and hence prevents stalling the CPU waiting for the write to
+ * complete. Enabling this feature can improve performance for writing to the
+ * timer registers.
+ */
+static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer)
+{
+ if (timer->posted)
+ return;
+
+ if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) {
+ timer->posted = OMAP_TIMER_NONPOSTED;
+ __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0);
+ return;
+ }
+
+ __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG,
+ OMAP_TIMER_CTRL_POSTED, 0);
+ timer->context.tsicr = OMAP_TIMER_CTRL_POSTED;
+ timer->posted = OMAP_TIMER_POSTED;
+}
+
+/**
+ * __omap_dm_timer_override_errata - override errata flags for a timer
+ * @timer: pointer to timer handle
+ * @errata: errata flags to be ignored
+ *
+ * For a given timer, override a timer errata by clearing the flags
+ * specified by the errata argument. A specific erratum should only be
+ * overridden for a timer if the timer is used in such a way the erratum
+ * has no impact.
+ */
+static inline void __omap_dm_timer_override_errata(struct omap_dm_timer *timer,
+ u32 errata)
+{
+ timer->errata &= ~errata;
+}
+
+static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer,
+ int posted, unsigned long rate)
+{
+ u32 l;
+
+ l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
+ if (l & OMAP_TIMER_CTRL_ST) {
+ l &= ~0x1;
+ __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted);
+#ifdef CONFIG_ARCH_OMAP2PLUS
+ /* Readback to make sure write has completed */
+ __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
+ /*
+ * Wait for functional clock period x 3.5 to make sure that
+ * timer is stopped
+ */
+ udelay(3500000 / rate + 1);
+#endif
+ }
+
+ /* Ack possibly pending interrupt */
+ writel_relaxed(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat);
+}
+
+static inline void __omap_dm_timer_load_start(struct omap_dm_timer *timer,
+ u32 ctrl, unsigned int load,
+ int posted)
+{
+ __omap_dm_timer_write(timer, OMAP_TIMER_COUNTER_REG, load, posted);
+ __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, ctrl, posted);
+}
+
+static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer,
+ unsigned int value)
+{
+ writel_relaxed(value, timer->irq_ena);
+ __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0);
+}
+
+static inline unsigned int
+__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted)
+{
+ return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted);
+}
+
+static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer,
+ unsigned int value)
+{
+ writel_relaxed(value, timer->irq_stat);
+}
+#endif /* CONFIG_ARCH_OMAP1 || CONFIG_ARCH_OMAP2PLUS */
+#endif /* __CLOCKSOURCE_DMTIMER_H */
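
A worked example of the WPSHIFT register encoding and of the load/start helper above; example_start_timer() is a hypothetical caller, everything else restates definitions already in this header.

/*
 * OMAP_TIMER_CTRL_REG = _OMAP_TIMER_CTRL_OFFSET | (WP_TCLR << WPSHIFT)
 *                     = 0x24 | (0x1 << 16) = 0x00010024
 *
 * __omap_dm_timer_write() splits that back apart:
 *   reg & 0xff     -> 0x24    (offset from timer->func_base)
 *   reg >> WPSHIFT -> WP_TCLR (pending bit polled in TWPS before a posted write)
 */
static void example_start_timer(struct omap_dm_timer *timer, unsigned int load)
{
	/* Program the counter, then start with auto-reload, honouring posted mode */
	__omap_dm_timer_load_start(timer,
				   OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
				   load, timer->posted);
}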
diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h
index e2f99ae72d5c..b2325d3e236a 100644
--- a/include/dt-bindings/clock/imx7d-clock.h
+++ b/include/dt-bindings/clock/imx7d-clock.h
@@ -452,5 +452,8 @@
#define IMX7D_OCOTP_CLK 439
#define IMX7D_NAND_RAWNAND_CLK 440
#define IMX7D_NAND_USDHC_BUS_RAWNAND_CLK 441
-#define IMX7D_CLK_END 442
+#define IMX7D_SNVS_CLK 442
+#define IMX7D_CAAM_CLK 443
+#define IMX7D_KPP_ROOT_CLK 444
+#define IMX7D_CLK_END 445
#endif /* __DT_BINDINGS_CLOCK_IMX7D_H */
diff --git a/include/dt-bindings/clock/tegra194-clock.h b/include/dt-bindings/clock/tegra194-clock.h
new file mode 100644
index 000000000000..a2ff66342d69
--- /dev/null
+++ b/include/dt-bindings/clock/tegra194-clock.h
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __ABI_MACH_T194_CLOCK_H
+#define __ABI_MACH_T194_CLOCK_H
+
+#define TEGRA194_CLK_ACTMON 1
+#define TEGRA194_CLK_ADSP 2
+#define TEGRA194_CLK_ADSPNEON 3
+#define TEGRA194_CLK_AHUB 4
+#define TEGRA194_CLK_APB2APE 5
+#define TEGRA194_CLK_APE 6
+#define TEGRA194_CLK_AUD_MCLK 7
+#define TEGRA194_CLK_AXI_CBB 8
+#define TEGRA194_CLK_CAN1 9
+#define TEGRA194_CLK_CAN1_HOST 10
+#define TEGRA194_CLK_CAN2 11
+#define TEGRA194_CLK_CAN2_HOST 12
+#define TEGRA194_CLK_CEC 13
+#define TEGRA194_CLK_CLK_M 14
+#define TEGRA194_CLK_DMIC1 15
+#define TEGRA194_CLK_DMIC2 16
+#define TEGRA194_CLK_DMIC3 17
+#define TEGRA194_CLK_DMIC4 18
+#define TEGRA194_CLK_DPAUX 19
+#define TEGRA194_CLK_DPAUX1 20
+#define TEGRA194_CLK_ACLK 21
+#define TEGRA194_CLK_MSS_ENCRYPT 22
+#define TEGRA194_CLK_EQOS_RX_INPUT 23
+#define TEGRA194_CLK_IQC2 24
+#define TEGRA194_CLK_AON_APB 25
+#define TEGRA194_CLK_AON_NIC 26
+#define TEGRA194_CLK_AON_CPU_NIC 27
+#define TEGRA194_CLK_PLLA1 28
+#define TEGRA194_CLK_DSPK1 29
+#define TEGRA194_CLK_DSPK2 30
+#define TEGRA194_CLK_EMC 31
+#define TEGRA194_CLK_EQOS_AXI 32
+#define TEGRA194_CLK_EQOS_PTP_REF 33
+#define TEGRA194_CLK_EQOS_RX 34
+#define TEGRA194_CLK_EQOS_TX 35
+#define TEGRA194_CLK_EXTPERIPH1 36
+#define TEGRA194_CLK_EXTPERIPH2 37
+#define TEGRA194_CLK_EXTPERIPH3 38
+#define TEGRA194_CLK_EXTPERIPH4 39
+#define TEGRA194_CLK_FUSE 40
+#define TEGRA194_CLK_GPCCLK 41
+#define TEGRA194_CLK_GPU_PWR 42
+#define TEGRA194_CLK_HDA 43
+#define TEGRA194_CLK_HDA2CODEC_2X 44
+#define TEGRA194_CLK_HDA2HDMICODEC 45
+#define TEGRA194_CLK_HOST1X 46
+#define TEGRA194_CLK_HSIC_TRK 47
+#define TEGRA194_CLK_I2C1 48
+#define TEGRA194_CLK_I2C2 49
+#define TEGRA194_CLK_I2C3 50
+#define TEGRA194_CLK_I2C4 51
+#define TEGRA194_CLK_I2C6 52
+#define TEGRA194_CLK_I2C7 53
+#define TEGRA194_CLK_I2C8 54
+#define TEGRA194_CLK_I2C9 55
+#define TEGRA194_CLK_I2S1 56
+#define TEGRA194_CLK_I2S1_SYNC_INPUT 57
+#define TEGRA194_CLK_I2S2 58
+#define TEGRA194_CLK_I2S2_SYNC_INPUT 59
+#define TEGRA194_CLK_I2S3 60
+#define TEGRA194_CLK_I2S3_SYNC_INPUT 61
+#define TEGRA194_CLK_I2S4 62
+#define TEGRA194_CLK_I2S4_SYNC_INPUT 63
+#define TEGRA194_CLK_I2S5 64
+#define TEGRA194_CLK_I2S5_SYNC_INPUT 65
+#define TEGRA194_CLK_I2S6 66
+#define TEGRA194_CLK_I2S6_SYNC_INPUT 67
+#define TEGRA194_CLK_IQC1 68
+#define TEGRA194_CLK_ISP 69
+#define TEGRA194_CLK_KFUSE 70
+#define TEGRA194_CLK_MAUD 71
+#define TEGRA194_CLK_MIPI_CAL 72
+#define TEGRA194_CLK_MPHY_CORE_PLL_FIXED 73
+#define TEGRA194_CLK_MPHY_L0_RX_ANA 74
+#define TEGRA194_CLK_MPHY_L0_RX_LS_BIT 75
+#define TEGRA194_CLK_MPHY_L0_RX_SYMB 76
+#define TEGRA194_CLK_MPHY_L0_TX_LS_3XBIT 77
+#define TEGRA194_CLK_MPHY_L0_TX_SYMB 78
+#define TEGRA194_CLK_MPHY_L1_RX_ANA 79
+#define TEGRA194_CLK_MPHY_TX_1MHZ_REF 80
+#define TEGRA194_CLK_NVCSI 81
+#define TEGRA194_CLK_NVCSILP 82
+#define TEGRA194_CLK_NVDEC 83
+#define TEGRA194_CLK_NVDISPLAYHUB 84
+#define TEGRA194_CLK_NVDISPLAY_DISP 85
+#define TEGRA194_CLK_NVDISPLAY_P0 86
+#define TEGRA194_CLK_NVDISPLAY_P1 87
+#define TEGRA194_CLK_NVDISPLAY_P2 88
+#define TEGRA194_CLK_NVENC 89
+#define TEGRA194_CLK_NVJPG 90
+#define TEGRA194_CLK_OSC 91
+#define TEGRA194_CLK_AON_TOUCH 92
+#define TEGRA194_CLK_PLLA 93
+#define TEGRA194_CLK_PLLAON 94
+#define TEGRA194_CLK_PLLD 95
+#define TEGRA194_CLK_PLLD2 96
+#define TEGRA194_CLK_PLLD3 97
+#define TEGRA194_CLK_PLLDP 98
+#define TEGRA194_CLK_PLLD4 99
+#define TEGRA194_CLK_PLLE 100
+#define TEGRA194_CLK_PLLP 101
+#define TEGRA194_CLK_PLLP_OUT0 102
+#define TEGRA194_CLK_UTMIPLL 103
+#define TEGRA194_CLK_PLLA_OUT0 104
+#define TEGRA194_CLK_PWM1 105
+#define TEGRA194_CLK_PWM2 106
+#define TEGRA194_CLK_PWM3 107
+#define TEGRA194_CLK_PWM4 108
+#define TEGRA194_CLK_PWM5 109
+#define TEGRA194_CLK_PWM6 110
+#define TEGRA194_CLK_PWM7 111
+#define TEGRA194_CLK_PWM8 112
+#define TEGRA194_CLK_RCE_CPU_NIC 113
+#define TEGRA194_CLK_RCE_NIC 114
+#define TEGRA194_CLK_SATA 115
+#define TEGRA194_CLK_SATA_OOB 116
+#define TEGRA194_CLK_AON_I2C_SLOW 117
+#define TEGRA194_CLK_SCE_CPU_NIC 118
+#define TEGRA194_CLK_SCE_NIC 119
+#define TEGRA194_CLK_SDMMC1 120
+#define TEGRA194_CLK_UPHY_PLL3 121
+#define TEGRA194_CLK_SDMMC3 122
+#define TEGRA194_CLK_SDMMC4 123
+#define TEGRA194_CLK_SE 124
+#define TEGRA194_CLK_SOR0_OUT 125
+#define TEGRA194_CLK_SOR0_REF 126
+#define TEGRA194_CLK_SOR0_PAD_CLKOUT 127
+#define TEGRA194_CLK_SOR1_OUT 128
+#define TEGRA194_CLK_SOR1_REF 129
+#define TEGRA194_CLK_SOR1_PAD_CLKOUT 130
+#define TEGRA194_CLK_SOR_SAFE 131
+#define TEGRA194_CLK_IQC1_IN 132
+#define TEGRA194_CLK_IQC2_IN 133
+#define TEGRA194_CLK_DMIC5 134
+#define TEGRA194_CLK_SPI1 135
+#define TEGRA194_CLK_SPI2 136
+#define TEGRA194_CLK_SPI3 137
+#define TEGRA194_CLK_I2C_SLOW 138
+#define TEGRA194_CLK_SYNC_DMIC1 139
+#define TEGRA194_CLK_SYNC_DMIC2 140
+#define TEGRA194_CLK_SYNC_DMIC3 141
+#define TEGRA194_CLK_SYNC_DMIC4 142
+#define TEGRA194_CLK_SYNC_DSPK1 143
+#define TEGRA194_CLK_SYNC_DSPK2 144
+#define TEGRA194_CLK_SYNC_I2S1 145
+#define TEGRA194_CLK_SYNC_I2S2 146
+#define TEGRA194_CLK_SYNC_I2S3 147
+#define TEGRA194_CLK_SYNC_I2S4 148
+#define TEGRA194_CLK_SYNC_I2S5 149
+#define TEGRA194_CLK_SYNC_I2S6 150
+#define TEGRA194_CLK_MPHY_FORCE_LS_MODE 151
+#define TEGRA194_CLK_TACH 152
+#define TEGRA194_CLK_TSEC 153
+#define TEGRA194_CLK_TSECB 154
+#define TEGRA194_CLK_UARTA 155
+#define TEGRA194_CLK_UARTB 156
+#define TEGRA194_CLK_UARTC 157
+#define TEGRA194_CLK_UARTD 158
+#define TEGRA194_CLK_UARTE 159
+#define TEGRA194_CLK_UARTF 160
+#define TEGRA194_CLK_UARTG 161
+#define TEGRA194_CLK_UART_FST_MIPI_CAL 162
+#define TEGRA194_CLK_UFSDEV_REF 163
+#define TEGRA194_CLK_UFSHC 164
+#define TEGRA194_CLK_USB2_TRK 165
+#define TEGRA194_CLK_VI 166
+#define TEGRA194_CLK_VIC 167
+#define TEGRA194_CLK_PVA0_AXI 168
+#define TEGRA194_CLK_PVA0_VPS0 169
+#define TEGRA194_CLK_PVA0_VPS1 170
+#define TEGRA194_CLK_PVA1_AXI 171
+#define TEGRA194_CLK_PVA1_VPS0 172
+#define TEGRA194_CLK_PVA1_VPS1 173
+#define TEGRA194_CLK_DLA0_FALCON 174
+#define TEGRA194_CLK_DLA0_CORE 175
+#define TEGRA194_CLK_DLA1_FALCON 176
+#define TEGRA194_CLK_DLA1_CORE 177
+#define TEGRA194_CLK_SOR2_OUT 178
+#define TEGRA194_CLK_SOR2_REF 179
+#define TEGRA194_CLK_SOR2_PAD_CLKOUT 180
+#define TEGRA194_CLK_SOR3_OUT 181
+#define TEGRA194_CLK_SOR3_REF 182
+#define TEGRA194_CLK_SOR3_PAD_CLKOUT 183
+#define TEGRA194_CLK_NVDISPLAY_P3 184
+#define TEGRA194_CLK_DPAUX2 185
+#define TEGRA194_CLK_DPAUX3 186
+#define TEGRA194_CLK_NVDEC1 187
+#define TEGRA194_CLK_NVENC1 188
+#define TEGRA194_CLK_SE_FREE 189
+#define TEGRA194_CLK_UARTH 190
+#define TEGRA194_CLK_FUSE_SERIAL 191
+#define TEGRA194_CLK_QSPI0 192
+#define TEGRA194_CLK_QSPI1 193
+#define TEGRA194_CLK_QSPI0_PM 194
+#define TEGRA194_CLK_QSPI1_PM 195
+#define TEGRA194_CLK_VI_CONST 196
+#define TEGRA194_CLK_NAFLL_BPMP 197
+#define TEGRA194_CLK_NAFLL_SCE 198
+#define TEGRA194_CLK_NAFLL_NVDEC 199
+#define TEGRA194_CLK_NAFLL_NVJPG 200
+#define TEGRA194_CLK_NAFLL_TSEC 201
+#define TEGRA194_CLK_NAFLL_TSECB 202
+#define TEGRA194_CLK_NAFLL_VI 203
+#define TEGRA194_CLK_NAFLL_SE 204
+#define TEGRA194_CLK_NAFLL_NVENC 205
+#define TEGRA194_CLK_NAFLL_ISP 206
+#define TEGRA194_CLK_NAFLL_VIC 207
+#define TEGRA194_CLK_NAFLL_NVDISPLAYHUB 208
+#define TEGRA194_CLK_NAFLL_AXICBB 209
+#define TEGRA194_CLK_NAFLL_DLA 210
+#define TEGRA194_CLK_NAFLL_PVA_CORE 211
+#define TEGRA194_CLK_NAFLL_PVA_VPS 212
+#define TEGRA194_CLK_NAFLL_CVNAS 213
+#define TEGRA194_CLK_NAFLL_RCE 214
+#define TEGRA194_CLK_NAFLL_NVENC1 215
+#define TEGRA194_CLK_NAFLL_DLA_FALCON 216
+#define TEGRA194_CLK_NAFLL_NVDEC1 217
+#define TEGRA194_CLK_NAFLL_GPU 218
+#define TEGRA194_CLK_SDMMC_LEGACY_TM 219
+#define TEGRA194_CLK_PEX0_CORE_0 220
+#define TEGRA194_CLK_PEX0_CORE_1 221
+#define TEGRA194_CLK_PEX0_CORE_2 222
+#define TEGRA194_CLK_PEX0_CORE_3 223
+#define TEGRA194_CLK_PEX0_CORE_4 224
+#define TEGRA194_CLK_PEX1_CORE_5 225
+#define TEGRA194_CLK_PEX_REF1 226
+#define TEGRA194_CLK_PEX_REF2 227
+#define TEGRA194_CLK_CSI_A 229
+#define TEGRA194_CLK_CSI_B 230
+#define TEGRA194_CLK_CSI_C 231
+#define TEGRA194_CLK_CSI_D 232
+#define TEGRA194_CLK_CSI_E 233
+#define TEGRA194_CLK_CSI_F 234
+#define TEGRA194_CLK_CSI_G 235
+#define TEGRA194_CLK_CSI_H 236
+#define TEGRA194_CLK_PLLC4 237
+#define TEGRA194_CLK_PLLC4_OUT 238
+#define TEGRA194_CLK_PLLC4_OUT1 239
+#define TEGRA194_CLK_PLLC4_OUT2 240
+#define TEGRA194_CLK_PLLC4_MUXED 241
+#define TEGRA194_CLK_PLLC4_VCO_DIV2 242
+#define TEGRA194_CLK_CSI_A_PAD 244
+#define TEGRA194_CLK_CSI_B_PAD 245
+#define TEGRA194_CLK_CSI_C_PAD 246
+#define TEGRA194_CLK_CSI_D_PAD 247
+#define TEGRA194_CLK_CSI_E_PAD 248
+#define TEGRA194_CLK_CSI_F_PAD 249
+#define TEGRA194_CLK_CSI_G_PAD 250
+#define TEGRA194_CLK_CSI_H_PAD 251
+#define TEGRA194_CLK_PEX_SATA_USB_RX_BYP 254
+#define TEGRA194_CLK_PEX_USB_PAD_PLL0_MGMT 255
+#define TEGRA194_CLK_PEX_USB_PAD_PLL1_MGMT 256
+#define TEGRA194_CLK_PEX_USB_PAD_PLL2_MGMT 257
+#define TEGRA194_CLK_PEX_USB_PAD_PLL3_MGMT 258
+#define TEGRA194_CLK_XUSB_CORE_DEV 265
+#define TEGRA194_CLK_XUSB_CORE_MUX 266
+#define TEGRA194_CLK_XUSB_CORE_HOST 267
+#define TEGRA194_CLK_XUSB_CORE_SS 268
+#define TEGRA194_CLK_XUSB_FALCON 269
+#define TEGRA194_CLK_XUSB_FALCON_HOST 270
+#define TEGRA194_CLK_XUSB_FALCON_SS 271
+#define TEGRA194_CLK_XUSB_FS 272
+#define TEGRA194_CLK_XUSB_FS_HOST 273
+#define TEGRA194_CLK_XUSB_FS_DEV 274
+#define TEGRA194_CLK_XUSB_SS 275
+#define TEGRA194_CLK_XUSB_SS_DEV 276
+#define TEGRA194_CLK_XUSB_SS_SUPERSPEED 277
+#define TEGRA194_CLK_PLLDISPHUB 278
+#define TEGRA194_CLK_PLLDISPHUB_DIV 279
+#define TEGRA194_CLK_NAFLL_CLUSTER0 280
+#define TEGRA194_CLK_NAFLL_CLUSTER1 281
+#define TEGRA194_CLK_NAFLL_CLUSTER2 282
+#define TEGRA194_CLK_NAFLL_CLUSTER3 283
+#define TEGRA194_CLK_CAN1_CORE 284
+#define TEGRA194_CLK_CAN2_CORE 285
+#define TEGRA194_CLK_PLLA1_OUT1 286
+#define TEGRA194_CLK_PLLREFE_VCOOUT 288
+#define TEGRA194_CLK_CLK_32K 289
+#define TEGRA194_CLK_SPDIFIN_SYNC_INPUT 290
+#define TEGRA194_CLK_UTMIPLL_CLKOUT48 291
+#define TEGRA194_CLK_UTMIPLL_CLKOUT480 292
+#define TEGRA194_CLK_CVNAS 293
+#define TEGRA194_CLK_PLLNVCSI 294
+#define TEGRA194_CLK_PVA0_CPU_AXI 295
+#define TEGRA194_CLK_PVA1_CPU_AXI 296
+#define TEGRA194_CLK_PVA0_VPS 297
+#define TEGRA194_CLK_PVA1_VPS 298
+#define TEGRA194_CLK_DLA0_FALCON_MUX 299
+#define TEGRA194_CLK_DLA1_FALCON_MUX 300
+#define TEGRA194_CLK_DLA0_CORE_MUX 301
+#define TEGRA194_CLK_DLA1_CORE_MUX 302
+#define TEGRA194_CLK_UTMIPLL_HPS 304
+#define TEGRA194_CLK_I2C5 305
+#define TEGRA194_CLK_I2C10 306
+#define TEGRA194_CLK_BPMP_CPU_NIC 307
+#define TEGRA194_CLK_BPMP_APB 308
+#define TEGRA194_CLK_TSC 309
+#define TEGRA194_CLK_EMCSA 310
+#define TEGRA194_CLK_EMCSB 311
+#define TEGRA194_CLK_EMCSC 312
+#define TEGRA194_CLK_EMCSD 313
+#define TEGRA194_CLK_PLLC 314
+#define TEGRA194_CLK_PLLC2 315
+#define TEGRA194_CLK_PLLC3 316
+#define TEGRA194_CLK_TSC_REF 317
+#define TEGRA194_CLK_FUSE_BURN 318
+#define TEGRA194_CLK_PEX0_CORE_0M 319
+#define TEGRA194_CLK_PEX0_CORE_1M 320
+#define TEGRA194_CLK_PEX0_CORE_2M 321
+#define TEGRA194_CLK_PEX0_CORE_3M 322
+#define TEGRA194_CLK_PEX0_CORE_4M 323
+#define TEGRA194_CLK_PEX1_CORE_5M 324
+#define TEGRA194_CLK_PLLE_HPS 326
+
+#endif
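
These IDs are referenced from device tree; drivers then use the ordinary clk consumer API. A hedged sketch (the pairing of a device node with TEGRA194_CLK_UARTA via the BPMP clock provider is illustrative only).

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical probe; DT would carry e.g. clocks = <&bpmp TEGRA194_CLK_UARTA>; */
static int example_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}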
diff --git a/include/dt-bindings/gpio/tegra194-gpio.h b/include/dt-bindings/gpio/tegra194-gpio.h
new file mode 100644
index 000000000000..ede860225f6b
--- /dev/null
+++ b/include/dt-bindings/gpio/tegra194-gpio.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */
+
+/*
+ * This header provides constants for binding nvidia,tegra194-gpio*.
+ *
+ * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below
+ * provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_TEGRA194_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA194_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+/* GPIOs implemented by main GPIO controller */
+#define TEGRA194_MAIN_GPIO_PORT_A 0
+#define TEGRA194_MAIN_GPIO_PORT_B 1
+#define TEGRA194_MAIN_GPIO_PORT_C 2
+#define TEGRA194_MAIN_GPIO_PORT_D 3
+#define TEGRA194_MAIN_GPIO_PORT_E 4
+#define TEGRA194_MAIN_GPIO_PORT_F 5
+#define TEGRA194_MAIN_GPIO_PORT_G 6
+#define TEGRA194_MAIN_GPIO_PORT_H 7
+#define TEGRA194_MAIN_GPIO_PORT_I 8
+#define TEGRA194_MAIN_GPIO_PORT_J 9
+#define TEGRA194_MAIN_GPIO_PORT_K 10
+#define TEGRA194_MAIN_GPIO_PORT_L 11
+#define TEGRA194_MAIN_GPIO_PORT_M 12
+#define TEGRA194_MAIN_GPIO_PORT_N 13
+#define TEGRA194_MAIN_GPIO_PORT_O 14
+#define TEGRA194_MAIN_GPIO_PORT_P 15
+#define TEGRA194_MAIN_GPIO_PORT_Q 16
+#define TEGRA194_MAIN_GPIO_PORT_R 17
+#define TEGRA194_MAIN_GPIO_PORT_S 18
+#define TEGRA194_MAIN_GPIO_PORT_T 19
+#define TEGRA194_MAIN_GPIO_PORT_U 20
+#define TEGRA194_MAIN_GPIO_PORT_V 21
+#define TEGRA194_MAIN_GPIO_PORT_W 22
+#define TEGRA194_MAIN_GPIO_PORT_X 23
+#define TEGRA194_MAIN_GPIO_PORT_Y 24
+#define TEGRA194_MAIN_GPIO_PORT_Z 25
+#define TEGRA194_MAIN_GPIO_PORT_FF 26
+#define TEGRA194_MAIN_GPIO_PORT_GG 27
+
+#define TEGRA194_MAIN_GPIO(port, offset) \
+ ((TEGRA194_MAIN_GPIO_PORT_##port * 8) + offset)
+
+/* GPIOs implemented by AON GPIO controller */
+#define TEGRA194_AON_GPIO_PORT_AA 0
+#define TEGRA194_AON_GPIO_PORT_BB 1
+#define TEGRA194_AON_GPIO_PORT_CC 2
+#define TEGRA194_AON_GPIO_PORT_DD 3
+#define TEGRA194_AON_GPIO_PORT_EE 4
+
+#define TEGRA194_AON_GPIO(port, offset) \
+ ((TEGRA194_AON_GPIO_PORT_##port * 8) + offset)
+
+#endif
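
A worked expansion of the helper macros above, making the port * 8 + offset encoding explicit; the pins and the DT fragment in the comment are illustrative.

/*
 * TEGRA194_MAIN_GPIO(A, 3)  -> (0 * 8) + 3  = 3
 * TEGRA194_MAIN_GPIO(FF, 2) -> (26 * 8) + 2 = 210
 * TEGRA194_AON_GPIO(BB, 0)  -> (1 * 8) + 0  = 8
 *
 * In a device tree node this typically appears as, e.g.:
 *   gpios = <&gpio TEGRA194_MAIN_GPIO(A, 3) GPIO_ACTIVE_LOW>;
 */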
diff --git a/include/dt-bindings/input/gpio-keys.h b/include/dt-bindings/input/gpio-keys.h
new file mode 100644
index 000000000000..8962df79e753
--- /dev/null
+++ b/include/dt-bindings/input/gpio-keys.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for gpio keys bindings.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_KEYS_H
+#define _DT_BINDINGS_GPIO_KEYS_H
+
+#define EV_ACT_ANY 0x00 /* asserted or deasserted */
+#define EV_ACT_ASSERTED 0x01 /* asserted */
+#define EV_ACT_DEASSERTED 0x02 /* deasserted */
+
+#endif /* _DT_BINDINGS_GPIO_KEYS_H */
diff --git a/include/dt-bindings/mfd/stm32f7-rcc.h b/include/dt-bindings/mfd/stm32f7-rcc.h
index 8b7b7197ffd7..a90f3613c584 100644
--- a/include/dt-bindings/mfd/stm32f7-rcc.h
+++ b/include/dt-bindings/mfd/stm32f7-rcc.h
@@ -91,6 +91,7 @@
#define STM32F7_RCC_APB2_TIM8 1
#define STM32F7_RCC_APB2_USART1 4
#define STM32F7_RCC_APB2_USART6 5
+#define STM32F7_RCC_APB2_SDMMC2 7
#define STM32F7_RCC_APB2_ADC1 8
#define STM32F7_RCC_APB2_ADC2 9
#define STM32F7_RCC_APB2_ADC3 10
diff --git a/include/dt-bindings/power/mt2712-power.h b/include/dt-bindings/power/mt2712-power.h
index 92b46d772fae..2c147817efc2 100644
--- a/include/dt-bindings/power/mt2712-power.h
+++ b/include/dt-bindings/power/mt2712-power.h
@@ -22,5 +22,8 @@
#define MT2712_POWER_DOMAIN_USB 5
#define MT2712_POWER_DOMAIN_USB2 6
#define MT2712_POWER_DOMAIN_MFG 7
+#define MT2712_POWER_DOMAIN_MFG_SC1 8
+#define MT2712_POWER_DOMAIN_MFG_SC2 9
+#define MT2712_POWER_DOMAIN_MFG_SC3 10
#endif /* _DT_BINDINGS_POWER_MT2712_POWER_H */
diff --git a/include/dt-bindings/power/mt7623a-power.h b/include/dt-bindings/power/mt7623a-power.h
new file mode 100644
index 000000000000..2544822aa76b
--- /dev/null
+++ b/include/dt-bindings/power/mt7623a-power.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DT_BINDINGS_POWER_MT7623A_POWER_H
+#define _DT_BINDINGS_POWER_MT7623A_POWER_H
+
+#define MT7623A_POWER_DOMAIN_CONN 0
+#define MT7623A_POWER_DOMAIN_ETH 1
+#define MT7623A_POWER_DOMAIN_HIF 2
+#define MT7623A_POWER_DOMAIN_IFR_MSC 3
+
+#endif /* _DT_BINDINGS_POWER_MT7623A_POWER_H */
diff --git a/include/dt-bindings/power/r8a77965-sysc.h b/include/dt-bindings/power/r8a77965-sysc.h
new file mode 100644
index 000000000000..05a4b5917314
--- /dev/null
+++ b/include/dt-bindings/power/r8a77965-sysc.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ * Copyright (C) 2016 Glider bvba
+ */
+
+#ifndef __DT_BINDINGS_POWER_R8A77965_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A77965_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A77965_PD_CA57_CPU0 0
+#define R8A77965_PD_CA57_CPU1 1
+#define R8A77965_PD_A3VP 9
+#define R8A77965_PD_CA57_SCU 12
+#define R8A77965_PD_CR7 13
+#define R8A77965_PD_A3VC 14
+#define R8A77965_PD_3DG_A 17
+#define R8A77965_PD_3DG_B 18
+#define R8A77965_PD_A3IR 24
+#define R8A77965_PD_A2VC1 26
+
+/* Always-on power area */
+#define R8A77965_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A77965_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a77980-sysc.h b/include/dt-bindings/power/r8a77980-sysc.h
new file mode 100644
index 000000000000..2c90c1237725
--- /dev/null
+++ b/include/dt-bindings/power/r8a77980-sysc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018 Cogent Embedded, Inc.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A77980_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A77980_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A77980_PD_A2SC2 0
+#define R8A77980_PD_A2SC3 1
+#define R8A77980_PD_A2SC4 2
+#define R8A77980_PD_A2PD0 3
+#define R8A77980_PD_A2PD1 4
+#define R8A77980_PD_CA53_CPU0 5
+#define R8A77980_PD_CA53_CPU1 6
+#define R8A77980_PD_CA53_CPU2 7
+#define R8A77980_PD_CA53_CPU3 8
+#define R8A77980_PD_A2CN 10
+#define R8A77980_PD_A3VIP 11
+#define R8A77980_PD_A2IR5 12
+#define R8A77980_PD_CR7 13
+#define R8A77980_PD_A2IR4 15
+#define R8A77980_PD_CA53_SCU 21
+#define R8A77980_PD_A2IR0 23
+#define R8A77980_PD_A3IR 24
+#define R8A77980_PD_A3VIP1 25
+#define R8A77980_PD_A3VIP2 26
+#define R8A77980_PD_A2IR1 27
+#define R8A77980_PD_A2IR2 28
+#define R8A77980_PD_A2IR3 29
+#define R8A77980_PD_A2SC0 30
+#define R8A77980_PD_A2SC1 31
+
+/* Always-on power area */
+#define R8A77980_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A77980_SYSC_H__ */
diff --git a/include/dt-bindings/power/tegra194-powergate.h b/include/dt-bindings/power/tegra194-powergate.h
new file mode 100644
index 000000000000..82253742a493
--- /dev/null
+++ b/include/dt-bindings/power/tegra194-powergate.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __ABI_MACH_T194_POWERGATE_T194_H_
+#define __ABI_MACH_T194_POWERGATE_T194_H_
+
+#define TEGRA194_POWER_DOMAIN_AUD 1
+#define TEGRA194_POWER_DOMAIN_DISP 2
+#define TEGRA194_POWER_DOMAIN_DISPB 3
+#define TEGRA194_POWER_DOMAIN_DISPC 4
+#define TEGRA194_POWER_DOMAIN_ISPA 5
+#define TEGRA194_POWER_DOMAIN_NVDECA 6
+#define TEGRA194_POWER_DOMAIN_NVJPG 7
+#define TEGRA194_POWER_DOMAIN_NVENCA 8
+#define TEGRA194_POWER_DOMAIN_NVENCB 9
+#define TEGRA194_POWER_DOMAIN_NVDECB 10
+#define TEGRA194_POWER_DOMAIN_SAX 11
+#define TEGRA194_POWER_DOMAIN_VE 12
+#define TEGRA194_POWER_DOMAIN_VIC 13
+#define TEGRA194_POWER_DOMAIN_XUSBA 14
+#define TEGRA194_POWER_DOMAIN_XUSBB 15
+#define TEGRA194_POWER_DOMAIN_XUSBC 16
+#define TEGRA194_POWER_DOMAIN_PCIEX8A 17
+#define TEGRA194_POWER_DOMAIN_PCIEX4A 18
+#define TEGRA194_POWER_DOMAIN_PCIEX1A 19
+#define TEGRA194_POWER_DOMAIN_PCIEX8B 21
+#define TEGRA194_POWER_DOMAIN_PVAA 22
+#define TEGRA194_POWER_DOMAIN_PVAB 23
+#define TEGRA194_POWER_DOMAIN_DLAA 24
+#define TEGRA194_POWER_DOMAIN_DLAB 25
+#define TEGRA194_POWER_DOMAIN_CV 26
+#define TEGRA194_POWER_DOMAIN_GPU 27
+#define TEGRA194_POWER_DOMAIN_MAX 27
+
+#endif
diff --git a/include/dt-bindings/reset/stm32mp1-resets.h b/include/dt-bindings/reset/stm32mp1-resets.h
new file mode 100644
index 000000000000..f0c3aaef67a0
--- /dev/null
+++ b/include/dt-bindings/reset/stm32mp1-resets.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
+ */
+
+#ifndef _DT_BINDINGS_STM32MP1_RESET_H_
+#define _DT_BINDINGS_STM32MP1_RESET_H_
+
+#define LTDC_R 3072
+#define DSI_R 3076
+#define DDRPERFM_R 3080
+#define USBPHY_R 3088
+#define SPI6_R 3136
+#define I2C4_R 3138
+#define I2C6_R 3139
+#define USART1_R 3140
+#define STGEN_R 3156
+#define GPIOZ_R 3200
+#define CRYP1_R 3204
+#define HASH1_R 3205
+#define RNG1_R 3206
+#define AXIM_R 3216
+#define GPU_R 3269
+#define ETHMAC_R 3274
+#define FMC_R 3276
+#define QSPI_R 3278
+#define SDMMC1_R 3280
+#define SDMMC2_R 3281
+#define CRC1_R 3284
+#define USBH_R 3288
+#define MDMA_R 3328
+#define MCU_R 8225
+#define TIM2_R 19456
+#define TIM3_R 19457
+#define TIM4_R 19458
+#define TIM5_R 19459
+#define TIM6_R 19460
+#define TIM7_R 19461
+#define TIM12_R 16462
+#define TIM13_R 16463
+#define TIM14_R 16464
+#define LPTIM1_R 19465
+#define SPI2_R 19467
+#define SPI3_R 19468
+#define USART2_R 19470
+#define USART3_R 19471
+#define UART4_R 19472
+#define UART5_R 19473
+#define UART7_R 19474
+#define UART8_R 19475
+#define I2C1_R 19477
+#define I2C2_R 19478
+#define I2C3_R 19479
+#define I2C5_R 19480
+#define SPDIF_R 19482
+#define CEC_R 19483
+#define DAC12_R 19485
+#define MDIO_R 19847
+#define TIM1_R 19520
+#define TIM8_R 19521
+#define TIM15_R 19522
+#define TIM16_R 19523
+#define TIM17_R 19524
+#define SPI1_R 19528
+#define SPI4_R 19529
+#define SPI5_R 19530
+#define USART6_R 19533
+#define SAI1_R 19536
+#define SAI2_R 19537
+#define SAI3_R 19538
+#define DFSDM_R 19540
+#define FDCAN_R 19544
+#define LPTIM2_R 19584
+#define LPTIM3_R 19585
+#define LPTIM4_R 19586
+#define LPTIM5_R 19587
+#define SAI4_R 19592
+#define SYSCFG_R 19595
+#define VREF_R 19597
+#define TMPSENS_R 19600
+#define PMBCTRL_R 19601
+#define DMA1_R 19648
+#define DMA2_R 19649
+#define DMAMUX_R 19650
+#define ADC12_R 19653
+#define USBO_R 19656
+#define SDMMC3_R 19664
+#define CAMITF_R 19712
+#define CRYP2_R 19716
+#define HASH2_R 19717
+#define RNG2_R 19718
+#define CRC2_R 19719
+#define HSEM_R 19723
+#define MBOX_R 19724
+#define GPIOA_R 19776
+#define GPIOB_R 19777
+#define GPIOC_R 19778
+#define GPIOD_R 19779
+#define GPIOE_R 19780
+#define GPIOF_R 19781
+#define GPIOG_R 19782
+#define GPIOH_R 19783
+#define GPIOI_R 19784
+#define GPIOJ_R 19785
+#define GPIOK_R 19786
+
+#endif /* _DT_BINDINGS_STM32MP1_RESET_H_ */
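
As with the other binding headers, these IDs are referenced from device tree and consumed through the generic reset framework. A minimal, hedged consumer sketch; the device and the choice of SPI6_R are made up.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

/* Hypothetical probe; DT would carry e.g. resets = <&rcc SPI6_R>; */
static int example_probe(struct platform_device *pdev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	return reset_control_deassert(rst);
}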
diff --git a/include/dt-bindings/reset/tegra194-reset.h b/include/dt-bindings/reset/tegra194-reset.h
new file mode 100644
index 000000000000..473afaa25bfb
--- /dev/null
+++ b/include/dt-bindings/reset/tegra194-reset.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __ABI_MACH_T194_RESET_H
+#define __ABI_MACH_T194_RESET_H
+
+#define TEGRA194_RESET_ACTMON 1
+#define TEGRA194_RESET_ADSP_ALL 2
+#define TEGRA194_RESET_AFI 3
+#define TEGRA194_RESET_CAN1 4
+#define TEGRA194_RESET_CAN2 5
+#define TEGRA194_RESET_DLA0 6
+#define TEGRA194_RESET_DLA1 7
+#define TEGRA194_RESET_DPAUX 8
+#define TEGRA194_RESET_DPAUX1 9
+#define TEGRA194_RESET_DPAUX2 10
+#define TEGRA194_RESET_DPAUX3 11
+#define TEGRA194_RESET_EQOS 17
+#define TEGRA194_RESET_GPCDMA 18
+#define TEGRA194_RESET_GPU 19
+#define TEGRA194_RESET_HDA 20
+#define TEGRA194_RESET_HDA2CODEC_2X 21
+#define TEGRA194_RESET_HDA2HDMICODEC 22
+#define TEGRA194_RESET_HOST1X 23
+#define TEGRA194_RESET_I2C1 24
+#define TEGRA194_RESET_I2C10 25
+#define TEGRA194_RESET_RSVD_26 26
+#define TEGRA194_RESET_RSVD_27 27
+#define TEGRA194_RESET_RSVD_28 28
+#define TEGRA194_RESET_I2C2 29
+#define TEGRA194_RESET_I2C3 30
+#define TEGRA194_RESET_I2C4 31
+#define TEGRA194_RESET_I2C6 32
+#define TEGRA194_RESET_I2C7 33
+#define TEGRA194_RESET_I2C8 34
+#define TEGRA194_RESET_I2C9 35
+#define TEGRA194_RESET_ISP 36
+#define TEGRA194_RESET_MIPI_CAL 37
+#define TEGRA194_RESET_MPHY_CLK_CTL 38
+#define TEGRA194_RESET_MPHY_L0_RX 39
+#define TEGRA194_RESET_MPHY_L0_TX 40
+#define TEGRA194_RESET_MPHY_L1_RX 41
+#define TEGRA194_RESET_MPHY_L1_TX 42
+#define TEGRA194_RESET_NVCSI 43
+#define TEGRA194_RESET_NVDEC 44
+#define TEGRA194_RESET_NVDISPLAY0_HEAD0 45
+#define TEGRA194_RESET_NVDISPLAY0_HEAD1 46
+#define TEGRA194_RESET_NVDISPLAY0_HEAD2 47
+#define TEGRA194_RESET_NVDISPLAY0_HEAD3 48
+#define TEGRA194_RESET_NVDISPLAY0_MISC 49
+#define TEGRA194_RESET_NVDISPLAY0_WGRP0 50
+#define TEGRA194_RESET_NVDISPLAY0_WGRP1 51
+#define TEGRA194_RESET_NVDISPLAY0_WGRP2 52
+#define TEGRA194_RESET_NVDISPLAY0_WGRP3 53
+#define TEGRA194_RESET_NVDISPLAY0_WGRP4 54
+#define TEGRA194_RESET_NVDISPLAY0_WGRP5 55
+#define TEGRA194_RESET_RSVD_56 56
+#define TEGRA194_RESET_RSVD_57 57
+#define TEGRA194_RESET_RSVD_58 58
+#define TEGRA194_RESET_NVENC 59
+#define TEGRA194_RESET_NVENC1 60
+#define TEGRA194_RESET_NVJPG 61
+#define TEGRA194_RESET_PCIE 62
+#define TEGRA194_RESET_PCIEXCLK 63
+#define TEGRA194_RESET_RSVD_64 64
+#define TEGRA194_RESET_RSVD_65 65
+#define TEGRA194_RESET_PVA0_ALL 66
+#define TEGRA194_RESET_PVA1_ALL 67
+#define TEGRA194_RESET_PWM1 68
+#define TEGRA194_RESET_PWM2 69
+#define TEGRA194_RESET_PWM3 70
+#define TEGRA194_RESET_PWM4 71
+#define TEGRA194_RESET_PWM5 72
+#define TEGRA194_RESET_PWM6 73
+#define TEGRA194_RESET_PWM7 74
+#define TEGRA194_RESET_PWM8 75
+#define TEGRA194_RESET_QSPI0 76
+#define TEGRA194_RESET_QSPI1 77
+#define TEGRA194_RESET_SATA 78
+#define TEGRA194_RESET_SATACOLD 79
+#define TEGRA194_RESET_SCE_ALL 80
+#define TEGRA194_RESET_RCE_ALL 81
+#define TEGRA194_RESET_SDMMC1 82
+#define TEGRA194_RESET_RSVD_83 83
+#define TEGRA194_RESET_SDMMC3 84
+#define TEGRA194_RESET_SDMMC4 85
+#define TEGRA194_RESET_SE 86
+#define TEGRA194_RESET_SOR0 87
+#define TEGRA194_RESET_SOR1 88
+#define TEGRA194_RESET_SOR2 89
+#define TEGRA194_RESET_SOR3 90
+#define TEGRA194_RESET_SPI1 91
+#define TEGRA194_RESET_SPI2 92
+#define TEGRA194_RESET_SPI3 93
+#define TEGRA194_RESET_SPI4 94
+#define TEGRA194_RESET_TACH 95
+#define TEGRA194_RESET_RSVD_96 96
+#define TEGRA194_RESET_TSCTNVI 97
+#define TEGRA194_RESET_TSEC 98
+#define TEGRA194_RESET_TSECB 99
+#define TEGRA194_RESET_UARTA 100
+#define TEGRA194_RESET_UARTB 101
+#define TEGRA194_RESET_UARTC 102
+#define TEGRA194_RESET_UARTD 103
+#define TEGRA194_RESET_UARTE 104
+#define TEGRA194_RESET_UARTF 105
+#define TEGRA194_RESET_UARTG 106
+#define TEGRA194_RESET_UARTH 107
+#define TEGRA194_RESET_UFSHC 108
+#define TEGRA194_RESET_UFSHC_AXI_M 109
+#define TEGRA194_RESET_UFSHC_LP_SEQ 110
+#define TEGRA194_RESET_RSVD_111 111
+#define TEGRA194_RESET_VI 112
+#define TEGRA194_RESET_VIC 113
+#define TEGRA194_RESET_XUSB_PADCTL 114
+#define TEGRA194_RESET_NVDEC1 115
+#define TEGRA194_RESET_PEX0_CORE_0 116
+#define TEGRA194_RESET_PEX0_CORE_1 117
+#define TEGRA194_RESET_PEX0_CORE_2 118
+#define TEGRA194_RESET_PEX0_CORE_3 119
+#define TEGRA194_RESET_PEX0_CORE_4 120
+#define TEGRA194_RESET_PEX0_CORE_0_APB 121
+#define TEGRA194_RESET_PEX0_CORE_1_APB 122
+#define TEGRA194_RESET_PEX0_CORE_2_APB 123
+#define TEGRA194_RESET_PEX0_CORE_3_APB 124
+#define TEGRA194_RESET_PEX0_CORE_4_APB 125
+#define TEGRA194_RESET_PEX0_COMMON_APB 126
+#define TEGRA194_RESET_PEX1_CORE_5 129
+#define TEGRA194_RESET_PEX1_CORE_5_APB 130
+#define TEGRA194_RESET_CVNAS 131
+#define TEGRA194_RESET_CVNAS_FCM 132
+#define TEGRA194_RESET_DMIC5 144
+#define TEGRA194_RESET_APE 145
+#define TEGRA194_RESET_PEX_USB_UPHY 146
+#define TEGRA194_RESET_PEX_USB_UPHY_L0 147
+#define TEGRA194_RESET_PEX_USB_UPHY_L1 148
+#define TEGRA194_RESET_PEX_USB_UPHY_L2 149
+#define TEGRA194_RESET_PEX_USB_UPHY_L3 150
+#define TEGRA194_RESET_PEX_USB_UPHY_L4 151
+#define TEGRA194_RESET_PEX_USB_UPHY_L5 152
+#define TEGRA194_RESET_PEX_USB_UPHY_L6 153
+#define TEGRA194_RESET_PEX_USB_UPHY_L7 154
+#define TEGRA194_RESET_PEX_USB_UPHY_L8 155
+#define TEGRA194_RESET_PEX_USB_UPHY_L9 156
+#define TEGRA194_RESET_PEX_USB_UPHY_L10 157
+#define TEGRA194_RESET_PEX_USB_UPHY_L11 158
+#define TEGRA194_RESET_PEX_USB_UPHY_PLL0 159
+#define TEGRA194_RESET_PEX_USB_UPHY_PLL1 160
+#define TEGRA194_RESET_PEX_USB_UPHY_PLL2 161
+#define TEGRA194_RESET_PEX_USB_UPHY_PLL3 162
+
+#endif
diff --git a/include/dt-bindings/sound/rt5651.h b/include/dt-bindings/sound/rt5651.h
new file mode 100644
index 000000000000..2f2dac915168
--- /dev/null
+++ b/include/dt-bindings/sound/rt5651.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_RT5651_H
+#define __DT_RT5651_H
+
+#define RT5651_JD_NULL 0
+#define RT5651_JD1_1 1
+#define RT5651_JD1_2 2
+#define RT5651_JD2 3
+
+#define RT5651_OVCD_SF_0P5 0
+#define RT5651_OVCD_SF_0P75 1
+#define RT5651_OVCD_SF_1P0 2
+#define RT5651_OVCD_SF_1P5 3
+
+#endif /* __DT_RT5651_H */
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 02924ae2527e..24f03941ada8 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -57,11 +57,15 @@ struct vgic_global {
/* Physical address of vgic virtual cpu interface */
phys_addr_t vcpu_base;
- /* GICV mapping */
+ /* GICV mapping, kernel VA */
void __iomem *vcpu_base_va;
+ /* GICV mapping, HYP VA */
+ void __iomem *vcpu_hyp_va;
- /* virtual control interface mapping */
+ /* virtual control interface mapping, kernel VA */
void __iomem *vctrl_base;
+ /* virtual control interface mapping, HYP VA */
+ void __iomem *vctrl_hyp;
/* Number of implemented list registers */
int nr_lr;
@@ -209,10 +213,6 @@ struct vgic_dist {
int nr_spis;
- /* TODO: Consider moving to global state */
- /* Virtual control interface mapping */
- void __iomem *vctrl_base;
-
/* base addresses in guest physical address space: */
gpa_t vgic_dist_base; /* distributor */
union {
@@ -263,7 +263,6 @@ struct vgic_dist {
struct vgic_v2_cpu_if {
u32 vgic_hcr;
u32 vgic_vmcr;
- u64 vgic_elrsr; /* Saved only */
u32 vgic_apr;
u32 vgic_lr[VGIC_V2_MAX_LRS];
};
@@ -272,7 +271,6 @@ struct vgic_v3_cpu_if {
u32 vgic_hcr;
u32 vgic_vmcr;
u32 vgic_sre; /* Restored only, change ignored */
- u32 vgic_elrsr; /* Saved only */
u32 vgic_ap0r[4];
u32 vgic_ap1r[4];
u64 vgic_lr[VGIC_V3_MAX_LRS];
diff --git a/include/linux/audit.h b/include/linux/audit.h
index af410d9fbf2d..75d5b031e802 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -146,8 +146,7 @@ extern void audit_log_d_path(struct audit_buffer *ab,
const struct path *path);
extern void audit_log_key(struct audit_buffer *ab,
char *key);
-extern void audit_log_link_denied(const char *operation,
- const struct path *link);
+extern void audit_log_link_denied(const char *operation);
extern void audit_log_lost(const char *message);
extern int audit_log_task_context(struct audit_buffer *ab);
@@ -194,8 +193,7 @@ static inline void audit_log_d_path(struct audit_buffer *ab,
{ }
static inline void audit_log_key(struct audit_buffer *ab, char *key)
{ }
-static inline void audit_log_link_denied(const char *string,
- const struct path *link)
+static inline void audit_log_link_denied(const char *string)
{ }
static inline int audit_log_task_context(struct audit_buffer *ab)
{
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 69bea82ebeb1..6c666fd7de3c 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -88,6 +88,7 @@ struct blkg_policy_data {
/* the blkg and policy id this per-policy data belongs to */
struct blkcg_gq *blkg;
int plid;
+ bool offline;
};
/*
diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h
index 6338551e0fb9..9f4c17f0d2d8 100644
--- a/include/linux/blk-mq-pci.h
+++ b/include/linux/blk-mq-pci.h
@@ -5,6 +5,7 @@
struct blk_mq_tag_set;
struct pci_dev;
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev);
+int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+ int offset);
#endif /* _LINUX_BLK_MQ_PCI_H */
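
The new offset argument shifts the hardware-queue-to-MSI-vector mapping; existing callers pass 0, while a driver that reserves the first vector for non-queue interrupts can start its queues at vector 1. A hedged caller sketch; the driver structure is hypothetical.

#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/pci.h>

struct example_dev {			/* hypothetical driver state */
	struct pci_dev *pdev;
};

static int example_map_queues(struct blk_mq_tag_set *set)
{
	struct example_dev *edev = set->driver_data;

	/* vector 0 is kept for the driver's own IRQ, hw queue i uses vector i + 1 */
	return blk_mq_pci_map_queues(set, edev->pdev, 1);
}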
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index bf18b95ed92d..17b18b91ebac 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -20,8 +20,13 @@ typedef void (bio_end_io_t) (struct bio *);
/*
* Block error status values. See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
*/
+#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
+typedef u32 __bitwise blk_status_t;
+#else
typedef u8 __bitwise blk_status_t;
+#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP ((__force blk_status_t)1)
#define BLK_STS_TIMEOUT ((__force blk_status_t)2)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ed63f3b69c12..9af3e0f430bc 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -707,73 +707,10 @@ struct request_queue {
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_POLL))
-/*
- * @q->queue_lock is set while a queue is being initialized. Since we know
- * that no other threads access the queue object before @q->queue_lock has
- * been set, it is safe to manipulate queue flags without holding the
- * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
- * blk_init_allocated_queue().
- */
-static inline void queue_lockdep_assert_held(struct request_queue *q)
-{
- if (q->queue_lock)
- lockdep_assert_held(q->queue_lock);
-}
-
-static inline void queue_flag_set_unlocked(unsigned int flag,
- struct request_queue *q)
-{
- __set_bit(flag, &q->queue_flags);
-}
-
-static inline int queue_flag_test_and_clear(unsigned int flag,
- struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
-
- if (test_bit(flag, &q->queue_flags)) {
- __clear_bit(flag, &q->queue_flags);
- return 1;
- }
-
- return 0;
-}
-
-static inline int queue_flag_test_and_set(unsigned int flag,
- struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
-
- if (!test_bit(flag, &q->queue_flags)) {
- __set_bit(flag, &q->queue_flags);
- return 0;
- }
-
- return 1;
-}
-
-static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
- __set_bit(flag, &q->queue_flags);
-}
-
-static inline void queue_flag_clear_unlocked(unsigned int flag,
- struct request_queue *q)
-{
- __clear_bit(flag, &q->queue_flags);
-}
-
-static inline int queue_in_flight(struct request_queue *q)
-{
- return q->in_flight[0] + q->in_flight[1];
-}
-
-static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
- __clear_bit(flag, &q->queue_flags);
-}
+void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
+void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
+bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
+bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
@@ -804,6 +741,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
extern int blk_set_preempt_only(struct request_queue *q);
extern void blk_clear_preempt_only(struct request_queue *q);
+static inline int queue_in_flight(struct request_queue *q)
+{
+ return q->in_flight[0] + q->in_flight[1];
+}
+
static inline bool blk_account_rq(struct request *rq)
{
return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
@@ -1080,6 +1022,19 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
}
/*
+ * The basic unit of block I/O is a sector. It is used in a number of contexts
+ * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
+ * bytes. Variables of type sector_t represent an offset or size that is a
+ * multiple of 512 bytes. Hence these two constants.
+ */
+#ifndef SECTOR_SHIFT
+#define SECTOR_SHIFT 9
+#endif
+#ifndef SECTOR_SIZE
+#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+#endif
+
+/*
* blk_rq_pos() : the current sector
* blk_rq_bytes() : bytes left in the entire request
* blk_rq_cur_bytes() : bytes left in the current segment
@@ -1106,12 +1061,12 @@ extern unsigned int blk_rq_err_bytes(const struct request *rq);
static inline unsigned int blk_rq_sectors(const struct request *rq)
{
- return blk_rq_bytes(rq) >> 9;
+ return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}
static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
- return blk_rq_cur_bytes(rq) >> 9;
+ return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}
static inline unsigned int blk_rq_zone_no(struct request *rq)
@@ -1141,7 +1096,8 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
int op)
{
if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
- return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+ return min(q->limits.max_discard_sectors,
+ UINT_MAX >> SECTOR_SHIFT);
if (unlikely(op == REQ_OP_WRITE_SAME))
return q->limits.max_write_same_sectors;
@@ -1321,7 +1277,8 @@ extern long nr_blockdev_pages(void);
bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
-struct request_queue *blk_alloc_queue_node(gfp_t, int);
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
+ spinlock_t *lock);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
@@ -1452,16 +1409,21 @@ extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
- return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
- nr_blocks << (sb->s_blocksize_bits - 9),
+ return blkdev_issue_discard(sb->s_bdev,
+ block << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
+ nr_blocks << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask)
{
return blkdev_issue_zeroout(sb->s_bdev,
- block << (sb->s_blocksize_bits - 9),
- nr_blocks << (sb->s_blocksize_bits - 9),
+ block << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
+ nr_blocks << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
gfp_mask, 0);
}
@@ -1568,7 +1530,8 @@ static inline int queue_alignment_offset(struct request_queue *q)
static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
unsigned int granularity = max(lim->physical_block_size, lim->io_min);
- unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
+ unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
+ << SECTOR_SHIFT;
return (granularity + lim->alignment_offset - alignment) % granularity;
}
@@ -1602,8 +1565,8 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
return 0;
/* Why are these in bytes, not sectors? */
- alignment = lim->discard_alignment >> 9;
- granularity = lim->discard_granularity >> 9;
+ alignment = lim->discard_alignment >> SECTOR_SHIFT;
+ granularity = lim->discard_granularity >> SECTOR_SHIFT;
if (!granularity)
return 0;
@@ -1614,7 +1577,7 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
offset = (granularity + alignment - offset) % granularity;
/* Turn it back into bytes, gaah */
- return offset << 9;
+ return offset << SECTOR_SHIFT;
}
static inline int bdev_discard_alignment(struct block_device *bdev)
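
A small sketch, not from the patch, of the byte/sector conversions the relocated SECTOR_SHIFT/SECTOR_SIZE constants stand for; the helper names are illustrative.

#include <linux/blkdev.h>
#include <linux/types.h>

/* One sector is 512 bytes, i.e. 1 << SECTOR_SHIFT; a 4096-byte page is
 * 4096 >> 9 == 8 sectors. */
static inline sector_t example_bytes_to_sectors(u64 bytes)
{
	return bytes >> SECTOR_SHIFT;
}

static inline u64 example_sectors_to_bytes(sector_t nr_sects)
{
	return (u64)nr_sects << SECTOR_SHIFT;
}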
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index b1be0233ce35..28a7ccc55c89 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -38,12 +38,12 @@ struct bsg_buffer {
};
struct bsg_job {
- struct scsi_request sreq;
struct device *dev;
- struct request *req;
struct kref kref;
+ unsigned int timeout;
+
/* Transport/driver specific request/reply structs */
void *request;
void *reply;
@@ -63,6 +63,9 @@ struct bsg_job {
struct bsg_buffer request_payload;
struct bsg_buffer reply_payload;
+ int result;
+ unsigned int reply_payload_rcv_len;
+
void *dd_data; /* Used for driver-specific storage */
};
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index 2a202e41a3af..0c7dd9ceb139 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -1,34 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef BSG_H
-#define BSG_H
+#ifndef _LINUX_BSG_H
+#define _LINUX_BSG_H
#include <uapi/linux/bsg.h>
+struct request;
+
+#ifdef CONFIG_BLK_DEV_BSG
+struct bsg_ops {
+ int (*check_proto)(struct sg_io_v4 *hdr);
+ int (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr,
+ fmode_t mode);
+ int (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr);
+ void (*free_rq)(struct request *rq);
+};
-#if defined(CONFIG_BLK_DEV_BSG)
struct bsg_class_device {
struct device *class_dev;
struct device *parent;
int minor;
struct request_queue *queue;
struct kref ref;
+ const struct bsg_ops *ops;
void (*release)(struct device *);
};
-extern int bsg_register_queue(struct request_queue *q,
- struct device *parent, const char *name,
- void (*release)(struct device *));
-extern void bsg_unregister_queue(struct request_queue *);
+int bsg_register_queue(struct request_queue *q, struct device *parent,
+ const char *name, const struct bsg_ops *ops,
+ void (*release)(struct device *));
+int bsg_scsi_register_queue(struct request_queue *q, struct device *parent);
+void bsg_unregister_queue(struct request_queue *q);
#else
-static inline int bsg_register_queue(struct request_queue *q,
- struct device *parent, const char *name,
- void (*release)(struct device *))
+static inline int bsg_scsi_register_queue(struct request_queue *q,
+ struct device *parent)
{
return 0;
}
static inline void bsg_unregister_queue(struct request_queue *q)
{
}
-#endif
-
-#endif
+#endif /* CONFIG_BLK_DEV_BSG */
+#endif /* _LINUX_BSG_H */
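
A hedged sketch of the reworked registration flow: a transport now passes its own struct bsg_ops to bsg_register_queue() instead of relying on the SCSI-only path. Everything prefixed mytransport_ is hypothetical and the callbacks are deliberate no-ops; only the bsg_ops layout and the bsg_register_queue() prototype come from this header.

#include <linux/bsg.h>
#include <linux/device.h>
#include <linux/types.h>

/* Accept every sg_io_v4 header; a real transport would validate
 * hdr->protocol and hdr->subprotocol here. */
static int mytransport_check_proto(struct sg_io_v4 *hdr)
{
	return 0;
}

/* Copy request data from the header into the request; no-op in this sketch. */
static int mytransport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
				fmode_t mode)
{
	return 0;
}

/* Copy completion status back into the header; no-op in this sketch. */
static int mytransport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
{
	return 0;
}

/* Release anything fill_hdr allocated; nothing to do here. */
static void mytransport_free_rq(struct request *rq)
{
}

static const struct bsg_ops mytransport_bsg_ops = {
	.check_proto	= mytransport_check_proto,
	.fill_hdr	= mytransport_fill_hdr,
	.complete_rq	= mytransport_complete_rq,
	.free_rq	= mytransport_free_rq,
};

/* Register a bsg node for @q using the ops above. */
static int mytransport_bsg_setup(struct request_queue *q, struct device *parent)
{
	return bsg_register_queue(q, parent, dev_name(parent),
				  &mytransport_bsg_ops, NULL);
}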
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index d18da839b810..7e3bceee3489 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -203,6 +203,7 @@ enum {
TI_CLKM_PRM,
TI_CLKM_SCRM,
TI_CLKM_CTRL,
+ TI_CLKM_CTRL_AUX,
TI_CLKM_PLLSS,
CLK_MAX_MEMMAPS
};
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 9847c5a013c3..f188eab10570 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -226,6 +226,8 @@ typedef struct compat_siginfo {
#ifdef __ARCH_SI_TRAPNO
int _trapno; /* TRAP # which caused the signal */
#endif
+#define __COMPAT_ADDR_BND_PKEY_PAD (__alignof__(compat_uptr_t) < sizeof(short) ? \
+ sizeof(short) : __alignof__(compat_uptr_t))
union {
/*
* used when si_code=BUS_MCEERR_AR or
@@ -234,13 +236,13 @@ typedef struct compat_siginfo {
short int _addr_lsb; /* Valid LSB of the reported address. */
/* used when si_code=SEGV_BNDERR */
struct {
- compat_uptr_t _dummy_bnd;
+ char _dummy_bnd[__COMPAT_ADDR_BND_PKEY_PAD];
compat_uptr_t _lower;
compat_uptr_t _upper;
} _addr_bnd;
/* used when si_code=SEGV_PKUERR */
struct {
- compat_uptr_t _dummy_pkey;
+ char _dummy_pkey[__COMPAT_ADDR_BND_PKEY_PAD];
u32 _pkey;
} _addr_pkey;
};
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index da83f64952e7..31fef7c34185 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -87,10 +87,10 @@ typedef void (*dm_resume_fn) (struct dm_target *ti);
typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
unsigned status_flags, char *result, unsigned maxlen);
-typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
+typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
+ char *result, unsigned maxlen);
-typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti,
- struct block_device **bdev, fmode_t *mode);
+typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
/*
* These iteration functions are typically used to check (and combine)
@@ -267,6 +267,12 @@ struct dm_target {
unsigned num_discard_bios;
/*
+ * The number of secure erase bios that will be submitted to the target.
+ * The bio number can be accessed with dm_bio_get_target_bio_nr.
+ */
+ unsigned num_secure_erase_bios;
+
+ /*
* The number of WRITE SAME bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
@@ -542,8 +548,6 @@ do { \
#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
0 : scnprintf(result + sz, maxlen - sz, x))
-#define SECTOR_SHIFT 9
-
/*
* Definitions of return values from target end_io function.
*/
diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
new file mode 100644
index 000000000000..3c8b7d274bd9
--- /dev/null
+++ b/include/linux/dm-bufio.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2009-2011 Red Hat, Inc.
+ *
+ * Author: Mikulas Patocka <mpatocka@redhat.com>
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef _LINUX_DM_BUFIO_H
+#define _LINUX_DM_BUFIO_H
+
+#include <linux/blkdev.h>
+#include <linux/types.h>
+
+/*----------------------------------------------------------------*/
+
+struct dm_bufio_client;
+struct dm_buffer;
+
+/*
+ * Create a buffered IO cache on a given device
+ */
+struct dm_bufio_client *
+dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
+ unsigned reserved_buffers, unsigned aux_size,
+ void (*alloc_callback)(struct dm_buffer *),
+ void (*write_callback)(struct dm_buffer *));
+
+/*
+ * Release a buffered IO cache.
+ */
+void dm_bufio_client_destroy(struct dm_bufio_client *c);
+
+/*
+ * Set the sector range.
+ * When this function is called, there must be no I/O in progress on the bufio
+ * client.
+ */
+void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start);
+
+/*
+ * WARNING: to avoid deadlocks, these conditions are observed:
+ *
+ * - At most one thread can hold up to "reserved_buffers" buffers simultaneously.
+ * - Every other thread can hold at most one buffer.
+ * - Threads which call only dm_bufio_get can hold an unlimited number of
+ *   buffers.
+ */
+
+/*
+ * Read a given block from disk. Returns a pointer to the data and, via *bp,
+ * a pointer to the dm_buffer that can be used to release the buffer or to
+ * make it dirty.
+ */
+void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp);
+
+/*
+ * Like dm_bufio_read, but return buffer from cache, don't read
+ * it. If the buffer is not in the cache, return NULL.
+ */
+void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp);
+
+/*
+ * Like dm_bufio_read, but don't read anything from the disk. It is
+ * expected that the caller initializes the buffer and marks it dirty.
+ */
+void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp);
+
+/*
+ * Prefetch the specified blocks to the cache.
+ * The function starts to read the blocks and returns without waiting for
+ * I/O to finish.
+ */
+void dm_bufio_prefetch(struct dm_bufio_client *c,
+ sector_t block, unsigned n_blocks);
+
+/*
+ * Release a reference obtained with dm_bufio_{read,get,new}. The data
+ * pointer and dm_buffer pointer are no longer valid after this call.
+ */
+void dm_bufio_release(struct dm_buffer *b);
+
+/*
+ * Mark a buffer dirty. It should be called after the buffer is modified.
+ *
+ * In case of memory pressure, the buffer may be written after
+ * dm_bufio_mark_buffer_dirty, but before dm_bufio_write_dirty_buffers. So
+ * dm_bufio_write_dirty_buffers guarantees that the buffer is on-disk but
+ * the actual writing may occur earlier.
+ */
+void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);
+
+/*
+ * Mark a part of the buffer dirty.
+ *
+ * The specified part of the buffer is scheduled to be written. dm-bufio may
+ * write the specified part of the buffer or it may write a larger superset.
+ */
+void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
+ unsigned start, unsigned end);
+
+/*
+ * Initiate writing of dirty buffers, without waiting for completion.
+ */
+void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c);
+
+/*
+ * Write all dirty buffers. Guarantees that all dirty buffers created prior
+ * to this call are on disk when this call exits.
+ */
+int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c);
+
+/*
+ * Send an empty write barrier to the device to flush hardware disk cache.
+ */
+int dm_bufio_issue_flush(struct dm_bufio_client *c);
+
+/*
+ * Like dm_bufio_release but also move the buffer to the new
+ * block. dm_bufio_write_dirty_buffers is needed to commit the new block.
+ */
+void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block);
+
+/*
+ * Free the given buffer.
+ * This is just a hint; if the buffer is in use or dirty, this function
+ * does nothing.
+ */
+void dm_bufio_forget(struct dm_bufio_client *c, sector_t block);
+
+/*
+ * Set the minimum number of buffers before cleanup happens.
+ */
+void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
+
+unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
+sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
+sector_t dm_bufio_get_block_number(struct dm_buffer *b);
+void *dm_bufio_get_block_data(struct dm_buffer *b);
+void *dm_bufio_get_aux_data(struct dm_buffer *b);
+struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b);
+
+/*----------------------------------------------------------------*/
+
+#endif
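
A minimal usage sketch of the API the header now exports outside drivers/md: create a client, read-modify-write one block, flush it, and tear the client down. The 4 KiB block size, block number 0 and the 16-byte update are arbitrary assumptions.

#include <linux/dm-bufio.h>
#include <linux/err.h>
#include <linux/string.h>

static int example_update_block0(struct block_device *bdev)
{
	struct dm_bufio_client *c;
	struct dm_buffer *b;
	void *data;
	int r;

	/* 4 KiB blocks, one reserved buffer, no aux data or callbacks. */
	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
	if (IS_ERR(c))
		return PTR_ERR(c);

	data = dm_bufio_read(c, 0, &b);		/* read block 0 and pin the buffer */
	if (IS_ERR(data)) {
		r = PTR_ERR(data);
		goto out;
	}

	memset(data, 0, 16);			/* modify the cached data ... */
	dm_bufio_mark_buffer_dirty(b);		/* ... and schedule it for writeback */
	dm_bufio_release(b);			/* drop the pin */

	r = dm_bufio_write_dirty_buffers(c);	/* wait until it is on disk */
out:
	dm_bufio_client_destroy(c);
	return r;
}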
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 12fedcba9a9a..f8ab1c0f589e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -212,14 +212,14 @@ static inline void set_dma_ops(struct device *dev,
}
#else
/*
- * Define the dma api to allow compilation but not linking of
- * dma dependent code. Code that depends on the dma-mapping
- * API needs to set 'depends on HAS_DMA' in its Kconfig
+ * Define the dma api to allow compilation of dma dependent code.
+ * Code that depends on the dma-mapping API needs to set 'depends on HAS_DMA'
+ * in its Kconfig, unless it already depends on <something> || COMPILE_TEST,
+ * where <something> guarantees the availability of the dma-mapping API.
*/
-extern const struct dma_map_ops bad_dma_ops;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
- return &bad_dma_ops;
+ return NULL;
}
#endif
@@ -772,10 +772,19 @@ static inline void dma_deconfigure(struct device *dev) {}
/*
* Managed DMA API
*/
+#ifdef CONFIG_HAS_DMA
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle);
+#else /* !CONFIG_HAS_DMA */
+static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{ return NULL; }
+static inline void dmam_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle) { }
+#endif /* !CONFIG_HAS_DMA */
+
extern void *dmam_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
unsigned long attrs);
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index 53ba737505df..f632ecfb4238 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -16,6 +16,8 @@
struct device;
+#ifdef CONFIG_HAS_DMA
+
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t allocation);
@@ -23,13 +25,6 @@ void dma_pool_destroy(struct dma_pool *pool);
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle);
-
-static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
- dma_addr_t *handle)
-{
- return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
-}
-
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
/*
@@ -39,5 +34,26 @@ struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t allocation);
void dmam_pool_destroy(struct dma_pool *pool);
+#else /* !CONFIG_HAS_DMA */
+static inline struct dma_pool *dma_pool_create(const char *name,
+ struct device *dev, size_t size, size_t align, size_t allocation)
+{ return NULL; }
+static inline void dma_pool_destroy(struct dma_pool *pool) { }
+static inline void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+ dma_addr_t *handle) { return NULL; }
+static inline void dma_pool_free(struct dma_pool *pool, void *vaddr,
+ dma_addr_t addr) { }
+static inline struct dma_pool *dmam_pool_create(const char *name,
+ struct device *dev, size_t size, size_t align, size_t allocation)
+{ return NULL; }
+static inline void dmam_pool_destroy(struct dma_pool *pool) { }
+#endif /* !CONFIG_HAS_DMA */
+
+static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
+ dma_addr_t *handle)
+{
+ return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
+}
+
#endif
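
With dma_pool_zalloc() now defined outside the CONFIG_HAS_DMA guard, it works as a plain zeroing wrapper around whichever dma_pool_alloc() is in effect (the real one or the new NULL-returning stub). A short usage sketch; struct my_desc and the pool parameters are assumptions.

#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Hypothetical hardware descriptor. */
struct my_desc {
	__le32 flags;
	__le64 addr;
};

static int example_alloc_desc(struct device *dev)
{
	struct dma_pool *pool;
	struct my_desc *desc;
	dma_addr_t dma;

	pool = dma_pool_create("my_desc", dev, sizeof(*desc), 8, 0);
	if (!pool)
		return -ENOMEM;

	/* Zeroed allocation; with !CONFIG_HAS_DMA this simply returns NULL. */
	desc = dma_pool_zalloc(pool, GFP_KERNEL, &dma);
	if (!desc) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand "dma" to the hardware, touch "desc" from the CPU ... */

	dma_pool_free(pool, desc, dma);
	dma_pool_destroy(pool);
	return 0;
}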
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 6a86d8db16d9..c46fdb36700b 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -114,6 +114,7 @@ extern int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data);
extern bool dmi_match(enum dmi_field f, const char *str);
extern void dmi_memdev_name(u16 handle, const char **bank, const char **device);
+extern u64 dmi_memdev_size(u16 handle);
#else
@@ -144,6 +145,7 @@ static inline bool dmi_match(enum dmi_field f, const char *str)
{ return false; }
static inline void dmi_memdev_name(u16 handle, const char **bank,
const char **device) { }
+static inline u64 dmi_memdev_size(u16 handle) { return ~0ul; }
static inline const struct dmi_system_id *
dmi_first_match(const struct dmi_system_id *list) { return NULL; }
diff --git a/include/linux/edac.h b/include/linux/edac.h
index cd75c173fd00..bffb97828ed6 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -186,6 +186,7 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* @MEM_RDDR4: Registered DDR4 RAM
* This is a variant of the DDR4 memories.
* @MEM_LRDDR4: Load-Reduced DDR4 memory.
+ * @MEM_NVDIMM: Non-volatile RAM
*/
enum mem_type {
MEM_EMPTY = 0,
@@ -209,6 +210,7 @@ enum mem_type {
MEM_DDR4,
MEM_RDDR4,
MEM_LRDDR4,
+ MEM_NVDIMM,
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -231,6 +233,7 @@ enum mem_type {
#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
+#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
/**
* enum edac-type - Error Detection and Correction capabilities and mode
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 58aecb60ea51..aa5db8b5521a 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -21,6 +21,7 @@
#define F2FS_BLKSIZE 4096 /* support only 4KB block */
#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
+#define F2FS_EXTENSION_LEN 8 /* max size of extension */
#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
@@ -38,15 +39,14 @@
#define F2FS_MAX_QUOTAS 3
-#define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */
-#define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */
-#define F2FS_IO_SIZE_BYTES(sbi) (1 << ((sbi)->write_io_size_bits + 12)) /* B */
-#define F2FS_IO_SIZE_BITS(sbi) ((sbi)->write_io_size_bits) /* power of 2 */
+#define F2FS_IO_SIZE(sbi) (1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */
+#define F2FS_IO_SIZE_KB(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */
+#define F2FS_IO_SIZE_BYTES(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 12)) /* B */
+#define F2FS_IO_SIZE_BITS(sbi) (F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */
#define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1)
/* This flag is used by node and meta inodes, and by recovery */
#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO)
-#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM)
/*
* For further optimization on multi-head logs, on-disk layout supports maximum
@@ -102,7 +102,7 @@ struct f2fs_super_block {
__u8 uuid[16]; /* 128-bit uuid for volume */
__le16 volume_name[MAX_VOLUME_NAME]; /* volume name */
__le32 extension_count; /* # of extensions below */
- __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */
+ __u8 extension_list[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];/* extension array */
__le32 cp_payload;
__u8 version[VERSION_LEN]; /* the kernel version */
__u8 init_version[VERSION_LEN]; /* the initial kernel version */
@@ -111,12 +111,14 @@ struct f2fs_super_block {
__u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
struct f2fs_device devs[MAX_DEVICES]; /* device list */
__le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */
- __u8 reserved[315]; /* valid reserved region */
+	__u8 hot_ext_count;		/* # of hot file extensions */
+ __u8 reserved[314]; /* valid reserved region */
} __packed;
/*
* For checkpoint
*/
+#define CP_LARGE_NAT_BITMAP_FLAG 0x00000400
#define CP_NOCRC_RECOVERY_FLAG 0x00000200
#define CP_TRIMMED_FLAG 0x00000100
#define CP_NAT_BITS_FLAG 0x00000080
@@ -303,6 +305,10 @@ struct f2fs_node {
*/
#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
#define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8)
+#define NAT_ENTRY_BITMAP_SIZE_ALIGNED \
+ ((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) / \
+ BITS_PER_LONG * BITS_PER_LONG)
+
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
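
The new NAT_ENTRY_BITMAP_SIZE_ALIGNED rounds the per-block NAT bitmap size up to a multiple of BITS_PER_LONG, presumably so word-wide bitmap helpers never read past the buffer. A worked sketch under the usual assumptions (4 KiB blocks, 9-byte packed f2fs_nat_entry, 64-bit kernel): 4096 / 9 = 455 entries per block, (455 + 7) / 8 = 57 bytes of bitmap, rounded up to 64 bytes.

#include <linux/bitops.h>	/* BITS_PER_LONG */

/* The same round-up the macro performs, written out as a helper. */
static inline unsigned int example_nat_bitmap_aligned(unsigned int bitmap_bytes)
{
	return (bitmap_bytes + BITS_PER_LONG - 1) / BITS_PER_LONG * BITS_PER_LONG;
}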
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index c3c95d18bf43..7e6c77740413 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -64,10 +64,11 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name,
struct kmem_cache;
+int should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#ifdef CONFIG_FAILSLAB
-extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags);
+extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#else
-static inline bool should_failslab(struct kmem_cache *s, gfp_t gfpflags)
+static inline bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
return false;
}
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c6baf767619e..1ee7f592e239 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1321,6 +1321,8 @@ extern int send_sigurg(struct fown_struct *fown);
/* sb->s_iflags to limit user namespace mounts */
#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
+#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
+#define SB_I_UNTRUSTED_MOUNTER 0x00000040
/* Possible states of 'frozen' field */
enum {
@@ -2015,7 +2017,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
#define I_WB_SWITCH (1 << 13)
#define I_OVL_INUSE (1 << 14)
-#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
+#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
+#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
extern void __mark_inode_dirty(struct inode *, int);
@@ -2381,8 +2384,8 @@ struct audit_names;
struct filename {
const char *name; /* pointer to actual string */
const __user char *uptr; /* original userland pointer */
- struct audit_names *aname;
int refcnt;
+ struct audit_names *aname;
const char iname[];
};
@@ -2977,12 +2980,6 @@ enum {
/* filesystem does not support filling holes */
DIO_SKIP_HOLES = 0x02,
-
- /* filesystem can handle aio writes beyond i_size */
- DIO_ASYNC_EXTEND = 0x04,
-
- /* inode/fs/bdev does not need truncate protection */
- DIO_SKIP_DIO_COUNT = 0x08,
};
void dio_end_io(struct bio *bio);
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 3b03e29e2f1a..34cf0fdd7dc7 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -29,6 +29,18 @@ struct fscache_cache_ops;
struct fscache_object;
struct fscache_operation;
+enum fscache_obj_ref_trace {
+ fscache_obj_get_add_to_deps,
+ fscache_obj_get_queue,
+ fscache_obj_put_alloc_fail,
+ fscache_obj_put_attach_fail,
+ fscache_obj_put_drop_obj,
+ fscache_obj_put_enq_dep,
+ fscache_obj_put_queue,
+ fscache_obj_put_work,
+ fscache_obj_ref__nr_traces
+};
+
/*
* cache tag definition
*/
@@ -123,7 +135,8 @@ extern void fscache_op_work_func(struct work_struct *work);
extern void fscache_enqueue_operation(struct fscache_operation *);
extern void fscache_op_complete(struct fscache_operation *, bool);
extern void fscache_put_operation(struct fscache_operation *);
-extern void fscache_operation_init(struct fscache_operation *,
+extern void fscache_operation_init(struct fscache_cookie *,
+ struct fscache_operation *,
fscache_operation_processor_t,
fscache_operation_cancel_t,
fscache_operation_release_t);
@@ -185,7 +198,7 @@ static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
{
atomic_sub(n_pages, &op->n_pages);
if (atomic_read(&op->n_pages) <= 0)
- fscache_op_complete(&op->op, true);
+ fscache_op_complete(&op->op, false);
}
/**
@@ -231,7 +244,8 @@ struct fscache_cache_ops {
void (*lookup_complete)(struct fscache_object *object);
/* increment the usage count on this object (may fail if unmounting) */
- struct fscache_object *(*grab_object)(struct fscache_object *object);
+ struct fscache_object *(*grab_object)(struct fscache_object *object,
+ enum fscache_obj_ref_trace why);
/* pin an object in the cache */
int (*pin_object)(struct fscache_object *object);
@@ -254,7 +268,8 @@ struct fscache_cache_ops {
void (*drop_object)(struct fscache_object *object);
/* dispose of a reference to an object */
- void (*put_object)(struct fscache_object *object);
+ void (*put_object)(struct fscache_object *object,
+ enum fscache_obj_ref_trace why);
/* sync a cache */
void (*sync_cache)(struct fscache_cache *cache);
@@ -538,7 +553,8 @@ extern bool fscache_object_sleep_till_congested(signed long *timeoutp);
extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
const void *data,
- uint16_t datalen);
+ uint16_t datalen,
+ loff_t object_size);
extern void fscache_object_retrying_stale(struct fscache_object *object);
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index fe0c349684fa..84b90a79d75a 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -22,6 +22,7 @@
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
+#include <linux/list_bl.h>
#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
#define fscache_available() (1)
@@ -83,45 +84,15 @@ struct fscache_cookie_def {
const void *parent_netfs_data,
const void *cookie_netfs_data);
- /* get an index key
- * - should store the key data in the buffer
- * - should return the amount of data stored
- * - not permitted to return an error
- * - the netfs data from the cookie being used as the source is
- * presented
- */
- uint16_t (*get_key)(const void *cookie_netfs_data,
- void *buffer,
- uint16_t bufmax);
-
- /* get certain file attributes from the netfs data
- * - this function can be absent for an index
- * - not permitted to return an error
- * - the netfs data from the cookie being used as the source is
- * presented
- */
- void (*get_attr)(const void *cookie_netfs_data, uint64_t *size);
-
- /* get the auxiliary data from netfs data
- * - this function can be absent if the index carries no state data
- * - should store the auxiliary data in the buffer
- * - should return the amount of amount stored
- * - not permitted to return an error
- * - the netfs data from the cookie being used as the source is
- * presented
- */
- uint16_t (*get_aux)(const void *cookie_netfs_data,
- void *buffer,
- uint16_t bufmax);
-
/* consult the netfs about the state of an object
* - this function can be absent if the index carries no state data
* - the netfs data from the cookie being used as the target is
- * presented, as is the auxiliary data
+	 *   presented, as are the auxiliary data and the object size
*/
enum fscache_checkaux (*check_aux)(void *cookie_netfs_data,
const void *data,
- uint16_t datalen);
+ uint16_t datalen,
+ loff_t object_size);
/* get an extra reference on a read context
* - this function can be absent if the completion function doesn't
@@ -154,7 +125,6 @@ struct fscache_netfs {
uint32_t version; /* indexing version */
const char *name; /* filesystem name */
struct fscache_cookie *primary_index;
- struct list_head link; /* internal link */
};
/*
@@ -173,6 +143,7 @@ struct fscache_cookie {
struct hlist_head backing_objects; /* object(s) backing this file/index */
const struct fscache_cookie_def *def; /* definition */
struct fscache_cookie *parent; /* parent of this entry */
+ struct hlist_bl_node hash_link; /* Link in hash table */
void *netfs_data; /* back pointer to netfs */
struct radix_tree_root stores; /* pages to be stored on this cookie */
#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
@@ -186,6 +157,22 @@ struct fscache_cookie {
#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */
#define FSCACHE_COOKIE_ENABLED 5 /* T if cookie is enabled */
#define FSCACHE_COOKIE_ENABLEMENT_LOCK 6 /* T if cookie is being en/disabled */
+#define FSCACHE_COOKIE_AUX_UPDATED 8 /* T if the auxiliary data was updated */
+#define FSCACHE_COOKIE_ACQUIRED 9 /* T if cookie is in use */
+#define FSCACHE_COOKIE_RELINQUISHING 10 /* T if cookie is being relinquished */
+
+ u8 type; /* Type of object */
+ u8 key_len; /* Length of index key */
+ u8 aux_len; /* Length of auxiliary data */
+ u32 key_hash; /* Hash of parent, type, key, len */
+ union {
+ void *key; /* Index key */
+ u8 inline_key[16]; /* - If the key is short enough */
+ };
+ union {
+ void *aux; /* Auxiliary data */
+ u8 inline_aux[8]; /* - If the aux data is short enough */
+ };
};
static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
@@ -208,10 +195,12 @@ extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
extern struct fscache_cookie *__fscache_acquire_cookie(
struct fscache_cookie *,
const struct fscache_cookie_def *,
- void *, bool);
-extern void __fscache_relinquish_cookie(struct fscache_cookie *, bool);
-extern int __fscache_check_consistency(struct fscache_cookie *);
-extern void __fscache_update_cookie(struct fscache_cookie *);
+ const void *, size_t,
+ const void *, size_t,
+ void *, loff_t, bool);
+extern void __fscache_relinquish_cookie(struct fscache_cookie *, const void *, bool);
+extern int __fscache_check_consistency(struct fscache_cookie *, const void *);
+extern void __fscache_update_cookie(struct fscache_cookie *, const void *);
extern int __fscache_attr_changed(struct fscache_cookie *);
extern void __fscache_invalidate(struct fscache_cookie *);
extern void __fscache_wait_on_invalidate(struct fscache_cookie *);
@@ -228,7 +217,7 @@ extern int __fscache_read_or_alloc_pages(struct fscache_cookie *,
void *,
gfp_t);
extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
-extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
+extern int __fscache_write_page(struct fscache_cookie *, struct page *, loff_t, gfp_t);
extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
@@ -238,8 +227,8 @@ extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
struct inode *);
extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
struct list_head *pages);
-extern void __fscache_disable_cookie(struct fscache_cookie *, bool);
-extern void __fscache_enable_cookie(struct fscache_cookie *,
+extern void __fscache_disable_cookie(struct fscache_cookie *, const void *, bool);
+extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_t,
bool (*)(void *), void *);
/**
@@ -317,8 +306,13 @@ void fscache_release_cache_tag(struct fscache_cache_tag *tag)
* fscache_acquire_cookie - Acquire a cookie to represent a cache object
* @parent: The cookie that's to be the parent of this one
* @def: A description of the cache object, including callback operations
+ * @index_key: The index key for this cookie
+ * @index_key_len: Size of the index key
+ * @aux_data: The auxiliary data for the cookie (may be NULL)
+ * @aux_data_len: Size of the auxiliary data buffer
* @netfs_data: An arbitrary piece of data to be kept in the cookie to
* represent the cache object to the netfs
+ * @object_size: The initial size of object
* @enable: Whether or not to enable a data cookie immediately
*
* This function is used to inform FS-Cache about part of an index hierarchy
@@ -332,12 +326,19 @@ static inline
struct fscache_cookie *fscache_acquire_cookie(
struct fscache_cookie *parent,
const struct fscache_cookie_def *def,
+ const void *index_key,
+ size_t index_key_len,
+ const void *aux_data,
+ size_t aux_data_len,
void *netfs_data,
+ loff_t object_size,
bool enable)
{
if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent))
- return __fscache_acquire_cookie(parent, def, netfs_data,
- enable);
+ return __fscache_acquire_cookie(parent, def,
+ index_key, index_key_len,
+ aux_data, aux_data_len,
+ netfs_data, object_size, enable);
else
return NULL;
}
@@ -346,36 +347,44 @@ struct fscache_cookie *fscache_acquire_cookie(
* fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding
* it
* @cookie: The cookie being returned
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
* @retire: True if the cache object the cookie represents is to be discarded
*
* This function returns a cookie to the cache, forcibly discarding the
- * associated cache object if retire is set to true.
+ * associated cache object if retire is set to true. The opportunity is
+ * provided to update the auxiliary data in the cache before the object is
+ * disconnected.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
-void fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
+void fscache_relinquish_cookie(struct fscache_cookie *cookie,
+ const void *aux_data,
+ bool retire)
{
if (fscache_cookie_valid(cookie))
- __fscache_relinquish_cookie(cookie, retire);
+ __fscache_relinquish_cookie(cookie, aux_data, retire);
}
/**
- * fscache_check_consistency - Request that if the cache is updated
+ * fscache_check_consistency - Request validation of a cache's auxiliary data
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
*
- * Request an consistency check from fscache, which passes the request
- * to the backing cache.
+ * Request a consistency check from fscache, which passes the request to the
+ * backing cache. The auxiliary data on the cookie will be updated first if
+ * @aux_data is set.
*
* Returns 0 if consistent and -ESTALE if inconsistent. May also
* return -ENOMEM and -ERESTARTSYS.
*/
static inline
-int fscache_check_consistency(struct fscache_cookie *cookie)
+int fscache_check_consistency(struct fscache_cookie *cookie,
+ const void *aux_data)
{
if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_check_consistency(cookie);
+ return __fscache_check_consistency(cookie, aux_data);
else
return 0;
}
@@ -383,18 +392,20 @@ int fscache_check_consistency(struct fscache_cookie *cookie)
/**
* fscache_update_cookie - Request that a cache object be updated
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
*
* Request an update of the index data for the cache object associated with the
- * cookie.
+ * cookie. The auxiliary data on the cookie will be updated first if @aux_data
+ * is set.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
-void fscache_update_cookie(struct fscache_cookie *cookie)
+void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data)
{
if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_update_cookie(cookie);
+ __fscache_update_cookie(cookie, aux_data);
}
/**
@@ -648,6 +659,7 @@ void fscache_readpages_cancel(struct fscache_cookie *cookie,
* fscache_write_page - Request storage of a page in the cache
* @cookie: The cookie representing the cache object
* @page: The netfs page to store
+ * @object_size: Updated size of object
* @gfp: The conditions under which memory allocation should be made
*
* Request the contents of the netfs page be written into the cache. This
@@ -665,10 +677,11 @@ void fscache_readpages_cancel(struct fscache_cookie *cookie,
static inline
int fscache_write_page(struct fscache_cookie *cookie,
struct page *page,
+ loff_t object_size,
gfp_t gfp)
{
if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_write_page(cookie, page, gfp);
+ return __fscache_write_page(cookie, page, object_size, gfp);
else
return -ENOBUFS;
}
@@ -780,6 +793,7 @@ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
/**
* fscache_disable_cookie - Disable a cookie
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
* @invalidate: Invalidate the backing object
*
* Disable a cookie from accepting further alloc, read, write, invalidate,
@@ -790,34 +804,44 @@ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
*
* If @invalidate is set, then the backing object will be invalidated and
* detached, otherwise it will just be detached.
+ *
+ * If @aux_data is set, then auxiliary data will be updated from that.
*/
static inline
-void fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
+void fscache_disable_cookie(struct fscache_cookie *cookie,
+ const void *aux_data,
+ bool invalidate)
{
if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_disable_cookie(cookie, invalidate);
+ __fscache_disable_cookie(cookie, aux_data, invalidate);
}
/**
* fscache_enable_cookie - Reenable a cookie
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
+ * @object_size: Current size of object
* @can_enable: A function to permit enablement once lock is held
* @data: Data for can_enable()
*
* Reenable a previously disabled cookie, allowing it to accept further alloc,
* read, write, invalidate, update or acquire operations. An attempt will be
- * made to immediately reattach the cookie to a backing object.
+ * made to immediately reattach the cookie to a backing object. If @aux_data
+ * is set, the auxiliary data attached to the cookie will be updated.
*
* The can_enable() function is called (if not NULL) once the enablement lock
* is held to rule on whether enablement is still permitted to go ahead.
*/
static inline
void fscache_enable_cookie(struct fscache_cookie *cookie,
+ const void *aux_data,
+ loff_t object_size,
bool (*can_enable)(void *data),
void *data)
{
if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie))
- __fscache_enable_cookie(cookie, can_enable, data);
+ __fscache_enable_cookie(cookie, aux_data, object_size,
+ can_enable, data);
}
#endif /* _LINUX_FSCACHE_H */
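
A hedged sketch of the new cookie API from a netfs point of view: the index key and auxiliary data are now handed in at acquire time (and may be refreshed at relinquish time) instead of being pulled back through the removed get_key()/get_aux()/get_attr() callbacks. The my_inode_aux layout, the 64-bit inode-number key and the helper names are assumptions; only the function signatures come from this header.

#include <linux/fscache.h>
#include <linux/types.h>

/* Hypothetical auxiliary data a netfs might use for coherency checks. */
struct my_inode_aux {
	__be64 mtime;
	__be64 size;
};

static struct fscache_cookie *example_get_cookie(struct fscache_cookie *parent,
						 const struct fscache_cookie_def *def,
						 u64 ino, loff_t isize,
						 const struct my_inode_aux *aux,
						 void *netfs_priv)
{
	__be64 key = cpu_to_be64(ino);

	return fscache_acquire_cookie(parent, def,
				      &key, sizeof(key),
				      aux, sizeof(*aux),
				      netfs_priv, isize, true);
}

static void example_put_cookie(struct fscache_cookie *cookie,
			       const struct my_inode_aux *aux)
{
	/* Last chance to refresh the aux data before the object is dropped. */
	fscache_relinquish_cookie(cookie, aux, false);
}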
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 067d52e95f02..9f1edb92c97e 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -331,6 +331,12 @@ extern int fsnotify_add_event(struct fsnotify_group *group,
struct fsnotify_event *event,
int (*merge)(struct list_head *,
struct fsnotify_event *));
+/* Queue overflow event to a notification group */
+static inline void fsnotify_queue_overflow(struct fsnotify_group *group)
+{
+ fsnotify_add_event(group, group->overflow_event, NULL);
+}
+
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 1ba9a331ec51..5382b5183b7e 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -288,6 +288,21 @@ struct gpio_chip {
struct gpio_irq_chip irq;
#endif
+ /**
+ * @need_valid_mask:
+ *
+	 * If set, the core allocates @valid_mask with all bits set to one.
+ */
+ bool need_valid_mask;
+
+ /**
+ * @valid_mask:
+ *
+	 * If not %NULL, holds a bitmask of GPIOs which are valid to be used
+	 * from the chip.
+ */
+ unsigned long *valid_mask;
+
#if defined(CONFIG_OF_GPIO)
/*
* If CONFIG_OF is enabled, then all GPIO controllers described in the
@@ -384,6 +399,7 @@ bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset);
/* Sleep persistence inquiry for drivers */
bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset);
+bool gpiochip_line_is_valid(const struct gpio_chip *chip, unsigned int offset);
/* get driver data */
void *gpiochip_get_data(struct gpio_chip *chip);
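
A short sketch, not from the patch, of how the new valid_mask machinery is meant to be consumed: a controller driver sets need_valid_mask before registration so the core allocates valid_mask with every bit set, clears the bits for lines it must not expose, and users test lines with gpiochip_line_is_valid(). The loop below and its comments are illustrative.

#include <linux/gpio/driver.h>

static void example_walk_valid_lines(struct gpio_chip *chip)
{
	unsigned int offset;

	for (offset = 0; offset < chip->ngpio; offset++) {
		if (!gpiochip_line_is_valid(chip, offset))
			continue;	/* e.g. reserved by firmware */

		/* ... request or configure the usable line ... */
	}
}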
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index d06bf77400f1..7160df54a6fe 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -13,6 +13,7 @@ struct device;
* @desc: label that will be attached to button's gpio
* @type: input event type (%EV_KEY, %EV_SW, %EV_ABS)
* @wakeup: configure the button as a wake-up source
+ * @wakeup_event_action: event action to trigger wakeup
* @debounce_interval: debounce ticks interval in msecs
* @can_disable: %true indicates that userspace is allowed to
* disable button via sysfs
@@ -26,6 +27,7 @@ struct gpio_keys_button {
const char *desc;
unsigned int type;
int wakeup;
+ int wakeup_event_action;
int debounce_interval;
bool can_disable;
int value;
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 091a81cf330f..8da3e1f48195 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -26,6 +26,7 @@
#define __HID_H
+#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -310,13 +311,13 @@ struct hid_item {
* HID connect requests
*/
-#define HID_CONNECT_HIDINPUT 0x01
-#define HID_CONNECT_HIDINPUT_FORCE 0x02
-#define HID_CONNECT_HIDRAW 0x04
-#define HID_CONNECT_HIDDEV 0x08
-#define HID_CONNECT_HIDDEV_FORCE 0x10
-#define HID_CONNECT_FF 0x20
-#define HID_CONNECT_DRIVER 0x40
+#define HID_CONNECT_HIDINPUT BIT(0)
+#define HID_CONNECT_HIDINPUT_FORCE BIT(1)
+#define HID_CONNECT_HIDRAW BIT(2)
+#define HID_CONNECT_HIDDEV BIT(3)
+#define HID_CONNECT_HIDDEV_FORCE BIT(4)
+#define HID_CONNECT_FF BIT(5)
+#define HID_CONNECT_DRIVER BIT(6)
#define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \
HID_CONNECT_HIDDEV|HID_CONNECT_FF)
@@ -329,25 +330,25 @@ struct hid_item {
*/
#define MAX_USBHID_BOOT_QUIRKS 4
-#define HID_QUIRK_INVERT 0x00000001
-#define HID_QUIRK_NOTOUCH 0x00000002
-#define HID_QUIRK_IGNORE 0x00000004
-#define HID_QUIRK_NOGET 0x00000008
-#define HID_QUIRK_HIDDEV_FORCE 0x00000010
-#define HID_QUIRK_BADPAD 0x00000020
-#define HID_QUIRK_MULTI_INPUT 0x00000040
-#define HID_QUIRK_HIDINPUT_FORCE 0x00000080
-#define HID_QUIRK_NO_EMPTY_INPUT 0x00000100
-/* 0x00000200 reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */
-#define HID_QUIRK_ALWAYS_POLL 0x00000400
-#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
-#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000
-#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000
-#define HID_QUIRK_HAVE_SPECIAL_DRIVER 0x00080000
-#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
-#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
-#define HID_QUIRK_NO_IGNORE 0x40000000
-#define HID_QUIRK_NO_INPUT_SYNC 0x80000000
+#define HID_QUIRK_INVERT BIT(0)
+#define HID_QUIRK_NOTOUCH BIT(1)
+#define HID_QUIRK_IGNORE BIT(2)
+#define HID_QUIRK_NOGET BIT(3)
+#define HID_QUIRK_HIDDEV_FORCE BIT(4)
+#define HID_QUIRK_BADPAD BIT(5)
+#define HID_QUIRK_MULTI_INPUT BIT(6)
+#define HID_QUIRK_HIDINPUT_FORCE BIT(7)
+/* BIT(8) reserved for backward compatibility, was HID_QUIRK_NO_EMPTY_INPUT */
+/* BIT(9) reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */
+#define HID_QUIRK_ALWAYS_POLL BIT(10)
+#define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16)
+#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17)
+#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18)
+#define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19)
+#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28)
+#define HID_QUIRK_NO_INIT_REPORTS BIT(29)
+#define HID_QUIRK_NO_IGNORE BIT(30)
+#define HID_QUIRK_NO_INPUT_SYNC BIT(31)
/*
* HID device groups
@@ -494,13 +495,13 @@ struct hid_output_fifo {
char *raw_report;
};
-#define HID_CLAIMED_INPUT 1
-#define HID_CLAIMED_HIDDEV 2
-#define HID_CLAIMED_HIDRAW 4
-#define HID_CLAIMED_DRIVER 8
+#define HID_CLAIMED_INPUT BIT(0)
+#define HID_CLAIMED_HIDDEV BIT(1)
+#define HID_CLAIMED_HIDRAW BIT(2)
+#define HID_CLAIMED_DRIVER BIT(3)
-#define HID_STAT_ADDED 1
-#define HID_STAT_PARSED 2
+#define HID_STAT_ADDED BIT(0)
+#define HID_STAT_PARSED BIT(1)
struct hid_input {
struct list_head list;
@@ -686,8 +687,6 @@ struct hid_usage_id {
* @input_mapped: invoked on input registering after mapping an usage
* @input_configured: invoked just before the device is registered
* @feature_mapping: invoked on feature registering
- * @bus_add_driver: invoked when a HID driver is about to be added
- * @bus_removed_driver: invoked when a HID driver has been removed
* @suspend: invoked on suspend (NULL means nop)
* @resume: invoked on resume if device was not reset (NULL means nop)
* @reset_resume: invoked on resume if device was reset (NULL means nop)
@@ -742,8 +741,6 @@ struct hid_driver {
void (*feature_mapping)(struct hid_device *hdev,
struct hid_field *field,
struct hid_usage *usage);
- void (*bus_add_driver)(struct hid_driver *driver);
- void (*bus_removed_driver)(struct hid_driver *driver);
#ifdef CONFIG_PM
int (*suspend)(struct hid_device *hdev, pm_message_t message);
int (*resume)(struct hid_device *hdev);
@@ -851,7 +848,7 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force);
extern void hidinput_disconnect(struct hid_device *);
int hid_set_field(struct hid_field *, unsigned, __s32);
-int hid_input_report(struct hid_device *, int type, u8 *, int, int);
+int hid_input_report(struct hid_device *, int type, u8 *, u32, int);
int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
struct hid_field *hidinput_get_led_field(struct hid_device *hid);
unsigned int hidinput_count_leds(struct hid_device *hid);
@@ -1102,13 +1099,13 @@ static inline void hid_hw_wait(struct hid_device *hdev)
*
* @report: the report we want to know the length
*/
-static inline int hid_report_len(struct hid_report *report)
+static inline u32 hid_report_len(struct hid_report *report)
{
/* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
return ((report->size - 1) >> 3) + 1 + (report->id > 0);
}
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
int interrupt);
/* HID quirks API */
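
Illustrative only: the BIT() conversion keeps every connect/quirk/claim value identical, so existing mask tests are unaffected, and hid_report_len() now returns u32 to match the widened raw-event paths. For a 72-bit report with a non-zero report ID the helper yields ((72 - 1) >> 3) + 1 + 1 = 10 bytes.

#include <linux/hid.h>

static bool example_wants_multi_input(struct hid_device *hdev)
{
	return hdev->quirks & HID_QUIRK_MULTI_INPUT;	/* BIT(6) == 0x00000040 */
}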
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index ceb751987c40..e5fd2707b6df 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -29,6 +29,7 @@ enum hwmon_sensor_types {
hwmon_humidity,
hwmon_fan,
hwmon_pwm,
+ hwmon_max,
};
enum hwmon_chip_attributes {
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 2048f3c3b68a..192ed8fbc403 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -26,7 +26,6 @@
#define _HYPERV_H
#include <uapi/linux/hyperv.h>
-#include <uapi/asm/hyperv.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
diff --git a/include/linux/i2c-pca-platform.h b/include/linux/i2c-pca-platform.h
index 0e5f7c77d1d8..c37329432a8e 100644
--- a/include/linux/i2c-pca-platform.h
+++ b/include/linux/i2c-pca-platform.h
@@ -3,9 +3,6 @@
#define I2C_PCA9564_PLATFORM_H
struct i2c_pca9564_pf_platform_data {
- int gpio; /* pin to reset chip. driver will work when
- * not supplied (negative value), but it
- * cannot exit some error conditions then */
int i2c_clock_speed; /* values are defined in linux/i2c-algo-pca.h */
int timeout; /* timeout in jiffies */
};
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 419a38e7c315..44ad14e016b5 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -47,6 +47,7 @@ struct i2c_algorithm;
struct i2c_adapter;
struct i2c_client;
struct i2c_driver;
+struct i2c_device_identity;
union i2c_smbus_data;
struct i2c_board_info;
enum i2c_slave_event;
@@ -186,8 +187,37 @@ extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
extern s32
i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
u8 command, u8 length, u8 *values);
+int i2c_get_device_id(const struct i2c_client *client,
+ struct i2c_device_identity *id);
#endif /* I2C */
+/**
+ * struct i2c_device_identity - i2c client device identification
+ * @manufacturer_id: 0 - 4095, database maintained by NXP
+ * @part_id: 0 - 511, according to manufacturer
+ * @die_revision: 0 - 7, according to manufacturer
+ */
+struct i2c_device_identity {
+ u16 manufacturer_id;
+#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS 0
+#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_1 1
+#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_2 2
+#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_3 3
+#define I2C_DEVICE_ID_RAMTRON_INTERNATIONAL 4
+#define I2C_DEVICE_ID_ANALOG_DEVICES 5
+#define I2C_DEVICE_ID_STMICROELECTRONICS 6
+#define I2C_DEVICE_ID_ON_SEMICONDUCTOR 7
+#define I2C_DEVICE_ID_SPRINTEK_CORPORATION 8
+#define I2C_DEVICE_ID_ESPROS_PHOTONICS_AG 9
+#define I2C_DEVICE_ID_FUJITSU_SEMICONDUCTOR 10
+#define I2C_DEVICE_ID_FLIR 11
+#define I2C_DEVICE_ID_O2MICRO 12
+#define I2C_DEVICE_ID_ATMEL 13
+#define I2C_DEVICE_ID_NONE 0xffff
+ u16 part_id;
+ u8 die_revision;
+};
+
enum i2c_alert_protocol {
I2C_PROTOCOL_SMBUS_ALERT,
I2C_PROTOCOL_SMBUS_HOST_NOTIFY,
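
A usage sketch for the new standard Device ID query; the dev_info() reporting is illustrative. i2c_get_device_id() fills in the manufacturer, part and die-revision fields or returns a negative errno.

#include <linux/i2c.h>

static int example_identify(struct i2c_client *client)
{
	struct i2c_device_identity id;
	int ret;

	ret = i2c_get_device_id(client, &id);
	if (ret)
		return ret;

	if (id.manufacturer_id == I2C_DEVICE_ID_NXP_SEMICONDUCTORS)
		dev_info(&client->dev, "NXP part %u, die revision %u\n",
			 id.part_id, id.die_revision);
	return 0;
}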
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 1d6f16110eae..ca9d34feb572 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -160,7 +160,6 @@ struct ide_io_ports {
*/
#define PARTN_BITS 6 /* number of minor dev bits for partitions */
#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
-#define SECTOR_SIZE 512
/*
* Timeouts for various operations:
diff --git a/include/linux/inet.h b/include/linux/inet.h
index 636ebe87e6f8..97defc1139e9 100644
--- a/include/linux/inet.h
+++ b/include/linux/inet.h
@@ -59,5 +59,6 @@ extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char
extern int inet_pton_with_scope(struct net *net, unsigned short af,
const char *src, const char *port, struct sockaddr_storage *addr);
+extern bool inet_addr_is_any(struct sockaddr *addr);
#endif /* _LINUX_INET_H */
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d6459bd1376d..de784fd11d12 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -43,7 +43,7 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark);
void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
slab_flags_t *flags);
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
@@ -92,7 +92,7 @@ static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
- size_t *size,
+ unsigned int *size,
slab_flags_t *flags) {}
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3cbf3cfff4f0..52b70894eaa5 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -792,41 +792,59 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */
/*
- * min()/max()/clamp() macros that also do
- * strict type-checking.. See the
- * "unnecessary" pointer comparison.
+ * min()/max()/clamp() macros must accomplish three things:
+ *
+ * - avoid multiple evaluations of the arguments (so side-effects like
+ * "x++" happen only once) when non-constant.
+ * - perform strict type-checking (to generate warnings instead of
+ * nasty runtime surprises). See the "unnecessary" pointer comparison
+ * in __typecheck().
+ * - retain the result as a constant expression when called with only
+ * constant expressions (to avoid tripping VLA warnings in stack
+ * allocation usage).
+ */
+#define __typecheck(x, y) \
+ (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
+
+/*
+ * This returns a constant expression while determining if an argument is
+ * a constant expression, most importantly without evaluating the argument.
+ * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
*/
-#define __min(t1, t2, min1, min2, x, y) ({ \
- t1 min1 = (x); \
- t2 min2 = (y); \
- (void) (&min1 == &min2); \
- min1 < min2 ? min1 : min2; })
+#define __is_constexpr(x) \
+ (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
+
+#define __no_side_effects(x, y) \
+ (__is_constexpr(x) && __is_constexpr(y))
+
+#define __safe_cmp(x, y) \
+ (__typecheck(x, y) && __no_side_effects(x, y))
+
+#define __cmp(x, y, op) ((x) op (y) ? (x) : (y))
+
+#define __cmp_once(x, y, unique_x, unique_y, op) ({ \
+ typeof(x) unique_x = (x); \
+ typeof(y) unique_y = (y); \
+ __cmp(unique_x, unique_y, op); })
+
+#define __careful_cmp(x, y, op) \
+ __builtin_choose_expr(__safe_cmp(x, y), \
+ __cmp(x, y, op), \
+ __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
/**
* min - return minimum of two values of the same or compatible types
* @x: first value
* @y: second value
*/
-#define min(x, y) \
- __min(typeof(x), typeof(y), \
- __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
- x, y)
-
-#define __max(t1, t2, max1, max2, x, y) ({ \
- t1 max1 = (x); \
- t2 max2 = (y); \
- (void) (&max1 == &max2); \
- max1 > max2 ? max1 : max2; })
+#define min(x, y) __careful_cmp(x, y, <)
/**
* max - return maximum of two values of the same or compatible types
* @x: first value
* @y: second value
*/
-#define max(x, y) \
- __max(typeof(x), typeof(y), \
- __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \
- x, y)
+#define max(x, y) __careful_cmp(x, y, >)
/**
* min3 - return minimum of three values
@@ -878,10 +896,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* @x: first value
* @y: second value
*/
-#define min_t(type, x, y) \
- __min(type, type, \
- __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
- x, y)
+#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)
/**
* max_t - return maximum of two values, using the specified type
@@ -889,10 +904,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* @x: first value
* @y: second value
*/
-#define max_t(type, x, y) \
- __max(type, type, \
- __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
- x, y)
+#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)
/**
* clamp_t - return a value clamped to a given range using a given type
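The rework above replaces the old temporary-variable macros with __builtin_choose_expr() so that min()/max() remain integer constant expressions when given constants, while still evaluating side-effecting arguments exactly once. A minimal userspace model of the same construction, assuming GCC/Clang extensions (typeof, statement expressions, __builtin_choose_expr) and approximating __UNIQUE_ID() with __COUNTER__, which is how the kernel builds it as well:

/*
 * Hedged, userspace-only model of the macros above; not kernel code.
 */
#include <stdio.h>

#define ___PASTE(a, b)		a##b
#define __PASTE(a, b)		___PASTE(a, b)
#define __UNIQUE_ID(prefix)	__PASTE(prefix, __COUNTER__)

#define __typecheck(x, y) \
	(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
#define __no_side_effects(x, y)	(__is_constexpr(x) && __is_constexpr(y))
#define __safe_cmp(x, y)	(__typecheck(x, y) && __no_side_effects(x, y))
#define __cmp(x, y, op)		((x) op (y) ? (x) : (y))
#define __cmp_once(x, y, ux, uy, op) ({	\
	typeof(x) ux = (x);		\
	typeof(y) uy = (y);		\
	__cmp(ux, uy, op); })
#define min(x, y)						\
	__builtin_choose_expr(__safe_cmp(x, y),			\
			      __cmp(x, y, <),			\
			      __cmp_once(x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_), <))

int main(void)
{
	int a = 3;
	char buf[min(16, 32)];	/* constant inputs: buf is not a VLA */
	int m = min(a++, 10);	/* a++ is evaluated exactly once */

	printf("m=%d a=%d sizeof(buf)=%zu\n", m, a, sizeof(buf));
	return 0;
}

Built with gcc, buf is a plain 16-byte array rather than a VLA, and a ends up at 4, confirming the single evaluation of a++.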
diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h
index 51f6ef2c2ff4..f23b90b02898 100644
--- a/include/linux/kvm_para.h
+++ b/include/linux/kvm_para.h
@@ -9,4 +9,9 @@ static inline bool kvm_para_has_feature(unsigned int feature)
{
return !!(kvm_arch_para_features() & (1UL << feature));
}
+
+static inline bool kvm_para_has_hint(unsigned int feature)
+{
+ return !!(kvm_arch_para_hints() & (1UL << feature));
+}
#endif /* __LINUX_KVM_PARA_H */
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index 14997285e53d..c6ac1fe7ec68 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBFDT_ENV_H
-#define _LIBFDT_ENV_H
+#ifndef LIBFDT_ENV_H
+#define LIBFDT_ENV_H
#include <linux/string.h>
@@ -15,4 +15,4 @@ typedef __be64 fdt64_t;
#define fdt64_to_cpu(x) be64_to_cpu(x)
#define cpu_to_fdt64(x) cpu_to_be64(x)
-#endif /* _LIBFDT_ENV_H */
+#endif /* LIBFDT_ENV_H */
diff --git a/include/linux/libps2.h b/include/linux/libps2.h
index 4ad06e824f76..5f18fe02ae37 100644
--- a/include/linux/libps2.h
+++ b/include/linux/libps2.h
@@ -10,7 +10,13 @@
* the Free Software Foundation.
*/
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#define PS2_CMD_SETSCALE11 0x00e6
+#define PS2_CMD_SETRES 0x10e8
#define PS2_CMD_GETID 0x02f2
#define PS2_CMD_RESET_BAT 0x02ff
@@ -20,11 +26,12 @@
#define PS2_RET_NAK 0xfe
#define PS2_RET_ERR 0xfc
-#define PS2_FLAG_ACK 1 /* Waiting for ACK/NAK */
-#define PS2_FLAG_CMD 2 /* Waiting for command to finish */
-#define PS2_FLAG_CMD1 4 /* Waiting for the first byte of command response */
-#define PS2_FLAG_WAITID 8 /* Command execiting is GET ID */
-#define PS2_FLAG_NAK 16 /* Last transmission was NAKed */
+#define PS2_FLAG_ACK BIT(0) /* Waiting for ACK/NAK */
+#define PS2_FLAG_CMD BIT(1) /* Waiting for a command to finish */
+#define PS2_FLAG_CMD1 BIT(2) /* Waiting for the first byte of command response */
+#define PS2_FLAG_WAITID BIT(3) /* Command executing is GET ID */
+#define PS2_FLAG_NAK BIT(4) /* Last transmission was NAKed */
+#define PS2_FLAG_ACK_CMD BIT(5) /* Waiting to ACK the command (first) byte */
struct ps2dev {
struct serio *serio;
@@ -36,21 +43,22 @@ struct ps2dev {
wait_queue_head_t wait;
unsigned long flags;
- unsigned char cmdbuf[8];
- unsigned char cmdcnt;
- unsigned char nak;
+ u8 cmdbuf[8];
+ u8 cmdcnt;
+ u8 nak;
};
void ps2_init(struct ps2dev *ps2dev, struct serio *serio);
-int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout);
-void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout);
+int ps2_sendbyte(struct ps2dev *ps2dev, u8 byte, unsigned int timeout);
+void ps2_drain(struct ps2dev *ps2dev, size_t maxbytes, unsigned int timeout);
void ps2_begin_command(struct ps2dev *ps2dev);
void ps2_end_command(struct ps2dev *ps2dev);
-int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command);
-int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command);
-int ps2_handle_ack(struct ps2dev *ps2dev, unsigned char data);
-int ps2_handle_response(struct ps2dev *ps2dev, unsigned char data);
+int __ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command);
+int ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command);
+int ps2_sliced_command(struct ps2dev *ps2dev, u8 command);
+bool ps2_handle_ack(struct ps2dev *ps2dev, u8 data);
+bool ps2_handle_response(struct ps2dev *ps2dev, u8 data);
void ps2_cmd_aborted(struct ps2dev *ps2dev);
-int ps2_is_keyboard_id(char id);
+bool ps2_is_keyboard_id(u8 id);
#endif /* _LIBPS2_H */
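With the prototypes now using u8 and bool, a caller-side sketch looks like the following. It assumes a driver that has already set up its struct ps2dev with ps2_init(); PS2_CMD_GETID follows the existing convention of encoding the response length in the upper command byte.

/*
 * Hedged sketch: assumes @ps2dev was initialised with ps2_init(); error
 * handling is reduced to the essentials.
 */
#include <linux/errno.h>
#include <linux/libps2.h>

static int example_read_ps2_id(struct ps2dev *ps2dev, u8 *id)
{
	u8 param[2] = { 0 };
	int error;

	error = ps2_command(ps2dev, param, PS2_CMD_GETID);
	if (error)
		return error;

	*id = param[0];
	return ps2_is_keyboard_id(param[0]) ? 0 : -ENODEV;
}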
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 7f4b60abdf27..6e0859b9d4d2 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -16,26 +16,58 @@ enum {
NVM_IOTYPE_GC = 1,
};
-#define NVM_BLK_BITS (16)
-#define NVM_PG_BITS (16)
-#define NVM_SEC_BITS (8)
-#define NVM_PL_BITS (8)
-#define NVM_LUN_BITS (8)
-#define NVM_CH_BITS (7)
+/* common format */
+#define NVM_GEN_CH_BITS (8)
+#define NVM_GEN_LUN_BITS (8)
+#define NVM_GEN_BLK_BITS (16)
+#define NVM_GEN_RESERVED (32)
+
+/* 1.2 format */
+#define NVM_12_PG_BITS (16)
+#define NVM_12_PL_BITS (4)
+#define NVM_12_SEC_BITS (4)
+#define NVM_12_RESERVED (8)
+
+/* 2.0 format */
+#define NVM_20_SEC_BITS (24)
+#define NVM_20_RESERVED (8)
+
+enum {
+ NVM_OCSSD_SPEC_12 = 12,
+ NVM_OCSSD_SPEC_20 = 20,
+};
struct ppa_addr {
/* Generic structure for all addresses */
union {
+ /* generic device format */
+ struct {
+ u64 ch : NVM_GEN_CH_BITS;
+ u64 lun : NVM_GEN_LUN_BITS;
+ u64 blk : NVM_GEN_BLK_BITS;
+ u64 reserved : NVM_GEN_RESERVED;
+ } a;
+
+ /* 1.2 device format */
struct {
- u64 blk : NVM_BLK_BITS;
- u64 pg : NVM_PG_BITS;
- u64 sec : NVM_SEC_BITS;
- u64 pl : NVM_PL_BITS;
- u64 lun : NVM_LUN_BITS;
- u64 ch : NVM_CH_BITS;
- u64 reserved : 1;
+ u64 ch : NVM_GEN_CH_BITS;
+ u64 lun : NVM_GEN_LUN_BITS;
+ u64 blk : NVM_GEN_BLK_BITS;
+ u64 pg : NVM_12_PG_BITS;
+ u64 pl : NVM_12_PL_BITS;
+ u64 sec : NVM_12_SEC_BITS;
+ u64 reserved : NVM_12_RESERVED;
} g;
+ /* 2.0 device format */
+ struct {
+ u64 grp : NVM_GEN_CH_BITS;
+ u64 pu : NVM_GEN_LUN_BITS;
+ u64 chk : NVM_GEN_BLK_BITS;
+ u64 sec : NVM_20_SEC_BITS;
+ u64 reserved : NVM_20_RESERVED;
+ } m;
+
struct {
u64 line : 63;
u64 is_cached : 1;
@@ -49,10 +81,13 @@ struct nvm_rq;
struct nvm_id;
struct nvm_dev;
struct nvm_tgt_dev;
+struct nvm_chk_meta;
-typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
+typedef int (nvm_id_fn)(struct nvm_dev *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
+typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, struct nvm_chk_meta *,
+ sector_t, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
@@ -66,6 +101,8 @@ struct nvm_dev_ops {
nvm_op_bb_tbl_fn *get_bb_tbl;
nvm_op_set_bb_fn *set_bb_tbl;
+ nvm_get_chk_meta_fn *get_chk_meta;
+
nvm_submit_io_fn *submit_io;
nvm_submit_io_sync_fn *submit_io_sync;
@@ -73,8 +110,6 @@ struct nvm_dev_ops {
nvm_destroy_dma_pool_fn *destroy_dma_pool;
nvm_dev_dma_alloc_fn *dev_dma_alloc;
nvm_dev_dma_free_fn *dev_dma_free;
-
- unsigned int max_phys_sect;
};
#ifdef CONFIG_NVM
@@ -154,60 +189,75 @@ struct nvm_id_lp_tbl {
struct nvm_id_lp_mlc mlc;
};
-struct nvm_id_group {
- u8 mtype;
- u8 fmtype;
- u8 num_ch;
- u8 num_lun;
- u16 num_chk;
- u16 clba;
- u16 csecs;
- u16 sos;
-
- u16 ws_min;
- u16 ws_opt;
- u16 ws_seq;
- u16 ws_per_chk;
-
- u32 trdt;
- u32 trdm;
- u32 tprt;
- u32 tprm;
- u32 tbet;
- u32 tbem;
- u32 mpos;
- u32 mccap;
- u16 cpar;
-
- /* 1.2 compatibility */
- u8 num_pln;
- u16 num_pg;
- u16 fpg_sz;
-};
-
-struct nvm_addr_format {
- u8 ch_offset;
+struct nvm_addrf_12 {
u8 ch_len;
- u8 lun_offset;
u8 lun_len;
- u8 pln_offset;
+ u8 blk_len;
+ u8 pg_len;
u8 pln_len;
+ u8 sec_len;
+
+ u8 ch_offset;
+ u8 lun_offset;
u8 blk_offset;
- u8 blk_len;
u8 pg_offset;
- u8 pg_len;
- u8 sect_offset;
- u8 sect_len;
+ u8 pln_offset;
+ u8 sec_offset;
+
+ u64 ch_mask;
+ u64 lun_mask;
+ u64 blk_mask;
+ u64 pg_mask;
+ u64 pln_mask;
+ u64 sec_mask;
};
-struct nvm_id {
- u8 ver_id;
- u8 vmnt;
- u32 cap;
- u32 dom;
- struct nvm_addr_format ppaf;
- struct nvm_id_group grp;
-} __packed;
+struct nvm_addrf {
+ u8 ch_len;
+ u8 lun_len;
+ u8 chk_len;
+ u8 sec_len;
+ u8 rsv_len[2];
+
+ u8 ch_offset;
+ u8 lun_offset;
+ u8 chk_offset;
+ u8 sec_offset;
+ u8 rsv_off[2];
+
+ u64 ch_mask;
+ u64 lun_mask;
+ u64 chk_mask;
+ u64 sec_mask;
+ u64 rsv_mask[2];
+};
+
+enum {
+ /* Chunk states */
+ NVM_CHK_ST_FREE = 1 << 0,
+ NVM_CHK_ST_CLOSED = 1 << 1,
+ NVM_CHK_ST_OPEN = 1 << 2,
+ NVM_CHK_ST_OFFLINE = 1 << 3,
+
+ /* Chunk types */
+ NVM_CHK_TP_W_SEQ = 1 << 0,
+ NVM_CHK_TP_W_RAN = 1 << 1,
+ NVM_CHK_TP_SZ_SPEC = 1 << 4,
+};
+
+/*
+ * Note: The structure size is linked to nvme_nvm_chk_meta such that the same
+ * buffer can be used when converting from little endian to cpu addressing.
+ */
+struct nvm_chk_meta {
+ u8 state;
+ u8 type;
+ u8 wi;
+ u8 rsvd[5];
+ u64 slba;
+ u64 cnlb;
+ u64 wp;
+};
struct nvm_target {
struct list_head list;
@@ -226,6 +276,8 @@ struct nvm_target {
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0
+#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */
+
struct nvm_rq;
typedef void (nvm_end_io_fn)(struct nvm_rq *);
@@ -272,38 +324,69 @@ enum {
NVM_BLK_ST_BAD = 0x8, /* Bad block */
};
-
-/* Device generic information */
+/* Instance geometry */
struct nvm_geo {
- /* generic geometry */
- int nr_chnls;
- int all_luns; /* across channels */
- int nr_luns; /* per channel */
- int nr_chks; /* per lun */
+ /* device reported version */
+ u8 major_ver_id;
+ u8 minor_ver_id;
+
+ /* kernel short version */
+ u8 version;
- int sec_size;
- int oob_size;
- int mccap;
+ /* instance specific geometry */
+ int num_ch;
+ int num_lun; /* per channel */
- int sec_per_chk;
- int sec_per_lun;
+ /* calculated values */
+ int all_luns; /* across channels */
+ int all_chunks; /* across channels */
- int ws_min;
- int ws_opt;
- int ws_seq;
- int ws_per_chk;
+ int op; /* over-provision in instance */
- int max_rq_size;
+ sector_t total_secs; /* across channels */
+
+ /* chunk geometry */
+ u32 num_chk; /* chunks per lun */
+ u32 clba; /* sectors per chunk */
+ u16 csecs; /* sector size */
+ u16 sos; /* out-of-band area size */
+
+ /* device write constraints */
+ u32 ws_min; /* minimum write size */
+ u32 ws_opt; /* optimal write size */
+ u32 mw_cunits; /* distance required for successful read */
+ u32 maxoc; /* maximum open chunks */
+ u32 maxocpu; /* maximum open chunks per parallel unit */
+
+ /* device capabilities */
+ u32 mccap;
+
+ /* device timings */
+ u32 trdt; /* Avg. Tread (ns) */
+ u32 trdm; /* Max Tread (ns) */
+ u32 tprt; /* Avg. Tprog (ns) */
+ u32 tprm; /* Max Tprog (ns) */
+ u32 tbet; /* Avg. Terase (ns) */
+ u32 tbem; /* Max Terase (ns) */
+
+ /* generic address format */
+ struct nvm_addrf addrf;
+
+ /* 1.2 compatibility */
+ u8 vmnt;
+ u32 cap;
+ u32 dom;
- int op;
+ u8 mtype;
+ u8 fmtype;
- struct nvm_addr_format ppaf;
+ u16 cpar;
+ u32 mpos;
- /* Legacy 1.2 specific geometry */
- int plane_mode; /* drive device in single, double or quad mode */
- int nr_planes;
- int sec_per_pg; /* only sectors for a single page */
- int sec_per_pl; /* all sectors across planes */
+ u8 num_pln;
+ u8 pln_mode;
+ u16 num_pg;
+ u16 fpg_sz;
};
/* sub-device structure */
@@ -314,9 +397,6 @@ struct nvm_tgt_dev {
/* Base ppas for target LUNs */
struct ppa_addr *luns;
- sector_t total_secs;
-
- struct nvm_id identity;
struct request_queue *q;
struct nvm_dev *parent;
@@ -331,13 +411,9 @@ struct nvm_dev {
/* Device information */
struct nvm_geo geo;
- unsigned long total_secs;
-
unsigned long *lun_map;
void *dma_pool;
- struct nvm_id identity;
-
/* Backend device */
struct request_queue *q;
char name[DISK_NAME_LEN];
@@ -353,44 +429,58 @@ struct nvm_dev {
struct list_head targets;
};
-static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev,
+static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
struct ppa_addr r)
{
- struct nvm_geo *geo = &tgt_dev->geo;
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr l;
- l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset;
- l.ppa |= ((u64)r.g.pg) << geo->ppaf.pg_offset;
- l.ppa |= ((u64)r.g.sec) << geo->ppaf.sect_offset;
- l.ppa |= ((u64)r.g.pl) << geo->ppaf.pln_offset;
- l.ppa |= ((u64)r.g.lun) << geo->ppaf.lun_offset;
- l.ppa |= ((u64)r.g.ch) << geo->ppaf.ch_offset;
+ if (geo->version == NVM_OCSSD_SPEC_12) {
+ struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;
+
+ l.ppa = ((u64)r.g.ch) << ppaf->ch_offset;
+ l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset;
+ l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset;
+ l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset;
+ l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset;
+ l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset;
+ } else {
+ struct nvm_addrf *lbaf = &geo->addrf;
+
+ l.ppa = ((u64)r.m.grp) << lbaf->ch_offset;
+ l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset;
+ l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset;
+ l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset;
+ }
return l;
}
-static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev,
+static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
struct ppa_addr r)
{
- struct nvm_geo *geo = &tgt_dev->geo;
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr l;
l.ppa = 0;
- /*
- * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
- */
- l.g.blk = (r.ppa >> geo->ppaf.blk_offset) &
- (((1 << geo->ppaf.blk_len) - 1));
- l.g.pg |= (r.ppa >> geo->ppaf.pg_offset) &
- (((1 << geo->ppaf.pg_len) - 1));
- l.g.sec |= (r.ppa >> geo->ppaf.sect_offset) &
- (((1 << geo->ppaf.sect_len) - 1));
- l.g.pl |= (r.ppa >> geo->ppaf.pln_offset) &
- (((1 << geo->ppaf.pln_len) - 1));
- l.g.lun |= (r.ppa >> geo->ppaf.lun_offset) &
- (((1 << geo->ppaf.lun_len) - 1));
- l.g.ch |= (r.ppa >> geo->ppaf.ch_offset) &
- (((1 << geo->ppaf.ch_len) - 1));
+
+ if (geo->version == NVM_OCSSD_SPEC_12) {
+ struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;
+
+ l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset;
+ l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset;
+ l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset;
+ l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset;
+ l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset;
+ l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset;
+ } else {
+ struct nvm_addrf *lbaf = &geo->addrf;
+
+ l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset;
+ l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset;
+ l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset;
+ l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset;
+ }
return l;
}
@@ -434,9 +524,13 @@ extern struct nvm_dev *nvm_alloc_dev(int);
extern int nvm_register(struct nvm_dev *);
extern void nvm_unregister(struct nvm_dev *);
+
+extern int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev,
+ struct nvm_chk_meta *meta, struct ppa_addr ppa,
+ int nchks);
+
extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
int, int);
-extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
extern void nvm_end_io(struct nvm_rq *);
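A hedged sketch of how a target might consume the new report-chunk interface: allocate an array of nvm_chk_meta, fetch it with nvm_get_chunk_meta(), and inspect the NVM_CHK_ST_* state bits. The starting PPA and chunk count are illustrative; a real target derives them from tgt_dev->geo.

/*
 * Hedged sketch: @first and @nchks are illustrative inputs.
 */
#include <linux/errno.h>
#include <linux/lightnvm.h>
#include <linux/slab.h>

static int example_count_free_chunks(struct nvm_tgt_dev *tgt_dev,
				     struct ppa_addr first, int nchks)
{
	struct nvm_chk_meta *meta;
	int i, nr_free = 0, ret;

	meta = kcalloc(nchks, sizeof(*meta), GFP_KERNEL);
	if (!meta)
		return -ENOMEM;

	ret = nvm_get_chunk_meta(tgt_dev, meta, first, nchks);
	if (!ret)
		for (i = 0; i < nchks; i++)
			if (meta[i].state & NVM_CHK_ST_FREE)
				nr_free++;

	kfree(meta);
	return ret ? ret : nr_free;
}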
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index bb8129a3474d..96def9d15b1b 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -32,6 +32,7 @@ struct list_lru_one {
};
struct list_lru_memcg {
+ struct rcu_head rcu;
/* array of per cgroup lists, indexed by memcg_cache_id */
struct list_lru_one *lru[0];
};
@@ -43,7 +44,7 @@ struct list_lru_node {
struct list_lru_one lru;
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
- struct list_lru_memcg *memcg_lrus;
+ struct list_lru_memcg __rcu *memcg_lrus;
#endif
long nr_items;
} ____cacheline_aligned_in_smp;
diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h
new file mode 100644
index 000000000000..cbd9d8495690
--- /dev/null
+++ b/include/linux/logic_pio.h
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
+ * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
+ * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
+ */
+
+#ifndef __LINUX_LOGIC_PIO_H
+#define __LINUX_LOGIC_PIO_H
+
+#include <linux/fwnode.h>
+
+enum {
+ LOGIC_PIO_INDIRECT, /* Indirect IO flag */
+ LOGIC_PIO_CPU_MMIO, /* Memory-mapped IO flag */
+};
+
+struct logic_pio_hwaddr {
+ struct list_head list;
+ struct fwnode_handle *fwnode;
+ resource_size_t hw_start;
+ resource_size_t io_start;
+ resource_size_t size; /* range size populated */
+ unsigned long flags;
+
+ void *hostdata;
+ const struct logic_pio_host_ops *ops;
+};
+
+struct logic_pio_host_ops {
+ u32 (*in)(void *hostdata, unsigned long addr, size_t dwidth);
+ void (*out)(void *hostdata, unsigned long addr, u32 val,
+ size_t dwidth);
+ u32 (*ins)(void *hostdata, unsigned long addr, void *buffer,
+ size_t dwidth, unsigned int count);
+ void (*outs)(void *hostdata, unsigned long addr, const void *buffer,
+ size_t dwidth, unsigned int count);
+};
+
+#ifdef CONFIG_INDIRECT_PIO
+u8 logic_inb(unsigned long addr);
+void logic_outb(u8 value, unsigned long addr);
+void logic_outw(u16 value, unsigned long addr);
+void logic_outl(u32 value, unsigned long addr);
+u16 logic_inw(unsigned long addr);
+u32 logic_inl(unsigned long addr);
+void logic_outb(u8 value, unsigned long addr);
+void logic_outw(u16 value, unsigned long addr);
+void logic_outl(u32 value, unsigned long addr);
+void logic_insb(unsigned long addr, void *buffer, unsigned int count);
+void logic_insl(unsigned long addr, void *buffer, unsigned int count);
+void logic_insw(unsigned long addr, void *buffer, unsigned int count);
+void logic_outsb(unsigned long addr, const void *buffer, unsigned int count);
+void logic_outsw(unsigned long addr, const void *buffer, unsigned int count);
+void logic_outsl(unsigned long addr, const void *buffer, unsigned int count);
+
+#ifndef inb
+#define inb logic_inb
+#endif
+
+#ifndef inw
+#define inw logic_inw
+#endif
+
+#ifndef inl
+#define inl logic_inl
+#endif
+
+#ifndef outb
+#define outb logic_outb
+#endif
+
+#ifndef outw
+#define outw logic_outw
+#endif
+
+#ifndef outl
+#define outl logic_outl
+#endif
+
+#ifndef insb
+#define insb logic_insb
+#endif
+
+#ifndef insw
+#define insw logic_insw
+#endif
+
+#ifndef insl
+#define insl logic_insl
+#endif
+
+#ifndef outsb
+#define outsb logic_outsb
+#endif
+
+#ifndef outsw
+#define outsw logic_outsw
+#endif
+
+#ifndef outsl
+#define outsl logic_outsl
+#endif
+
+/*
+ * We reserve 0x4000 bytes for Indirect IO as so far this library is only
+ * used by the HiSilicon LPC Host. If needed, we can reserve a wider IO
+ * area by redefining the macro below.
+ */
+#define PIO_INDIRECT_SIZE 0x4000
+#define MMIO_UPPER_LIMIT (IO_SPACE_LIMIT - PIO_INDIRECT_SIZE)
+#else
+#define MMIO_UPPER_LIMIT IO_SPACE_LIMIT
+#endif /* CONFIG_INDIRECT_PIO */
+
+struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
+unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
+ resource_size_t hw_addr, resource_size_t size);
+int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
+resource_size_t logic_pio_to_hwaddr(unsigned long pio);
+unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr);
+
+#endif /* __LINUX_LOGIC_PIO_H */
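A hypothetical registration sketch for a host controller using the indirect-IO path, assuming CONFIG_INDIRECT_PIO=y. Only struct logic_pio_hwaddr, its fields, and logic_pio_register_range() come from the header above; the fwnode, hostdata, and accessor table are placeholders the (hypothetical) caller would supply.

/*
 * Hypothetical sketch: @fwnode, @hostdata and @ops are caller-provided
 * placeholders.
 */
#include <linux/errno.h>
#include <linux/logic_pio.h>
#include <linux/slab.h>

static int example_register_indirect_range(struct fwnode_handle *fwnode,
					   void *hostdata,
					   const struct logic_pio_host_ops *ops)
{
	struct logic_pio_hwaddr *range;

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (!range)
		return -ENOMEM;

	range->fwnode   = fwnode;
	range->flags    = LOGIC_PIO_INDIRECT;
	range->size     = PIO_INDIRECT_SIZE;
	range->hostdata = hostdata;
	range->ops      = ops;

	return logic_pio_register_range(range);
}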
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index bde167fa2c51..9d0b286f3dba 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -554,6 +554,10 @@
* @new points to the new credentials.
* @old points to the original credentials.
* Transfer data from original creds to new creds
+ * @cred_getsecid:
+ * Retrieve the security identifier of the cred structure @c.
+ * @c contains the credentials; the secid will be placed into @secid.
+ * In case of failure, @secid will be set to zero.
* @kernel_act_as:
* Set the credentials for a kernel service to act as (subjective context).
* @new points to the credentials to be modified.
@@ -672,7 +676,8 @@
* @p contains the task_struct for process.
* @info contains the signal information.
* @sig contains the signal value.
- * @secid contains the sid of the process where the signal originated
+ * @cred contains the cred of the process where the signal originated, or
+ * NULL if the current task is the originator.
* Return 0 if permission is granted.
* @task_prctl:
* Check permission before performing a process control operation on the
@@ -906,6 +911,33 @@
* associated with the TUN device's security structure.
 * @security pointer to the TUN device's security structure.
*
+ * Security hooks for SCTP
+ *
+ * @sctp_assoc_request:
+ * Passes the @ep and @chunk->skb of the association INIT packet to
+ * the security module.
+ * @ep pointer to sctp endpoint structure.
+ * @skb pointer to skbuff of association packet.
+ * Return 0 on success, error on failure.
+ * @sctp_bind_connect:
+ * Validate permissions required for each address associated with sock
+ * @sk. Depending on @optname, the addresses will be treated as either
+ * for a connect or bind service. The @addrlen is calculated on each
+ * ipv4 and ipv6 address using sizeof(struct sockaddr_in) or
+ * sizeof(struct sockaddr_in6).
+ * @sk pointer to sock structure.
+ * @optname name of the option to validate.
+ * @address list containing one or more ipv4/ipv6 addresses.
+ * @addrlen total length of address(s).
+ * Return 0 on success, error on failure.
+ * @sctp_sk_clone:
+ * Called whenever a new socket is created by accept(2) (i.e. a TCP
+ * style socket) or when a socket is 'peeled off', e.g. userspace
+ * calls sctp_peeloff(3).
+ * @ep pointer to current sctp endpoint structure.
+ * @sk pointer to current sock structure.
+ * @newsk pointer to new sock structure.
+ *
* Security hooks for Infiniband
*
* @ib_pkey_access:
@@ -1541,6 +1573,7 @@ union security_list_options {
int (*cred_prepare)(struct cred *new, const struct cred *old,
gfp_t gfp);
void (*cred_transfer)(struct cred *new, const struct cred *old);
+ void (*cred_getsecid)(const struct cred *c, u32 *secid);
int (*kernel_act_as)(struct cred *new, u32 secid);
int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
int (*kernel_module_request)(char *kmod_name);
@@ -1564,7 +1597,7 @@ union security_list_options {
int (*task_getscheduler)(struct task_struct *p);
int (*task_movememory)(struct task_struct *p);
int (*task_kill)(struct task_struct *p, struct siginfo *info,
- int sig, u32 secid);
+ int sig, const struct cred *cred);
int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
void (*task_to_inode)(struct task_struct *p, struct inode *inode);
@@ -1665,6 +1698,12 @@ union security_list_options {
int (*tun_dev_attach_queue)(void *security);
int (*tun_dev_attach)(struct sock *sk, void *security);
int (*tun_dev_open)(void *security);
+ int (*sctp_assoc_request)(struct sctp_endpoint *ep,
+ struct sk_buff *skb);
+ int (*sctp_bind_connect)(struct sock *sk, int optname,
+ struct sockaddr *address, int addrlen);
+ void (*sctp_sk_clone)(struct sctp_endpoint *ep, struct sock *sk,
+ struct sock *newsk);
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_INFINIBAND
@@ -1730,230 +1769,234 @@ union security_list_options {
};
struct security_hook_heads {
- struct list_head binder_set_context_mgr;
- struct list_head binder_transaction;
- struct list_head binder_transfer_binder;
- struct list_head binder_transfer_file;
- struct list_head ptrace_access_check;
- struct list_head ptrace_traceme;
- struct list_head capget;
- struct list_head capset;
- struct list_head capable;
- struct list_head quotactl;
- struct list_head quota_on;
- struct list_head syslog;
- struct list_head settime;
- struct list_head vm_enough_memory;
- struct list_head bprm_set_creds;
- struct list_head bprm_check_security;
- struct list_head bprm_committing_creds;
- struct list_head bprm_committed_creds;
- struct list_head sb_alloc_security;
- struct list_head sb_free_security;
- struct list_head sb_copy_data;
- struct list_head sb_remount;
- struct list_head sb_kern_mount;
- struct list_head sb_show_options;
- struct list_head sb_statfs;
- struct list_head sb_mount;
- struct list_head sb_umount;
- struct list_head sb_pivotroot;
- struct list_head sb_set_mnt_opts;
- struct list_head sb_clone_mnt_opts;
- struct list_head sb_parse_opts_str;
- struct list_head dentry_init_security;
- struct list_head dentry_create_files_as;
+ struct hlist_head binder_set_context_mgr;
+ struct hlist_head binder_transaction;
+ struct hlist_head binder_transfer_binder;
+ struct hlist_head binder_transfer_file;
+ struct hlist_head ptrace_access_check;
+ struct hlist_head ptrace_traceme;
+ struct hlist_head capget;
+ struct hlist_head capset;
+ struct hlist_head capable;
+ struct hlist_head quotactl;
+ struct hlist_head quota_on;
+ struct hlist_head syslog;
+ struct hlist_head settime;
+ struct hlist_head vm_enough_memory;
+ struct hlist_head bprm_set_creds;
+ struct hlist_head bprm_check_security;
+ struct hlist_head bprm_committing_creds;
+ struct hlist_head bprm_committed_creds;
+ struct hlist_head sb_alloc_security;
+ struct hlist_head sb_free_security;
+ struct hlist_head sb_copy_data;
+ struct hlist_head sb_remount;
+ struct hlist_head sb_kern_mount;
+ struct hlist_head sb_show_options;
+ struct hlist_head sb_statfs;
+ struct hlist_head sb_mount;
+ struct hlist_head sb_umount;
+ struct hlist_head sb_pivotroot;
+ struct hlist_head sb_set_mnt_opts;
+ struct hlist_head sb_clone_mnt_opts;
+ struct hlist_head sb_parse_opts_str;
+ struct hlist_head dentry_init_security;
+ struct hlist_head dentry_create_files_as;
#ifdef CONFIG_SECURITY_PATH
- struct list_head path_unlink;
- struct list_head path_mkdir;
- struct list_head path_rmdir;
- struct list_head path_mknod;
- struct list_head path_truncate;
- struct list_head path_symlink;
- struct list_head path_link;
- struct list_head path_rename;
- struct list_head path_chmod;
- struct list_head path_chown;
- struct list_head path_chroot;
+ struct hlist_head path_unlink;
+ struct hlist_head path_mkdir;
+ struct hlist_head path_rmdir;
+ struct hlist_head path_mknod;
+ struct hlist_head path_truncate;
+ struct hlist_head path_symlink;
+ struct hlist_head path_link;
+ struct hlist_head path_rename;
+ struct hlist_head path_chmod;
+ struct hlist_head path_chown;
+ struct hlist_head path_chroot;
#endif
- struct list_head inode_alloc_security;
- struct list_head inode_free_security;
- struct list_head inode_init_security;
- struct list_head inode_create;
- struct list_head inode_link;
- struct list_head inode_unlink;
- struct list_head inode_symlink;
- struct list_head inode_mkdir;
- struct list_head inode_rmdir;
- struct list_head inode_mknod;
- struct list_head inode_rename;
- struct list_head inode_readlink;
- struct list_head inode_follow_link;
- struct list_head inode_permission;
- struct list_head inode_setattr;
- struct list_head inode_getattr;
- struct list_head inode_setxattr;
- struct list_head inode_post_setxattr;
- struct list_head inode_getxattr;
- struct list_head inode_listxattr;
- struct list_head inode_removexattr;
- struct list_head inode_need_killpriv;
- struct list_head inode_killpriv;
- struct list_head inode_getsecurity;
- struct list_head inode_setsecurity;
- struct list_head inode_listsecurity;
- struct list_head inode_getsecid;
- struct list_head inode_copy_up;
- struct list_head inode_copy_up_xattr;
- struct list_head file_permission;
- struct list_head file_alloc_security;
- struct list_head file_free_security;
- struct list_head file_ioctl;
- struct list_head mmap_addr;
- struct list_head mmap_file;
- struct list_head file_mprotect;
- struct list_head file_lock;
- struct list_head file_fcntl;
- struct list_head file_set_fowner;
- struct list_head file_send_sigiotask;
- struct list_head file_receive;
- struct list_head file_open;
- struct list_head task_alloc;
- struct list_head task_free;
- struct list_head cred_alloc_blank;
- struct list_head cred_free;
- struct list_head cred_prepare;
- struct list_head cred_transfer;
- struct list_head kernel_act_as;
- struct list_head kernel_create_files_as;
- struct list_head kernel_read_file;
- struct list_head kernel_post_read_file;
- struct list_head kernel_module_request;
- struct list_head task_fix_setuid;
- struct list_head task_setpgid;
- struct list_head task_getpgid;
- struct list_head task_getsid;
- struct list_head task_getsecid;
- struct list_head task_setnice;
- struct list_head task_setioprio;
- struct list_head task_getioprio;
- struct list_head task_prlimit;
- struct list_head task_setrlimit;
- struct list_head task_setscheduler;
- struct list_head task_getscheduler;
- struct list_head task_movememory;
- struct list_head task_kill;
- struct list_head task_prctl;
- struct list_head task_to_inode;
- struct list_head ipc_permission;
- struct list_head ipc_getsecid;
- struct list_head msg_msg_alloc_security;
- struct list_head msg_msg_free_security;
- struct list_head msg_queue_alloc_security;
- struct list_head msg_queue_free_security;
- struct list_head msg_queue_associate;
- struct list_head msg_queue_msgctl;
- struct list_head msg_queue_msgsnd;
- struct list_head msg_queue_msgrcv;
- struct list_head shm_alloc_security;
- struct list_head shm_free_security;
- struct list_head shm_associate;
- struct list_head shm_shmctl;
- struct list_head shm_shmat;
- struct list_head sem_alloc_security;
- struct list_head sem_free_security;
- struct list_head sem_associate;
- struct list_head sem_semctl;
- struct list_head sem_semop;
- struct list_head netlink_send;
- struct list_head d_instantiate;
- struct list_head getprocattr;
- struct list_head setprocattr;
- struct list_head ismaclabel;
- struct list_head secid_to_secctx;
- struct list_head secctx_to_secid;
- struct list_head release_secctx;
- struct list_head inode_invalidate_secctx;
- struct list_head inode_notifysecctx;
- struct list_head inode_setsecctx;
- struct list_head inode_getsecctx;
+ struct hlist_head inode_alloc_security;
+ struct hlist_head inode_free_security;
+ struct hlist_head inode_init_security;
+ struct hlist_head inode_create;
+ struct hlist_head inode_link;
+ struct hlist_head inode_unlink;
+ struct hlist_head inode_symlink;
+ struct hlist_head inode_mkdir;
+ struct hlist_head inode_rmdir;
+ struct hlist_head inode_mknod;
+ struct hlist_head inode_rename;
+ struct hlist_head inode_readlink;
+ struct hlist_head inode_follow_link;
+ struct hlist_head inode_permission;
+ struct hlist_head inode_setattr;
+ struct hlist_head inode_getattr;
+ struct hlist_head inode_setxattr;
+ struct hlist_head inode_post_setxattr;
+ struct hlist_head inode_getxattr;
+ struct hlist_head inode_listxattr;
+ struct hlist_head inode_removexattr;
+ struct hlist_head inode_need_killpriv;
+ struct hlist_head inode_killpriv;
+ struct hlist_head inode_getsecurity;
+ struct hlist_head inode_setsecurity;
+ struct hlist_head inode_listsecurity;
+ struct hlist_head inode_getsecid;
+ struct hlist_head inode_copy_up;
+ struct hlist_head inode_copy_up_xattr;
+ struct hlist_head file_permission;
+ struct hlist_head file_alloc_security;
+ struct hlist_head file_free_security;
+ struct hlist_head file_ioctl;
+ struct hlist_head mmap_addr;
+ struct hlist_head mmap_file;
+ struct hlist_head file_mprotect;
+ struct hlist_head file_lock;
+ struct hlist_head file_fcntl;
+ struct hlist_head file_set_fowner;
+ struct hlist_head file_send_sigiotask;
+ struct hlist_head file_receive;
+ struct hlist_head file_open;
+ struct hlist_head task_alloc;
+ struct hlist_head task_free;
+ struct hlist_head cred_alloc_blank;
+ struct hlist_head cred_free;
+ struct hlist_head cred_prepare;
+ struct hlist_head cred_transfer;
+ struct hlist_head cred_getsecid;
+ struct hlist_head kernel_act_as;
+ struct hlist_head kernel_create_files_as;
+ struct hlist_head kernel_read_file;
+ struct hlist_head kernel_post_read_file;
+ struct hlist_head kernel_module_request;
+ struct hlist_head task_fix_setuid;
+ struct hlist_head task_setpgid;
+ struct hlist_head task_getpgid;
+ struct hlist_head task_getsid;
+ struct hlist_head task_getsecid;
+ struct hlist_head task_setnice;
+ struct hlist_head task_setioprio;
+ struct hlist_head task_getioprio;
+ struct hlist_head task_prlimit;
+ struct hlist_head task_setrlimit;
+ struct hlist_head task_setscheduler;
+ struct hlist_head task_getscheduler;
+ struct hlist_head task_movememory;
+ struct hlist_head task_kill;
+ struct hlist_head task_prctl;
+ struct hlist_head task_to_inode;
+ struct hlist_head ipc_permission;
+ struct hlist_head ipc_getsecid;
+ struct hlist_head msg_msg_alloc_security;
+ struct hlist_head msg_msg_free_security;
+ struct hlist_head msg_queue_alloc_security;
+ struct hlist_head msg_queue_free_security;
+ struct hlist_head msg_queue_associate;
+ struct hlist_head msg_queue_msgctl;
+ struct hlist_head msg_queue_msgsnd;
+ struct hlist_head msg_queue_msgrcv;
+ struct hlist_head shm_alloc_security;
+ struct hlist_head shm_free_security;
+ struct hlist_head shm_associate;
+ struct hlist_head shm_shmctl;
+ struct hlist_head shm_shmat;
+ struct hlist_head sem_alloc_security;
+ struct hlist_head sem_free_security;
+ struct hlist_head sem_associate;
+ struct hlist_head sem_semctl;
+ struct hlist_head sem_semop;
+ struct hlist_head netlink_send;
+ struct hlist_head d_instantiate;
+ struct hlist_head getprocattr;
+ struct hlist_head setprocattr;
+ struct hlist_head ismaclabel;
+ struct hlist_head secid_to_secctx;
+ struct hlist_head secctx_to_secid;
+ struct hlist_head release_secctx;
+ struct hlist_head inode_invalidate_secctx;
+ struct hlist_head inode_notifysecctx;
+ struct hlist_head inode_setsecctx;
+ struct hlist_head inode_getsecctx;
#ifdef CONFIG_SECURITY_NETWORK
- struct list_head unix_stream_connect;
- struct list_head unix_may_send;
- struct list_head socket_create;
- struct list_head socket_post_create;
- struct list_head socket_bind;
- struct list_head socket_connect;
- struct list_head socket_listen;
- struct list_head socket_accept;
- struct list_head socket_sendmsg;
- struct list_head socket_recvmsg;
- struct list_head socket_getsockname;
- struct list_head socket_getpeername;
- struct list_head socket_getsockopt;
- struct list_head socket_setsockopt;
- struct list_head socket_shutdown;
- struct list_head socket_sock_rcv_skb;
- struct list_head socket_getpeersec_stream;
- struct list_head socket_getpeersec_dgram;
- struct list_head sk_alloc_security;
- struct list_head sk_free_security;
- struct list_head sk_clone_security;
- struct list_head sk_getsecid;
- struct list_head sock_graft;
- struct list_head inet_conn_request;
- struct list_head inet_csk_clone;
- struct list_head inet_conn_established;
- struct list_head secmark_relabel_packet;
- struct list_head secmark_refcount_inc;
- struct list_head secmark_refcount_dec;
- struct list_head req_classify_flow;
- struct list_head tun_dev_alloc_security;
- struct list_head tun_dev_free_security;
- struct list_head tun_dev_create;
- struct list_head tun_dev_attach_queue;
- struct list_head tun_dev_attach;
- struct list_head tun_dev_open;
+ struct hlist_head unix_stream_connect;
+ struct hlist_head unix_may_send;
+ struct hlist_head socket_create;
+ struct hlist_head socket_post_create;
+ struct hlist_head socket_bind;
+ struct hlist_head socket_connect;
+ struct hlist_head socket_listen;
+ struct hlist_head socket_accept;
+ struct hlist_head socket_sendmsg;
+ struct hlist_head socket_recvmsg;
+ struct hlist_head socket_getsockname;
+ struct hlist_head socket_getpeername;
+ struct hlist_head socket_getsockopt;
+ struct hlist_head socket_setsockopt;
+ struct hlist_head socket_shutdown;
+ struct hlist_head socket_sock_rcv_skb;
+ struct hlist_head socket_getpeersec_stream;
+ struct hlist_head socket_getpeersec_dgram;
+ struct hlist_head sk_alloc_security;
+ struct hlist_head sk_free_security;
+ struct hlist_head sk_clone_security;
+ struct hlist_head sk_getsecid;
+ struct hlist_head sock_graft;
+ struct hlist_head inet_conn_request;
+ struct hlist_head inet_csk_clone;
+ struct hlist_head inet_conn_established;
+ struct hlist_head secmark_relabel_packet;
+ struct hlist_head secmark_refcount_inc;
+ struct hlist_head secmark_refcount_dec;
+ struct hlist_head req_classify_flow;
+ struct hlist_head tun_dev_alloc_security;
+ struct hlist_head tun_dev_free_security;
+ struct hlist_head tun_dev_create;
+ struct hlist_head tun_dev_attach_queue;
+ struct hlist_head tun_dev_attach;
+ struct hlist_head tun_dev_open;
+ struct hlist_head sctp_assoc_request;
+ struct hlist_head sctp_bind_connect;
+ struct hlist_head sctp_sk_clone;
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_INFINIBAND
- struct list_head ib_pkey_access;
- struct list_head ib_endport_manage_subnet;
- struct list_head ib_alloc_security;
- struct list_head ib_free_security;
+ struct hlist_head ib_pkey_access;
+ struct hlist_head ib_endport_manage_subnet;
+ struct hlist_head ib_alloc_security;
+ struct hlist_head ib_free_security;
#endif /* CONFIG_SECURITY_INFINIBAND */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
- struct list_head xfrm_policy_alloc_security;
- struct list_head xfrm_policy_clone_security;
- struct list_head xfrm_policy_free_security;
- struct list_head xfrm_policy_delete_security;
- struct list_head xfrm_state_alloc;
- struct list_head xfrm_state_alloc_acquire;
- struct list_head xfrm_state_free_security;
- struct list_head xfrm_state_delete_security;
- struct list_head xfrm_policy_lookup;
- struct list_head xfrm_state_pol_flow_match;
- struct list_head xfrm_decode_session;
+ struct hlist_head xfrm_policy_alloc_security;
+ struct hlist_head xfrm_policy_clone_security;
+ struct hlist_head xfrm_policy_free_security;
+ struct hlist_head xfrm_policy_delete_security;
+ struct hlist_head xfrm_state_alloc;
+ struct hlist_head xfrm_state_alloc_acquire;
+ struct hlist_head xfrm_state_free_security;
+ struct hlist_head xfrm_state_delete_security;
+ struct hlist_head xfrm_policy_lookup;
+ struct hlist_head xfrm_state_pol_flow_match;
+ struct hlist_head xfrm_decode_session;
#endif /* CONFIG_SECURITY_NETWORK_XFRM */
#ifdef CONFIG_KEYS
- struct list_head key_alloc;
- struct list_head key_free;
- struct list_head key_permission;
- struct list_head key_getsecurity;
+ struct hlist_head key_alloc;
+ struct hlist_head key_free;
+ struct hlist_head key_permission;
+ struct hlist_head key_getsecurity;
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
- struct list_head audit_rule_init;
- struct list_head audit_rule_known;
- struct list_head audit_rule_match;
- struct list_head audit_rule_free;
+ struct hlist_head audit_rule_init;
+ struct hlist_head audit_rule_known;
+ struct hlist_head audit_rule_match;
+ struct hlist_head audit_rule_free;
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_BPF_SYSCALL
- struct list_head bpf;
- struct list_head bpf_map;
- struct list_head bpf_prog;
- struct list_head bpf_map_alloc_security;
- struct list_head bpf_map_free_security;
- struct list_head bpf_prog_alloc_security;
- struct list_head bpf_prog_free_security;
+ struct hlist_head bpf;
+ struct hlist_head bpf_map;
+ struct hlist_head bpf_prog;
+ struct hlist_head bpf_map_alloc_security;
+ struct hlist_head bpf_map_free_security;
+ struct hlist_head bpf_prog_alloc_security;
+ struct hlist_head bpf_prog_free_security;
#endif /* CONFIG_BPF_SYSCALL */
} __randomize_layout;
@@ -1962,8 +2005,8 @@ struct security_hook_heads {
* For use with generic list macros for common operations.
*/
struct security_hook_list {
- struct list_head list;
- struct list_head *head;
+ struct hlist_node list;
+ struct hlist_head *head;
union security_list_options hook;
char *lsm;
} __randomize_layout;
@@ -2002,7 +2045,7 @@ static inline void security_delete_hooks(struct security_hook_list *hooks,
int i;
for (i = 0; i < count; i++)
- list_del_rcu(&hooks[i].list);
+ hlist_del_rcu(&hooks[i].list);
}
#endif /* CONFIG_SECURITY_SELINUX_DISABLE */
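A hedged sketch of how an LSM wires up the new cred_getsecid and SCTP hooks now that the hook table is hlist based. The handler bodies are stubs, CONFIG_SECURITY_NETWORK=y is assumed for the SCTP hook, and LSM_HOOK_INIT()/security_add_hooks() are the existing registration helpers.

/*
 * Hedged sketch: "example" LSM with stub handlers only.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/lsm_hooks.h>

static void example_cred_getsecid(const struct cred *c, u32 *secid)
{
	*secid = 0;	/* stub: a real LSM derives this from c->security */
}

static int example_sctp_assoc_request(struct sctp_endpoint *ep,
				      struct sk_buff *skb)
{
	return 0;	/* stub: a real LSM labels the association here */
}

static struct security_hook_list example_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(cred_getsecid, example_cred_getsecid),
	LSM_HOOK_INIT(sctp_assoc_request, example_sctp_assoc_request),
};

static int __init example_lsm_init(void)
{
	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
			   "example");
	return 0;
}
security_initcall(example_lsm_init);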
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index f92ea7783652..ca59883c8364 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -318,6 +318,9 @@ static inline bool memblock_bottom_up(void)
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,
ulong flags);
+phys_addr_t memblock_alloc_base_nid(phys_addr_t size,
+ phys_addr_t align, phys_addr_t max_addr,
+ int nid, ulong flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
@@ -416,21 +419,11 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif
-
-extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
- phys_addr_t end_addr);
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
return 0;
}
-
-static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
- phys_addr_t end_addr)
-{
- return 0;
-}
-
#endif /* CONFIG_HAVE_MEMBLOCK */
#endif /* __KERNEL__ */
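A hedged sketch of the new NUMA-aware allocator: try the requested node first, then fall back to any node. It assumes memblock_alloc_base_nid() returns 0 on failure (like __memblock_alloc_base()) rather than panicking; MEMBLOCK_ALLOC_ACCESSIBLE and MEMBLOCK_NONE are the usual "no limit" / "no special flags" arguments, and the fallback policy is the caller's choice.

/*
 * Hedged sketch: assumes a zero return on allocation failure.
 */
#include <linux/init.h>
#include <linux/memblock.h>

static phys_addr_t __init example_alloc_on_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	phys_addr_t pa;

	pa = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				     nid, MEMBLOCK_NONE);
	if (!pa)	/* caller's policy: fall back to any node */
		pa = memblock_alloc_base(size, align,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
	return pa;
}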
diff --git a/include/linux/memory.h b/include/linux/memory.h
index f71e732c77b2..31ca3e28b0eb 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -33,6 +33,7 @@ struct memory_block {
void *hw; /* optional pointer to fw/hw data */
int (*phys_callback)(struct memory_block *);
struct device dev;
+ int nid; /* NID for this memory block */
};
int arch_get_memory_phys_device(unsigned long start_pfn);
@@ -109,7 +110,7 @@ extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
extern int register_memory_isolate_notifier(struct notifier_block *nb);
extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
-extern int register_new_memory(int, struct mem_section *);
+int hotplug_memory_register(int nid, struct mem_section *section);
#ifdef CONFIG_MEMORY_HOTREMOVE
extern int unregister_memory_section(struct mem_section *);
#endif
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index aba5f86eb038..2b0265265c28 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -52,24 +52,6 @@ enum {
};
/*
- * pgdat resizing functions
- */
-static inline
-void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
-{
- spin_lock_irqsave(&pgdat->node_size_lock, *flags);
-}
-static inline
-void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
-{
- spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
-}
-static inline
-void pgdat_resize_init(struct pglist_data *pgdat)
-{
- spin_lock_init(&pgdat->node_size_lock);
-}
-/*
* Zone resizing functions
*
* Note: any attempt to resize a zone should has pgdat_resize_lock()
@@ -246,13 +228,6 @@ extern void clear_zone_contiguous(struct zone *zone);
___page; \
})
-/*
- * Stub functions for when hotplug is off
- */
-static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
-static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
-static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
-
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
return 0;
@@ -293,6 +268,34 @@ static inline bool movable_node_is_enabled(void)
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
+#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
+/*
+ * pgdat resizing functions
+ */
+static inline
+void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
+{
+ spin_lock_irqsave(&pgdat->node_size_lock, *flags);
+}
+static inline
+void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
+{
+ spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
+}
+static inline
+void pgdat_resize_init(struct pglist_data *pgdat)
+{
+ spin_lock_init(&pgdat->node_size_lock);
+}
+#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
+/*
+ * Stub functions for when hotplug is off
+ */
+static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
+static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
+static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
+#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
+
#ifdef CONFIG_MEMORY_HOTREMOVE
extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
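The move above exists so the locking pattern below also builds when only CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled. A minimal sketch of that pattern:

/*
 * Hedged sketch: guarded read of a pgdat span field.
 */
#include <linux/memory_hotplug.h>
#include <linux/mmzone.h>

static unsigned long example_node_spanned(pg_data_t *pgdat)
{
	unsigned long flags, span;

	pgdat_resize_lock(pgdat, &flags);
	span = pgdat->node_spanned_pages;
	pgdat_resize_unlock(pgdat, &flags);

	return span;
}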
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index a2246cf670ba..ab45f8a0d288 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -25,7 +25,7 @@ enum migrate_reason {
MR_SYSCALL, /* also applies to cpusets */
MR_MEMPOLICY_MBIND,
MR_NUMA_MISPLACED,
- MR_CMA,
+ MR_CONTIG_RANGE,
MR_TYPES
};
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index a9b5fed8f7c6..81d0799b6091 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -257,10 +257,6 @@ enum {
};
enum {
- MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0
-};
-
-enum {
MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0,
MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1,
MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 12758595459b..2bc27f8c5b87 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1017,6 +1017,8 @@ enum mlx5_cap_type {
MLX5_CAP_VECTOR_CALC,
MLX5_CAP_QOS,
MLX5_CAP_DEBUG,
+ MLX5_CAP_RESERVED_14,
+ MLX5_CAP_DEV_MEM,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1168,6 +1170,12 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP64_FPGA(mdev, cap) \
MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
+#define MLX5_CAP_DEV_MEM(mdev, cap)\
+ MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
+
+#define MLX5_CAP64_DEV_MEM(mdev, cap)\
+ MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
@@ -1211,8 +1219,8 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
-#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
-#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
+#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
+#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index cded85ab6fe4..767d193c269a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -591,8 +591,14 @@ struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_pagefault;
+struct mlx5_rate_limit {
+ u32 rate;
+ u32 max_burst_sz;
+ u16 typical_pkt_sz;
+};
+
struct mlx5_rl_entry {
- u32 rate;
+ struct mlx5_rate_limit rl;
u16 index;
u16 refcount;
};
@@ -1107,9 +1113,12 @@ int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
-int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
-void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
+ struct mlx5_rate_limit *rl);
+void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
+ struct mlx5_rate_limit *rl_1);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
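A hedged sketch of the extended rate-limit API: callers now pass a struct mlx5_rate_limit instead of a bare rate. The zero burst and typical-packet-size values below simply mean "unspecified" in this sketch; whether the device honours non-zero values depends on the packet_pacing_* capability bits added in mlx5_ifc.h further below.

/*
 * Hedged sketch: burst/typical-packet-size values are illustrative.
 */
#include <linux/errno.h>
#include <linux/mlx5/driver.h>

static int example_set_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
{
	struct mlx5_rate_limit rl = {
		.rate = rate,
		.max_burst_sz = 0,
		.typical_pkt_sz = 0,
	};

	if (!mlx5_rl_is_in_range(dev, rl.rate))
		return -EINVAL;

	return mlx5_rl_add_rate(dev, index, &rl);
}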
diff --git a/include/linux/mlx5/fs_helpers.h b/include/linux/mlx5/fs_helpers.h
index 7b476bbae731..9db21cd0e92c 100644
--- a/include/linux/mlx5/fs_helpers.h
+++ b/include/linux/mlx5/fs_helpers.h
@@ -38,6 +38,14 @@
#define MLX5_FS_IPV4_VERSION 4
#define MLX5_FS_IPV6_VERSION 6
+static inline bool mlx5_fs_is_ipsec_flow(const u32 *match_c)
+{
+ void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ misc_parameters);
+
+ return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
+}
+
static inline bool _mlx5_fs_is_outer_ipproto_flow(const u32 *match_c,
const u32 *match_v, u8 match)
{
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index d25011f84815..1aad455538f4 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -92,6 +92,8 @@ enum {
MLX5_CMD_OP_DESTROY_MKEY = 0x202,
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204,
+ MLX5_CMD_OP_ALLOC_MEMIC = 0x205,
+ MLX5_CMD_OP_DEALLOC_MEMIC = 0x206,
MLX5_CMD_OP_CREATE_EQ = 0x301,
MLX5_CMD_OP_DESTROY_EQ = 0x302,
MLX5_CMD_OP_QUERY_EQ = 0x303,
@@ -575,7 +577,10 @@ struct mlx5_ifc_qos_cap_bits {
u8 esw_scheduling[0x1];
u8 esw_bw_share[0x1];
u8 esw_rate_limit[0x1];
- u8 reserved_at_4[0x1c];
+ u8 reserved_at_4[0x1];
+ u8 packet_pacing_burst_bound[0x1];
+ u8 packet_pacing_typical_size[0x1];
+ u8 reserved_at_7[0x19];
u8 reserved_at_20[0x20];
@@ -669,6 +674,24 @@ struct mlx5_ifc_roce_cap_bits {
u8 reserved_at_100[0x700];
};
+struct mlx5_ifc_device_mem_cap_bits {
+ u8 memic[0x1];
+ u8 reserved_at_1[0x1f];
+
+ u8 reserved_at_20[0xb];
+ u8 log_min_memic_alloc_size[0x5];
+ u8 reserved_at_30[0x8];
+ u8 log_max_memic_addr_alignment[0x8];
+
+ u8 memic_bar_start_addr[0x40];
+
+ u8 memic_bar_size[0x20];
+
+ u8 max_memic_size[0x20];
+
+ u8 reserved_at_c0[0x740];
+};
+
enum {
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0,
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,
@@ -883,7 +906,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 ets[0x1];
u8 nic_flow_table[0x1];
u8 eswitch_flow_table[0x1];
- u8 early_vf_enable[0x1];
+ u8 device_memory[0x1];
u8 mcam_reg[0x1];
u8 pcam_reg[0x1];
u8 local_ca_ack_delay[0x5];
@@ -927,7 +950,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_202[0x1];
u8 ipoib_enhanced_offloads[0x1];
u8 ipoib_basic_offloads[0x1];
- u8 reserved_at_205[0x5];
+ u8 reserved_at_205[0x1];
+ u8 repeated_block_disabled[0x1];
+ u8 umr_modify_entity_size_disabled[0x1];
+ u8 umr_modify_atomic_disabled[0x1];
+ u8 umr_indirect_mkey_disabled[0x1];
u8 umr_fence[0x2];
u8 reserved_at_20c[0x3];
u8 drain_sigerr[0x1];
@@ -2748,12 +2775,17 @@ enum {
MLX5_MKC_ACCESS_MODE_MTT = 0x1,
MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
MLX5_MKC_ACCESS_MODE_KSM = 0x3,
+ MLX5_MKC_ACCESS_MODE_MEMIC = 0x5,
};
struct mlx5_ifc_mkc_bits {
u8 reserved_at_0[0x1];
u8 free[0x1];
- u8 reserved_at_2[0xd];
+ u8 reserved_at_2[0x1];
+ u8 access_mode_4_2[0x3];
+ u8 reserved_at_6[0x7];
+ u8 relaxed_ordering_write[0x1];
+ u8 reserved_at_e[0x1];
u8 small_fence_on_rdma_read_response[0x1];
u8 umr_en[0x1];
u8 a[0x1];
@@ -2761,7 +2793,7 @@ struct mlx5_ifc_mkc_bits {
u8 rr[0x1];
u8 lw[0x1];
u8 lr[0x1];
- u8 access_mode[0x2];
+ u8 access_mode_1_0[0x2];
u8 reserved_at_18[0x8];
u8 qpn[0x18];
@@ -7397,7 +7429,12 @@ struct mlx5_ifc_set_pp_rate_limit_in_bits {
u8 rate_limit[0x20];
- u8 reserved_at_a0[0x160];
+ u8 burst_upper_bound[0x20];
+
+ u8 reserved_at_c0[0x10];
+ u8 typical_packet_size[0x10];
+
+ u8 reserved_at_e0[0x120];
};
struct mlx5_ifc_access_register_out_bits {
@@ -8951,4 +8988,57 @@ struct mlx5_ifc_destroy_vport_lag_in_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_alloc_memic_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_30[0x20];
+
+ u8 reserved_at_40[0x18];
+ u8 log_memic_addr_alignment[0x8];
+
+ u8 range_start_addr[0x40];
+
+ u8 range_size[0x20];
+
+ u8 memic_size[0x20];
+};
+
+struct mlx5_ifc_alloc_memic_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 memic_start_addr[0x40];
+};
+
+struct mlx5_ifc_dealloc_memic_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ u8 memic_start_addr[0x40];
+
+ u8 memic_size[0x20];
+
+ u8 reserved_at_e0[0x20];
+};
+
+struct mlx5_ifc_dealloc_memic_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
#endif /* MLX5_IFC_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f945dff34925..3ad632366973 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -386,17 +386,19 @@ struct vm_operations_struct {
void (*close)(struct vm_area_struct * area);
int (*split)(struct vm_area_struct * area, unsigned long addr);
int (*mremap)(struct vm_area_struct * area);
- int (*fault)(struct vm_fault *vmf);
- int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
+ vm_fault_t (*fault)(struct vm_fault *vmf);
+ vm_fault_t (*huge_fault)(struct vm_fault *vmf,
+ enum page_entry_size pe_size);
void (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
+ unsigned long (*pagesize)(struct vm_area_struct * area);
/* notification that a previously read-only page is about to become
* writable, if an error is returned it will cause a SIGBUS */
- int (*page_mkwrite)(struct vm_fault *vmf);
+ vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
- int (*pfn_mkwrite)(struct vm_fault *vmf);
+ vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
/* called by access_process_vm when get_user_pages() fails, typically
* for use by special VMAs that can switch between memory and hardware
@@ -903,7 +905,9 @@ extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
- return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
+ struct page *p = (struct page *)page;
+
+ return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif
@@ -1152,6 +1156,7 @@ static inline pgoff_t page_index(struct page *page)
bool page_mapped(struct page *page);
struct address_space *page_mapping(struct page *page);
+struct address_space *page_mapping_file(struct page *page);
/*
* Return true only if the page has been allocated with
@@ -2420,6 +2425,44 @@ int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
+static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
+ unsigned long addr, struct page *page)
+{
+ int err = vm_insert_page(vma, addr, page);
+
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ if (err < 0 && err != -EBUSY)
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+
+static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma,
+ unsigned long addr, pfn_t pfn)
+{
+ int err = vm_insert_mixed(vma, addr, pfn);
+
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ if (err < 0 && err != -EBUSY)
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+
+static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
+{
+ int err = vm_insert_pfn(vma, addr, pfn);
+
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ if (err < 0 && err != -EBUSY)
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
struct page *follow_page_mask(struct vm_area_struct *vma,
unsigned long address, unsigned int foll_flags,
@@ -2589,7 +2632,7 @@ extern int get_hwpoison_page(struct page *page);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
-extern atomic_long_t num_poisoned_pages;
+extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(struct page *page, int flags);
@@ -2611,6 +2654,7 @@ enum mf_action_page_type {
MF_MSG_POISONED_HUGE,
MF_MSG_HUGE,
MF_MSG_FREE_HUGE,
+ MF_MSG_NON_PMD_HUGE,
MF_MSG_UNMAP_FAILED,
MF_MSG_DIRTY_SWAPCACHE,
MF_MSG_CLEAN_SWAPCACHE,
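A hedged sketch of a ->fault handler written against the new vm_fault_t wrappers; the page lookup helper is hypothetical and stands in for whatever the driver really does. The point is that the errno-to-VM_FAULT_* conversion now lives in vmf_insert_page() rather than in every caller.

/*
 * Hedged sketch: example_lookup_page() is a hypothetical stand-in.
 */
#include <linux/mm.h>

static struct page *example_lookup_page(struct vm_area_struct *vma,
					pgoff_t pgoff)
{
	return NULL;	/* stub: a real driver finds or allocates the page */
}

static vm_fault_t example_fault(struct vm_fault *vmf)
{
	struct page *page;

	page = example_lookup_page(vmf->vma, vmf->pgoff);
	if (!page)
		return VM_FAULT_SIGBUS;

	return vmf_insert_page(vmf->vma, vmf->address, page);
}

static const struct vm_operations_struct example_vm_ops = {
	.fault = example_fault,
};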
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index fd1af6b9591d..21612347d311 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -22,6 +22,8 @@
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
+typedef int vm_fault_t;
+
struct address_space;
struct mem_cgroup;
struct hmm;
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 57b0030d3800..2ad72d2c8cc5 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -37,10 +37,10 @@ void dump_mm(const struct mm_struct *mm);
BUG(); \
} \
} while (0)
-#define VM_WARN_ON(cond) WARN_ON(cond)
-#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond)
-#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
-#define VM_WARN(cond, format...) WARN(cond, format)
+#define VM_WARN_ON(cond) (void)WARN_ON(cond)
+#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
+#define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format)
+#define VM_WARN(cond, format...) (void)WARN(cond, format)
#else
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a2db4576e499..f11ae29005f1 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -633,14 +633,15 @@ typedef struct pglist_data {
#ifndef CONFIG_NO_BOOTMEM
struct bootmem_data *bdata;
#endif
-#ifdef CONFIG_MEMORY_HOTPLUG
+#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
* Must be held any time you expect node_start_pfn, node_present_pages
* or node_spanned_pages stay constant. Holding this will also
* guarantee that any pfn_valid() stays that way.
*
* pgdat_resize_lock() and pgdat_resize_unlock() are provided to
- * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
+ * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
+ * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
*
* Nests above zone->lock and zone->span_seqlock
*/
@@ -775,7 +776,8 @@ static inline bool is_dev_zone(const struct zone *zone)
#include <linux/memory_hotplug.h>
void build_all_zonelists(pg_data_t *pgdat);
-void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
+void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
+ enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
int classzone_idx, unsigned int alloc_flags,
long free_pages);
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 3bf8f954b642..3102bd754d18 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -1,6 +1,4 @@
/*
- * linux/include/linux/mtd/bbm.h
- *
* NAND family Bad Block Management (BBM) header file
* - Bad Block Table (BBT) implementation
*
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 205ededccc60..a86c4fa93115 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -30,32 +30,19 @@
#include <asm/div64.h>
-#define MTD_ERASE_PENDING 0x01
-#define MTD_ERASING 0x02
-#define MTD_ERASE_SUSPEND 0x04
-#define MTD_ERASE_DONE 0x08
-#define MTD_ERASE_FAILED 0x10
-
#define MTD_FAIL_ADDR_UNKNOWN -1LL
+struct mtd_info;
+
/*
* If the erase fails, fail_addr might indicate exactly which block failed. If
* fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
* or was not specific to any particular block.
*/
struct erase_info {
- struct mtd_info *mtd;
uint64_t addr;
uint64_t len;
uint64_t fail_addr;
- u_long time;
- u_long retries;
- unsigned dev;
- unsigned cell;
- void (*callback) (struct erase_info *self);
- u_long priv;
- u_char state;
- struct erase_info *next;
};
struct mtd_erase_region_info {
@@ -595,8 +582,6 @@ extern void register_mtd_user (struct mtd_notifier *new);
extern int unregister_mtd_user (struct mtd_notifier *old);
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
-void mtd_erase_callback(struct erase_info *instr);
-
static inline int mtd_is_bitflip(int err) {
return err == -EUCLEAN;
}
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
new file mode 100644
index 000000000000..792ea5c26329
--- /dev/null
+++ b/include/linux/mtd/nand.h
@@ -0,0 +1,731 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 - Free Electrons
+ *
+ * Authors:
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Peter Pan <peterpandong@micron.com>
+ */
+
+#ifndef __LINUX_MTD_NAND_H
+#define __LINUX_MTD_NAND_H
+
+#include <linux/mtd/mtd.h>
+
+/**
+ * struct nand_memory_organization - Memory organization structure
+ * @bits_per_cell: number of bits per NAND cell
+ * @pagesize: page size
+ * @oobsize: OOB area size
+ * @pages_per_eraseblock: number of pages per eraseblock
+ * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
+ * @planes_per_lun: number of planes per LUN
+ * @luns_per_target: number of LUNs per target (target is a synonym for die)
+ * @ntargets: total number of targets exposed by the NAND device
+ */
+struct nand_memory_organization {
+ unsigned int bits_per_cell;
+ unsigned int pagesize;
+ unsigned int oobsize;
+ unsigned int pages_per_eraseblock;
+ unsigned int eraseblocks_per_lun;
+ unsigned int planes_per_lun;
+ unsigned int luns_per_target;
+ unsigned int ntargets;
+};
+
+#define NAND_MEMORG(bpc, ps, os, ppe, epl, ppl, lpt, nt) \
+ { \
+ .bits_per_cell = (bpc), \
+ .pagesize = (ps), \
+ .oobsize = (os), \
+ .pages_per_eraseblock = (ppe), \
+ .eraseblocks_per_lun = (epl), \
+ .planes_per_lun = (ppl), \
+ .luns_per_target = (lpt), \
+ .ntargets = (nt), \
+ }
+
+/**
+ * struct nand_row_converter - Information needed to convert an absolute offset
+ * into a row address
+ * @lun_addr_shift: position of the LUN identifier in the row address
+ * @eraseblock_addr_shift: position of the eraseblock identifier in the row
+ * address
+ */
+struct nand_row_converter {
+ unsigned int lun_addr_shift;
+ unsigned int eraseblock_addr_shift;
+};
+
+/**
+ * struct nand_pos - NAND position object
+ * @target: the NAND target/die
+ * @lun: the LUN identifier
+ * @plane: the plane within the LUN
+ * @eraseblock: the eraseblock within the LUN
+ * @page: the page within the LUN
+ *
+ * This information is usually used by specific sub-layers to select the
+ * appropriate target/die and generate a row address to pass to the device.
+ */
+struct nand_pos {
+ unsigned int target;
+ unsigned int lun;
+ unsigned int plane;
+ unsigned int eraseblock;
+ unsigned int page;
+};
+
+/**
+ * struct nand_page_io_req - NAND I/O request object
+ * @pos: the position this I/O request is targeting
+ * @dataoffs: the offset within the page
+ * @datalen: number of data bytes to read from/write to this page
+ * @databuf: buffer to store data in or get data from
+ * @ooboffs: the OOB offset within the page
+ * @ooblen: the number of OOB bytes to read from/write to this page
+ * @oobbuf: buffer to store OOB data in or get OOB data from
+ *
+ * This object is used to pass per-page I/O requests to NAND sub-layers. This
+ * way all the relevant information is already laid out in a convenient form
+ * and specific NAND layers can focus on translating it into specific
+ * commands/operations.
+ */
+struct nand_page_io_req {
+ struct nand_pos pos;
+ unsigned int dataoffs;
+ unsigned int datalen;
+ union {
+ const void *out;
+ void *in;
+ } databuf;
+ unsigned int ooboffs;
+ unsigned int ooblen;
+ union {
+ const void *out;
+ void *in;
+ } oobbuf;
+};
+
+/**
+ * struct nand_ecc_req - NAND ECC requirements
+ * @strength: ECC strength
+ * @step_size: ECC step/block size
+ */
+struct nand_ecc_req {
+ unsigned int strength;
+ unsigned int step_size;
+};
+
+#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
+
+/**
+ * struct nand_bbt - bad block table object
+ * @cache: in memory BBT cache
+ */
+struct nand_bbt {
+ unsigned long *cache;
+};
+
+struct nand_device;
+
+/**
+ * struct nand_ops - NAND operations
+ * @erase: erase a specific block. No need to check if the block is bad before
+ * erasing, this has been taken care of by the generic NAND layer
+ * @markbad: mark a specific block bad. No need to check if the block is
+ * already marked bad, this has been taken care of by the generic
+ * NAND layer. This method should just write the BBM (Bad Block
+ *	      Marker) so that future calls to struct_nand_ops->isbad() return
+ * true
+ * @isbad: check whether a block is bad or not. This method should just read
+ * the BBM and return whether the block is bad or not based on what it
+ * reads
+ *
+ * These are all low level operations that should be implemented by specialized
+ * NAND layers (SPI NAND, raw NAND, ...).
+ */
+struct nand_ops {
+ int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
+ int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
+ bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
+};
+
+/**
+ * struct nand_device - NAND device
+ * @mtd: MTD instance attached to the NAND device
+ * @memorg: memory layout
+ * @eccreq: ECC requirements
+ * @rowconv: position to row address converter
+ * @bbt: bad block table info
+ * @ops: NAND operations attached to the NAND device
+ *
+ * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
+ * should declare their own NAND object embedding a nand_device struct (that's
+ * how inheritance is done).
+ * struct_nand_device->memorg and struct_nand_device->eccreq should be filled
+ * at device detection time to reflect the NAND device
+ * capabilities/requirements. Once this is done nanddev_init() can be called.
+ * It will take care of converting NAND information into MTD ones, which means
+ * the specialized NAND layers should never manually tweak
+ * struct_nand_device->mtd except for the ->_read/write() hooks.
+ */
+struct nand_device {
+ struct mtd_info mtd;
+ struct nand_memory_organization memorg;
+ struct nand_ecc_req eccreq;
+ struct nand_row_converter rowconv;
+ struct nand_bbt bbt;
+ const struct nand_ops *ops;
+};
+
+/**
+ * struct nand_io_iter - NAND I/O iterator
+ * @req: current I/O request
+ * @oobbytes_per_page: maximum number of OOB bytes per page
+ * @dataleft: remaining number of data bytes to read/write
+ * @oobleft: remaining number of OOB bytes to read/write
+ *
+ * Can be used by specialized NAND layers to iterate over all pages covered
+ * by an MTD I/O request, which should greatly simplify the boilerplate
+ * code needed to read/write data from/to a NAND device.
+ */
+struct nand_io_iter {
+ struct nand_page_io_req req;
+ unsigned int oobbytes_per_page;
+ unsigned int dataleft;
+ unsigned int oobleft;
+};
+
+/**
+ * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
+ * @mtd: MTD instance
+ *
+ * Return: the NAND device embedding @mtd.
+ */
+static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct nand_device, mtd);
+}
+
+/**
+ * nanddev_to_mtd() - Get the MTD device attached to a NAND device
+ * @nand: NAND device
+ *
+ * Return: the MTD device embedded in @nand.
+ */
+static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
+{
+ return &nand->mtd;
+}
+
+/**
+ * nanddev_bits_per_cell() - Get the number of bits per cell
+ * @nand: NAND device
+ *
+ * Return: the number of bits per cell.
+ */
+static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
+{
+ return nand->memorg.bits_per_cell;
+}
+
+/**
+ * nanddev_page_size() - Get NAND page size
+ * @nand: NAND device
+ *
+ * Return: the page size.
+ */
+static inline size_t nanddev_page_size(const struct nand_device *nand)
+{
+ return nand->memorg.pagesize;
+}
+
+/**
+ * nanddev_per_page_oobsize() - Get NAND OOB size
+ * @nand: NAND device
+ *
+ * Return: the OOB size.
+ */
+static inline unsigned int
+nanddev_per_page_oobsize(const struct nand_device *nand)
+{
+ return nand->memorg.oobsize;
+}
+
+/**
+ * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
+ * @nand: NAND device
+ *
+ * Return: the number of pages per eraseblock.
+ */
+static inline unsigned int
+nanddev_pages_per_eraseblock(const struct nand_device *nand)
+{
+ return nand->memorg.pages_per_eraseblock;
+}
+
+/**
+ * nanddev_eraseblock_size() - Get NAND eraseblock size
+ * @nand: NAND device
+ *
+ * Return: the eraseblock size.
+ */
+static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
+{
+ return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
+}
+
+/**
+ * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
+ * @nand: NAND device
+ *
+ * Return: the number of eraseblocks per LUN.
+ */
+static inline unsigned int
+nanddev_eraseblocks_per_lun(const struct nand_device *nand)
+{
+ return nand->memorg.eraseblocks_per_lun;
+}
+
+/**
+ * nanddev_target_size() - Get the total size provided by a single target/die
+ * @nand: NAND device
+ *
+ * Return: the total size exposed by a single target/die in bytes.
+ */
+static inline u64 nanddev_target_size(const struct nand_device *nand)
+{
+ return (u64)nand->memorg.luns_per_target *
+ nand->memorg.eraseblocks_per_lun *
+ nand->memorg.pages_per_eraseblock *
+ nand->memorg.pagesize;
+}
+
+/**
+ * nanddev_ntargets() - Get the total number of targets
+ * @nand: NAND device
+ *
+ * Return: the number of targets/dies exposed by @nand.
+ */
+static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
+{
+ return nand->memorg.ntargets;
+}
+
+/**
+ * nanddev_neraseblocks() - Get the total number of eraseblocks
+ * @nand: NAND device
+ *
+ * Return: the total number of eraseblocks exposed by @nand.
+ */
+static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
+{
+ return (u64)nand->memorg.luns_per_target *
+ nand->memorg.eraseblocks_per_lun *
+ nand->memorg.pages_per_eraseblock;
+}
+
+/**
+ * nanddev_size() - Get NAND size
+ * @nand: NAND device
+ *
+ * Return: the total size (in bytes) exposed by @nand.
+ */
+static inline u64 nanddev_size(const struct nand_device *nand)
+{
+ return nanddev_target_size(nand) * nanddev_ntargets(nand);
+}
+
+/**
+ * nanddev_get_memorg() - Extract memory organization info from a NAND device
+ * @nand: NAND device
+ *
+ * This can be used by the upper layer to fill the memorg info before calling
+ * nanddev_init().
+ *
+ * Return: the memorg object embedded in the NAND device.
+ */
+static inline struct nand_memory_organization *
+nanddev_get_memorg(struct nand_device *nand)
+{
+ return &nand->memorg;
+}
+
+int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
+ struct module *owner);
+void nanddev_cleanup(struct nand_device *nand);
+
+/**
+ * nanddev_register() - Register a NAND device
+ * @nand: NAND device
+ *
+ * Register a NAND device.
+ * This function is just a wrapper around mtd_device_register()
+ * registering the MTD device embedded in @nand.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static inline int nanddev_register(struct nand_device *nand)
+{
+ return mtd_device_register(&nand->mtd, NULL, 0);
+}
+
+/**
+ * nanddev_unregister() - Unregister a NAND device
+ * @nand: NAND device
+ *
+ * Unregister a NAND device.
+ * This function is just a wrapper around mtd_device_unregister()
+ * unregistering the MTD device embedded in @nand.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static inline int nanddev_unregister(struct nand_device *nand)
+{
+ return mtd_device_unregister(&nand->mtd);
+}
+
+/**
+ * nanddev_set_of_node() - Attach a DT node to a NAND device
+ * @nand: NAND device
+ * @np: DT node
+ *
+ * Attach a DT node to a NAND device.
+ */
+static inline void nanddev_set_of_node(struct nand_device *nand,
+ struct device_node *np)
+{
+ mtd_set_of_node(&nand->mtd, np);
+}
+
+/**
+ * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
+ * @nand: NAND device
+ *
+ * Return: the DT node attached to @nand.
+ */
+static inline struct device_node *nanddev_get_of_node(struct nand_device *nand)
+{
+ return mtd_get_of_node(&nand->mtd);
+}
+
+/**
+ * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
+ * @nand: NAND device
+ * @offs: absolute NAND offset (usually passed by the MTD layer)
+ * @pos: a NAND position object to fill in
+ *
+ * Converts @offs into a nand_pos representation.
+ *
+ * Return: the offset within the NAND page pointed to by @pos.
+ */
+static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
+ loff_t offs,
+ struct nand_pos *pos)
+{
+ unsigned int pageoffs;
+ u64 tmp = offs;
+
+ pageoffs = do_div(tmp, nand->memorg.pagesize);
+ pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
+ pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
+ pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
+ pos->lun = do_div(tmp, nand->memorg.luns_per_target);
+ pos->target = tmp;
+
+ return pageoffs;
+}
+
+/**
+ * nanddev_pos_cmp() - Compare two NAND positions
+ * @a: First NAND position
+ * @b: Second NAND position
+ *
+ * Compares two NAND positions.
+ *
+ * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
+ */
+static inline int nanddev_pos_cmp(const struct nand_pos *a,
+ const struct nand_pos *b)
+{
+ if (a->target != b->target)
+ return a->target < b->target ? -1 : 1;
+
+ if (a->lun != b->lun)
+ return a->lun < b->lun ? -1 : 1;
+
+ if (a->eraseblock != b->eraseblock)
+ return a->eraseblock < b->eraseblock ? -1 : 1;
+
+ if (a->page != b->page)
+ return a->page < b->page ? -1 : 1;
+
+ return 0;
+}
+
+/**
+ * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
+ * @nand: NAND device
+ * @pos: the NAND position to convert
+ *
+ * Converts the @pos NAND position into an absolute offset.
+ *
+ * Return: the absolute offset. Note that @pos points to the beginning of a
+ *	   page; if one wants to point to a specific offset within this page,
+ * the returned offset has to be adjusted manually.
+ */
+static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ unsigned int npages;
+
+ npages = pos->page +
+ ((pos->eraseblock +
+ (pos->lun +
+ (pos->target * nand->memorg.luns_per_target)) *
+ nand->memorg.eraseblocks_per_lun) *
+ nand->memorg.pages_per_eraseblock);
+
+ return (loff_t)npages * nand->memorg.pagesize;
+}
+
+/**
+ * nanddev_pos_to_row() - Extract a row address from a NAND position
+ * @nand: NAND device
+ * @pos: the position to convert
+ *
+ * Converts a NAND position into a row address that can then be passed to the
+ * device.
+ *
+ * Return: the row address extracted from @pos.
+ */
+static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ return (pos->lun << nand->rowconv.lun_addr_shift) |
+ (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
+ pos->page;
+}
+
+/**
+ * nanddev_pos_next_target() - Move a position to the next target/die
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next target/die. Useful when you
+ * want to iterate over all targets/dies of a NAND device.
+ */
+static inline void nanddev_pos_next_target(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ pos->page = 0;
+ pos->plane = 0;
+ pos->eraseblock = 0;
+ pos->lun = 0;
+ pos->target++;
+}
+
+/**
+ * nanddev_pos_next_lun() - Move a position to the next LUN
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next LUN. Useful when you want to
+ * iterate over all LUNs of a NAND device.
+ */
+static inline void nanddev_pos_next_lun(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ if (pos->lun >= nand->memorg.luns_per_target - 1)
+ return nanddev_pos_next_target(nand, pos);
+
+ pos->lun++;
+ pos->page = 0;
+ pos->plane = 0;
+ pos->eraseblock = 0;
+}
+
+/**
+ * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next eraseblock. Useful when you
+ * want to iterate over all eraseblocks of a NAND device.
+ */
+static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
+ return nanddev_pos_next_lun(nand, pos);
+
+ pos->eraseblock++;
+ pos->page = 0;
+ pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
+}
+
+/**
+ * nanddev_pos_next_page() - Move a position to the next page
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next page. Useful when you want to
+ * iterate over all pages of a NAND device.
+ */
+static inline void nanddev_pos_next_page(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
+ return nanddev_pos_next_eraseblock(nand, pos);
+
+ pos->page++;
+}
+
+/**
+ * nanddev_io_iter_init() - Initialize a NAND I/O iterator
+ * @nand: NAND device
+ * @offs: absolute offset
+ * @req: MTD request
+ * @iter: NAND I/O iterator
+ *
+ * Initializes a NAND iterator based on the information passed by the MTD
+ * layer.
+ */
+static inline void nanddev_io_iter_init(struct nand_device *nand,
+ loff_t offs, struct mtd_oob_ops *req,
+ struct nand_io_iter *iter)
+{
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+
+ iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
+ iter->req.ooboffs = req->ooboffs;
+ iter->oobbytes_per_page = mtd_oobavail(mtd, req);
+ iter->dataleft = req->len;
+ iter->oobleft = req->ooblen;
+ iter->req.databuf.in = req->datbuf;
+ iter->req.datalen = min_t(unsigned int,
+ nand->memorg.pagesize - iter->req.dataoffs,
+ iter->dataleft);
+ iter->req.oobbuf.in = req->oobbuf;
+ iter->req.ooblen = min_t(unsigned int,
+ iter->oobbytes_per_page - iter->req.ooboffs,
+ iter->oobleft);
+}
+
+/**
+ * nanddev_io_iter_next_page() - Move to the next page
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Updates the @iter to point to the next page.
+ */
+static inline void nanddev_io_iter_next_page(struct nand_device *nand,
+ struct nand_io_iter *iter)
+{
+ nanddev_pos_next_page(nand, &iter->req.pos);
+ iter->dataleft -= iter->req.datalen;
+ iter->req.databuf.in += iter->req.datalen;
+ iter->oobleft -= iter->req.ooblen;
+ iter->req.oobbuf.in += iter->req.ooblen;
+ iter->req.dataoffs = 0;
+ iter->req.ooboffs = 0;
+ iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
+ iter->dataleft);
+ iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
+ iter->oobleft);
+}
+
+/**
+ * nanddev_io_iter_end() - Check whether the iteration should end or not
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Check whether @iter has reached the end of the NAND portion it was asked to
+ * iterate on or not.
+ *
+ * Return: true if @iter has reached the end of the iteration request, false
+ * otherwise.
+ */
+static inline bool nanddev_io_iter_end(struct nand_device *nand,
+ const struct nand_io_iter *iter)
+{
+ if (iter->dataleft || iter->oobleft)
+ return false;
+
+ return true;
+}
+
+/**
+ * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD
+ *			      I/O request
+ * @nand: NAND device
+ * @start: start address to read/write from
+ * @req: MTD I/O request
+ * @iter: NAND I/O iterator
+ *
+ * Should be used to iterate over all the pages contained in an MTD request.
+ */
+#define nanddev_io_for_each_page(nand, start, req, iter) \
+ for (nanddev_io_iter_init(nand, start, req, iter); \
+ !nanddev_io_iter_end(nand, iter); \
+ nanddev_io_iter_next_page(nand, iter))
+
+bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
+bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
+int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
+int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
+
+/* BBT related functions */
+enum nand_bbt_block_status {
+ NAND_BBT_BLOCK_STATUS_UNKNOWN,
+ NAND_BBT_BLOCK_GOOD,
+ NAND_BBT_BLOCK_WORN,
+ NAND_BBT_BLOCK_RESERVED,
+ NAND_BBT_BLOCK_FACTORY_BAD,
+ NAND_BBT_BLOCK_NUM_STATUS,
+};
+
+int nanddev_bbt_init(struct nand_device *nand);
+void nanddev_bbt_cleanup(struct nand_device *nand);
+int nanddev_bbt_update(struct nand_device *nand);
+int nanddev_bbt_get_block_status(const struct nand_device *nand,
+ unsigned int entry);
+int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
+ enum nand_bbt_block_status status);
+int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
+
+/**
+ * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
+ * @nand: NAND device
+ * @pos: the NAND position we want to get BBT entry for
+ *
+ * Convert @pos into the index of the BBT entry storing information about this
+ * eraseblock.
+ *
+ * Return: the BBT entry storing information about the eraseblock pointed to
+ *	   by @pos.
+ */
+static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ return pos->eraseblock +
+ ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
+ nand->memorg.eraseblocks_per_lun);
+}
+
+/**
+ * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
+ * @nand: NAND device
+ *
+ * Return: true if the BBT has been initialized, false otherwise.
+ */
+static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
+{
+ return !!nand->bbt.cache;
+}
+
+/* MTD -> NAND helper functions. */
+int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
+
+#endif /* __LINUX_MTD_NAND_H */
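
Putting the new <linux/mtd/nand.h> pieces together: a specialized layer embeds a struct nand_device, fills in the memory organization and ECC requirements, provides a struct nand_ops, calls nanddev_init()/nanddev_register(), and can then walk MTD requests with nanddev_io_for_each_page(). The sketch below is illustrative only; the my_* names, the 2Gbit SLC geometry and the empty callback bodies are assumptions, not part of the patch:

/* Hypothetical specialized NAND layer built on the new generic core. */
#include <linux/module.h>
#include <linux/mtd/nand.h>

struct my_nand {
        struct nand_device base;        /* inheritance by embedding */
        /* controller-specific state would live here */
};

static int my_erase(struct nand_device *nand, const struct nand_pos *pos)
{
        /* issue an ERASE for nanddev_pos_to_row(nand, pos) on the controller */
        return 0;
}

static int my_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
        /* write a BBM in the OOB area of the first page of the block */
        return 0;
}

static bool my_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
        /* read the BBM back */
        return false;
}

static const struct nand_ops my_nand_ops = {
        .erase = my_erase,
        .markbad = my_markbad,
        .isbad = my_isbad,
};

static int my_nand_init(struct my_nand *mynand)
{
        struct nand_device *nand = &mynand->base;
        int ret;

        /* 2Gbit SLC example: 2k+64 pages, 64 pages/block, 2048 blocks/LUN */
        nand->memorg = (struct nand_memory_organization)
                       NAND_MEMORG(1, 2048, 64, 64, 2048, 1, 1, 1);
        nand->eccreq = (struct nand_ecc_req)NAND_ECCREQ(4, 512);

        ret = nanddev_init(nand, &my_nand_ops, THIS_MODULE);
        if (ret)
                return ret;

        return nanddev_register(nand);
}

/* MTD ->_read_oob() path iterating page by page over the request. */
static int my_nand_read_oob(struct mtd_info *mtd, loff_t from,
                            struct mtd_oob_ops *ops)
{
        struct nand_device *nand = mtd_to_nanddev(mtd);
        struct nand_io_iter iter;

        nanddev_io_for_each_page(nand, from, ops, &iter) {
                /* read iter.req.datalen data bytes at iter.req.dataoffs from
                 * the page at iter.req.pos into iter.req.databuf.in, and
                 * iter.req.ooblen OOB bytes into iter.req.oobbuf.in
                 */
        }

        ops->retlen = ops->len;
        ops->oobretlen = ops->ooblen;
        return 0;
}

The embedded nand_device is what makes the container_of()-based inheritance in mtd_to_nanddev() work: the MTD callbacks receive a struct mtd_info and recover the driver object from it.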
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
index 4d8406c81652..8a2decf7462c 100644
--- a/include/linux/mtd/nand_ecc.h
+++ b/include/linux/mtd/nand_ecc.h
@@ -1,6 +1,4 @@
/*
- * drivers/mtd/nand_ecc.h
- *
* Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com>
* David Woodhouse <dwmw2@infradead.org>
* Thomas Gleixner <tglx@linutronix.de>
diff --git a/include/linux/mtd/ndfc.h b/include/linux/mtd/ndfc.h
index d0558a982628..357e88b3263a 100644
--- a/include/linux/mtd/ndfc.h
+++ b/include/linux/mtd/ndfc.h
@@ -1,6 +1,4 @@
/*
- * linux/include/linux/mtd/ndfc.h
- *
* Copyright (c) 2006 Thomas Gleixner <tglx@linutronix.de>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index c4beb70dacbd..11cb0c50cd84 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -77,6 +77,7 @@ struct mtd_part_parser {
struct list_head list;
struct module *owner;
const char *name;
+ const struct of_device_id *of_match_table;
int (*parse_fn)(struct mtd_info *, const struct mtd_partition **,
struct mtd_part_parser_data *);
void (*cleanup)(const struct mtd_partition *pparts, int nr_parts);
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 56c5570aadbe..5dad59b31244 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -21,6 +21,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/bbm.h>
+#include <linux/types.h>
struct mtd_info;
struct nand_flash_dev;
@@ -235,7 +236,8 @@ struct nand_chip;
#define ONFI_TIMING_MODE_5 (1 << 5)
#define ONFI_TIMING_MODE_UNKNOWN (1 << 6)
-/* ONFI feature address */
+/* ONFI feature number/address */
+#define ONFI_FEATURE_NUMBER 256
#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1
/* Vendor-specific feature address (Micron) */
@@ -429,6 +431,47 @@ struct nand_jedec_params {
__le16 crc;
} __packed;
+/**
+ * struct onfi_params - ONFI specific parameters that will be reused
+ * @version: ONFI version (BCD encoded), 0 if ONFI is not supported
+ * @tPROG: Page program time
+ * @tBERS: Block erase time
+ * @tR: Page read time
+ * @tCCS: Change column setup time
+ * @async_timing_mode: Supported asynchronous timing mode
+ * @vendor_revision: Vendor specific revision number
+ * @vendor: Vendor specific data
+ */
+struct onfi_params {
+ int version;
+ u16 tPROG;
+ u16 tBERS;
+ u16 tR;
+ u16 tCCS;
+ u16 async_timing_mode;
+ u16 vendor_revision;
+ u8 vendor[88];
+};
+
+/**
+ * struct nand_parameters - NAND generic parameters from the parameter page
+ * @model: Model name
+ * @supports_set_get_features: The NAND chip supports setting/getting features
+ * @set_feature_list: Bitmap of features that can be set
+ * @get_feature_list: Bitmap of features that can be retrieved
+ * @onfi: ONFI specific parameters
+ */
+struct nand_parameters {
+ /* Generic parameters */
+ char model[100];
+ bool supports_set_get_features;
+ DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
+ DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
+
+ /* ONFI parameters */
+ struct onfi_params onfi;
+};
+
/* The maximum expected count of bytes in the NAND ID sequence */
#define NAND_MAX_ID_LEN 8
@@ -1157,21 +1200,15 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
* currently in data_buf.
* @subpagesize: [INTERN] holds the subpagesize
* @id: [INTERN] holds NAND ID
- * @onfi_version: [INTERN] holds the chip ONFI version (BCD encoded),
- * non 0 if ONFI supported.
- * @jedec_version: [INTERN] holds the chip JEDEC version (BCD encoded),
- * non 0 if JEDEC supported.
- * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is
- * supported, 0 otherwise.
- * @jedec_params: [INTERN] holds the JEDEC parameter page when JEDEC is
- * supported, 0 otherwise.
+ * @parameters:		[INTERN] holds generic parameters in an easily
+ * readable form.
* @max_bb_per_die: [INTERN] the max number of bad blocks each die of a
* this nand device will encounter their life times.
* @blocks_per_die: [INTERN] The number of PEBs in a die
* @data_interface: [INTERN] NAND interface timing information
* @read_retries: [INTERN] the number of read retry modes supported
- * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
- * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
+ * @set_features: [REPLACEABLE] set the NAND chip features
+ * @get_features: [REPLACEABLE] get the NAND chip features
* @setup_data_interface: [OPTIONAL] setup the data interface and timing. If
* chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
* means the configuration should not be applied but
@@ -1212,10 +1249,10 @@ struct nand_chip {
bool check_only);
int (*erase)(struct mtd_info *mtd, int page);
int (*scan_bbt)(struct mtd_info *mtd);
- int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
- int feature_addr, uint8_t *subfeature_para);
- int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip,
- int feature_addr, uint8_t *subfeature_para);
+ int (*set_features)(struct mtd_info *mtd, struct nand_chip *chip,
+ int feature_addr, uint8_t *subfeature_para);
+ int (*get_features)(struct mtd_info *mtd, struct nand_chip *chip,
+ int feature_addr, uint8_t *subfeature_para);
int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode);
int (*setup_data_interface)(struct mtd_info *mtd, int chipnr,
const struct nand_data_interface *conf);
@@ -1243,12 +1280,7 @@ struct nand_chip {
int badblockbits;
struct nand_id id;
- int onfi_version;
- int jedec_version;
- union {
- struct nand_onfi_params onfi_params;
- struct nand_jedec_params jedec_params;
- };
+ struct nand_parameters parameters;
u16 max_bb_per_die;
u32 blocks_per_die;
@@ -1535,26 +1567,13 @@ struct platform_nand_data {
struct platform_nand_ctrl ctrl;
};
-/* return the supported features. */
-static inline int onfi_feature(struct nand_chip *chip)
-{
- return chip->onfi_version ? le16_to_cpu(chip->onfi_params.features) : 0;
-}
-
/* return the supported asynchronous timing mode. */
static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
{
- if (!chip->onfi_version)
+ if (!chip->parameters.onfi.version)
return ONFI_TIMING_MODE_UNKNOWN;
- return le16_to_cpu(chip->onfi_params.async_timing_mode);
-}
-/* return the supported synchronous timing mode. */
-static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
-{
- if (!chip->onfi_version)
- return ONFI_TIMING_MODE_UNKNOWN;
- return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
+ return chip->parameters.onfi.async_timing_mode;
}
int onfi_fill_data_interface(struct nand_chip *chip,
@@ -1591,13 +1610,6 @@ static inline int nand_opcode_8bits(unsigned int command)
return 0;
}
-/* return the supported JEDEC features. */
-static inline int jedec_feature(struct nand_chip *chip)
-{
- return chip->jedec_version ? le16_to_cpu(chip->jedec_params.features)
- : 0;
-}
-
/* get timing characteristics from ONFI timing mode. */
const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
@@ -1629,10 +1641,12 @@ int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
int page);
+/* Wrapper to use in order for controllers/vendors to GET/SET FEATURES */
+int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
+int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
/* Stub used by drivers that do not support GET/SET FEATURES operations */
-int nand_onfi_get_set_features_notsupp(struct mtd_info *mtd,
- struct nand_chip *chip, int addr,
- u8 *subfeature_param);
+int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
+ int addr, u8 *subfeature_param);
/* Default read_page_raw implementation */
int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
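
Since chip->parameters.onfi now carries the decoded parameter page and the nand_get_features()/nand_set_features() wrappers replace the old onfi_* hooks in callers, a feature access shrinks to a feature address plus a small parameter buffer. A hedged sketch, assuming the conventional 4-byte ONFI subfeature parameter length:

/* Hypothetical helper switching the chip to a given ONFI timing mode. */
static int my_set_timing_mode(struct nand_chip *chip, int mode)
{
        u8 param[4] = { mode, };        /* assumed ONFI subfeature length */
        int ret;

        if (!chip->parameters.onfi.version)
                return -ENOTSUPP;

        ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE, param);
        if (ret)
                return ret;

        /* read the feature back to check that the chip accepted it */
        ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE, param);
        if (ret)
                return ret;

        return param[0] == mode ? 0 : -EIO;
}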
diff --git a/include/linux/node.h b/include/linux/node.h
index 4ece0fee0ffc..41f171861dcc 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -67,7 +67,7 @@ extern void unregister_one_node(int nid);
extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int register_mem_sect_under_node(struct memory_block *mem_blk,
- int nid);
+ int nid, bool check_nid);
extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
unsigned long phys_index);
@@ -97,7 +97,7 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
return 0;
}
static inline int register_mem_sect_under_node(struct memory_block *mem_blk,
- int nid)
+ int nid, bool check_nid)
{
return 0;
}
diff --git a/include/linux/of.h b/include/linux/of.h
index ebf22dd0860c..4d25e4f952d9 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -363,6 +363,9 @@ extern struct device_node *of_parse_phandle(const struct device_node *np,
extern int of_parse_phandle_with_args(const struct device_node *np,
const char *list_name, const char *cells_name, int index,
struct of_phandle_args *out_args);
+extern int of_parse_phandle_with_args_map(const struct device_node *np,
+ const char *list_name, const char *stem_name, int index,
+ struct of_phandle_args *out_args);
extern int of_parse_phandle_with_fixed_args(const struct device_node *np,
const char *list_name, int cells_count, int index,
struct of_phandle_args *out_args);
@@ -815,6 +818,15 @@ static inline int of_parse_phandle_with_args(const struct device_node *np,
return -ENOSYS;
}
+static inline int of_parse_phandle_with_args_map(const struct device_node *np,
+ const char *list_name,
+ const char *stem_name,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ return -ENOSYS;
+}
+
static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
const char *list_name, int cells_count, int index,
struct of_phandle_args *out_args)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 50c2b8786831..e34a27727b9a 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -156,9 +156,18 @@ static __always_inline int PageCompound(struct page *page)
return test_bit(PG_head, &page->flags) || PageTail(page);
}
+#define PAGE_POISON_PATTERN -1l
+static inline int PagePoisoned(const struct page *page)
+{
+ return page->flags == PAGE_POISON_PATTERN;
+}
+
/*
* Page flags policies wrt compound pages
*
+ * PF_POISONED_CHECK
+ * check if this struct page is poisoned/uninitialized
+ *
* PF_ANY:
* the page flag is relevant for small, head and tail pages.
*
@@ -176,17 +185,20 @@ static __always_inline int PageCompound(struct page *page)
* PF_NO_COMPOUND:
* the page flag is not relevant for compound pages.
*/
-#define PF_ANY(page, enforce) page
-#define PF_HEAD(page, enforce) compound_head(page)
+#define PF_POISONED_CHECK(page) ({ \
+ VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \
+ page; })
+#define PF_ANY(page, enforce) PF_POISONED_CHECK(page)
+#define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(PageTail(page), page); \
- page;})
+ PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \
- compound_head(page);})
+ PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
- page;})
+ PF_POISONED_CHECK(page); })
/*
* Macros to create function definitions for page flags
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 760d74a0e9a9..14d14beb1f7f 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -175,8 +175,7 @@ static inline void page_ref_unfreeze(struct page *page, int count)
VM_BUG_ON_PAGE(page_count(page) != 0, page);
VM_BUG_ON(count == 0);
- smp_mb();
- atomic_set(&page->_refcount, count);
+ atomic_set_release(&page->_refcount, count);
if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
__page_ref_unfreeze(page, count);
}
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index a1a5e5df0f66..af657ca58b70 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -39,10 +39,9 @@ struct pci_epc_ops {
int (*write_header)(struct pci_epc *epc, u8 func_no,
struct pci_epf_header *hdr);
int (*set_bar)(struct pci_epc *epc, u8 func_no,
- enum pci_barno bar,
- dma_addr_t bar_phys, size_t size, int flags);
+ struct pci_epf_bar *epf_bar);
void (*clear_bar)(struct pci_epc *epc, u8 func_no,
- enum pci_barno bar);
+ struct pci_epf_bar *epf_bar);
int (*map_addr)(struct pci_epc *epc, u8 func_no,
phys_addr_t addr, u64 pci_addr, size_t size);
void (*unmap_addr)(struct pci_epc *epc, u8 func_no,
@@ -127,9 +126,9 @@ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
struct pci_epf_header *hdr);
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
- enum pci_barno bar,
- dma_addr_t bar_phys, size_t size, int flags);
-void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, int bar);
+ struct pci_epf_bar *epf_bar);
+void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
+ struct pci_epf_bar *epf_bar);
int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
phys_addr_t phys_addr,
u64 pci_addr, size_t size);
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index e897bf076701..f7d6f4883f8b 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -97,6 +97,8 @@ struct pci_epf_driver {
struct pci_epf_bar {
dma_addr_t phys_addr;
size_t size;
+ enum pci_barno barno;
+ int flags;
};
/**
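
With @barno and @flags now carried inside struct pci_epf_bar, an endpoint function driver fills a single structure and hands it to pci_epc_set_bar()/pci_epc_clear_bar() instead of passing the BAR number, size and flags separately. A minimal sketch of setting up a 64-bit memory BAR; real EPF drivers typically keep the pci_epf_bar in the function's bar[] array so the same structure can later be passed to pci_epc_clear_bar():

/* Hypothetical endpoint-function snippet using the reworked BAR API. */
static int my_epf_set_bar0(struct pci_epc *epc, u8 func_no,
                           dma_addr_t phys_addr, size_t size)
{
        struct pci_epf_bar bar = {
                .phys_addr = phys_addr,
                .size = size,
                .barno = BAR_0,
                .flags = PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
        };

        return pci_epc_set_bar(epc, func_no, &bar);
}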
diff --git a/include/linux/pci.h b/include/linux/pci.h
index ae42289662df..73178a2fcee0 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -256,6 +256,7 @@ enum pci_bus_speed {
PCIE_SPEED_2_5GT = 0x14,
PCIE_SPEED_5_0GT = 0x15,
PCIE_SPEED_8_0GT = 0x16,
+ PCIE_SPEED_16_0GT = 0x17,
PCI_SPEED_UNKNOWN = 0xff,
};
@@ -469,6 +470,9 @@ struct pci_host_bridge {
struct msi_controller *msi;
unsigned int ignore_reset_delay:1; /* For entire hierarchy */
unsigned int no_ext_tags:1; /* No Extended Tags */
+ unsigned int native_aer:1; /* OS may use PCIe AER */
+ unsigned int native_hotplug:1; /* OS may use PCIe hotplug */
+ unsigned int native_pme:1; /* OS may use PCIe PME */
/* Resource alignment requirements */
resource_size_t (*align_resource)(struct pci_dev *dev,
const struct resource *res,
@@ -949,11 +953,6 @@ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
unsigned int devfn);
-static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
- unsigned int devfn)
-{
- return pci_get_domain_bus_and_slot(0, bus, devfn);
-}
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
int pci_dev_present(const struct pci_device_id *ids);
@@ -1082,7 +1081,11 @@ int pcie_get_mps(struct pci_dev *dev);
int pcie_set_mps(struct pci_dev *dev, int mps);
int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
enum pcie_link_width *width);
-void pcie_flr(struct pci_dev *dev);
+u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width);
+void pcie_print_link_status(struct pci_dev *dev);
+int pcie_flr(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
int pci_reset_function_locked(struct pci_dev *dev);
@@ -1095,7 +1098,7 @@ int pci_reset_bus(struct pci_bus *bus);
int pci_try_reset_bus(struct pci_bus *bus);
void pci_reset_secondary_bus(struct pci_dev *dev);
void pcibios_reset_secondary_bus(struct pci_dev *dev);
-void pci_reset_bridge_secondary_bus(struct pci_dev *dev);
+int pci_reset_bridge_secondary_bus(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
@@ -1228,7 +1231,8 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
void *alignf_data);
-int pci_register_io_range(phys_addr_t addr, resource_size_t size);
+int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
+ resource_size_t size);
unsigned long pci_address_to_pio(phys_addr_t addr);
phys_addr_t pci_pio_to_address(unsigned long pio);
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
@@ -1297,7 +1301,6 @@ unsigned char pci_bus_max_busnr(struct pci_bus *bus);
void pci_setup_bridge(struct pci_bus *bus);
resource_size_t pcibios_window_alignment(struct pci_bus *bus,
unsigned long type);
-resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
#define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
#define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
@@ -1448,10 +1451,8 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d,
#ifdef CONFIG_PCIEPORTBUS
extern bool pcie_ports_disabled;
-extern bool pcie_ports_auto;
#else
#define pcie_ports_disabled true
-#define pcie_ports_auto false
#endif
#ifdef CONFIG_PCIEASPM
@@ -1663,9 +1664,6 @@ static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
unsigned int devfn)
{ return NULL; }
-static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
- unsigned int devfn)
-{ return NULL; }
static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
unsigned int bus, unsigned int devfn)
{ return NULL; }
@@ -1925,6 +1923,7 @@ void pcibios_release_device(struct pci_dev *dev);
void pcibios_penalize_isa_irq(int irq, int active);
int pcibios_alloc_irq(struct pci_dev *dev);
void pcibios_free_irq(struct pci_dev *dev);
+resource_size_t pcibios_default_alignment(void);
#ifdef CONFIG_HIBERNATE_CALLBACKS
extern struct dev_pm_ops pcibios_pm_ops;
@@ -1957,6 +1956,11 @@ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
int pci_sriov_get_totalvfs(struct pci_dev *dev);
resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
+
+/* Arch may override these (weak) */
+int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
+int pcibios_sriov_disable(struct pci_dev *pdev);
+resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
#else
static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
{
@@ -2184,24 +2188,11 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
-static inline struct device_node *
-pci_device_to_OF_node(const struct pci_dev *pdev)
-{
- return pdev ? pdev->dev.of_node : NULL;
-}
-
-static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
-{
- return bus ? bus->dev.of_node : NULL;
-}
-
#else /* CONFIG_OF */
static inline void pci_set_of_node(struct pci_dev *dev) { }
static inline void pci_release_of_node(struct pci_dev *dev) { }
static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
-static inline struct device_node *
-pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
static inline int pci_parse_request_of_pci_ranges(struct device *dev,
@@ -2212,6 +2203,17 @@ static inline int pci_parse_request_of_pci_ranges(struct device *dev,
}
#endif /* CONFIG_OF */
+static inline struct device_node *
+pci_device_to_OF_node(const struct pci_dev *pdev)
+{
+ return pdev ? pdev->dev.of_node : NULL;
+}
+
+static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
+{
+ return bus ? bus->dev.of_node : NULL;
+}
+
#ifdef CONFIG_ACPI
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
@@ -2282,41 +2284,9 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
return false;
}
-/**
- * pci_uevent_ers - emit a uevent during recovery path of pci device
- * @pdev: pci device to check
- * @err_type: type of error event
- *
- */
-static inline void pci_uevent_ers(struct pci_dev *pdev,
- enum pci_ers_result err_type)
-{
- int idx = 0;
- char *envp[3];
-
- switch (err_type) {
- case PCI_ERS_RESULT_NONE:
- case PCI_ERS_RESULT_CAN_RECOVER:
- envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY";
- envp[idx++] = "DEVICE_ONLINE=0";
- break;
- case PCI_ERS_RESULT_RECOVERED:
- envp[idx++] = "ERROR_EVENT=SUCCESSFUL_RECOVERY";
- envp[idx++] = "DEVICE_ONLINE=1";
- break;
- case PCI_ERS_RESULT_DISCONNECT:
- envp[idx++] = "ERROR_EVENT=FAILED_RECOVERY";
- envp[idx++] = "DEVICE_ONLINE=0";
- break;
- default:
- break;
- }
-
- if (idx > 0) {
- envp[idx++] = NULL;
- kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
- }
-}
+#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH)
+void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
+#endif
/* Provide the legacy pci_dma_* API */
#include <linux/pci-dma-compat.h>
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 2d61d9bde83d..cc608fc55334 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1334,6 +1334,7 @@
#define PCI_DEVICE_ID_IMS_TT3D 0x9135
#define PCI_VENDOR_ID_AMCC 0x10e8
+#define PCI_VENDOR_ID_AMPERE 0x1def
#define PCI_VENDOR_ID_INTERG 0x10ea
#define PCI_DEVICE_ID_INTERG_1682 0x1682
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
deleted file mode 100644
index b69769dbf659..000000000000
--- a/include/linux/pcieport_if.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * File: pcieport_if.h
- * Purpose: PCI Express Port Bus Driver's IF Data Structure
- *
- * Copyright (C) 2004 Intel
- * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
- */
-
-#ifndef _PCIEPORT_IF_H_
-#define _PCIEPORT_IF_H_
-
-/* Port Type */
-#define PCIE_ANY_PORT (~0)
-
-/* Service Type */
-#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */
-#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT)
-#define PCIE_PORT_SERVICE_AER_SHIFT 1 /* Advanced Error Reporting */
-#define PCIE_PORT_SERVICE_AER (1 << PCIE_PORT_SERVICE_AER_SHIFT)
-#define PCIE_PORT_SERVICE_HP_SHIFT 2 /* Native Hotplug */
-#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT)
-#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */
-#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT)
-#define PCIE_PORT_SERVICE_DPC_SHIFT 4 /* Downstream Port Containment */
-#define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT)
-
-struct pcie_device {
- int irq; /* Service IRQ/MSI/MSI-X Vector */
- struct pci_dev *port; /* Root/Upstream/Downstream Port */
- u32 service; /* Port service this device represents */
- void *priv_data; /* Service Private Data */
- struct device device; /* Generic Device Interface */
-};
-#define to_pcie_device(d) container_of(d, struct pcie_device, device)
-
-static inline void set_service_data(struct pcie_device *dev, void *data)
-{
- dev->priv_data = data;
-}
-
-static inline void *get_service_data(struct pcie_device *dev)
-{
- return dev->priv_data;
-}
-
-struct pcie_port_service_driver {
- const char *name;
- int (*probe) (struct pcie_device *dev);
- void (*remove) (struct pcie_device *dev);
- int (*suspend) (struct pcie_device *dev);
- int (*resume) (struct pcie_device *dev);
-
- /* Device driver may resume normal operations */
- void (*error_resume)(struct pci_dev *dev);
-
- /* Link Reset Capability - AER service driver specific */
- pci_ers_result_t (*reset_link) (struct pci_dev *dev);
-
- int port_type; /* Type of the port this driver can handle */
- u32 service; /* Port service this device represents */
-
- struct device_driver driver;
-};
-#define to_service_driver(d) \
- container_of(d, struct pcie_port_service_driver, driver)
-
-int pcie_port_service_register(struct pcie_port_service_driver *new);
-void pcie_port_service_unregister(struct pcie_port_service_driver *new);
-
-#endif /* _PCIEPORT_IF_H_ */
diff --git a/include/linux/platform_data/asoc-ti-mcbsp.h b/include/linux/platform_data/asoc-ti-mcbsp.h
index e684543254f3..e319d0a2ec82 100644
--- a/include/linux/platform_data/asoc-ti-mcbsp.h
+++ b/include/linux/platform_data/asoc-ti-mcbsp.h
@@ -25,10 +25,6 @@
#include <linux/spinlock.h>
#include <linux/clk.h>
-#define MCBSP_CONFIG_TYPE2 0x2
-#define MCBSP_CONFIG_TYPE3 0x3
-#define MCBSP_CONFIG_TYPE4 0x4
-
/* Platform specific configuration */
struct omap_mcbsp_ops {
void (*request)(unsigned int);
@@ -47,14 +43,6 @@ struct omap_mcbsp_platform_data {
int (*force_ick_on)(struct clk *clk, bool force_on);
};
-/**
- * omap_mcbsp_dev_attr - OMAP McBSP device attributes for omap_hwmod
- * @sidetone: name of the sidetone device
- */
-struct omap_mcbsp_dev_attr {
- const char *sidetone;
-};
-
void omap3_mcbsp_init_pdata_callback(struct omap_mcbsp_platform_data *pdata);
#endif
diff --git a/include/linux/platform_data/dmtimer-omap.h b/include/linux/platform_data/dmtimer-omap.h
index a19b78d826e9..757a0f9e26f9 100644
--- a/include/linux/platform_data/dmtimer-omap.h
+++ b/include/linux/platform_data/dmtimer-omap.h
@@ -20,12 +20,50 @@
#ifndef __PLATFORM_DATA_DMTIMER_OMAP_H__
#define __PLATFORM_DATA_DMTIMER_OMAP_H__
+struct omap_dm_timer_ops {
+ struct omap_dm_timer *(*request_by_node)(struct device_node *np);
+ struct omap_dm_timer *(*request_specific)(int timer_id);
+ struct omap_dm_timer *(*request)(void);
+
+ int (*free)(struct omap_dm_timer *timer);
+
+ void (*enable)(struct omap_dm_timer *timer);
+ void (*disable)(struct omap_dm_timer *timer);
+
+ int (*get_irq)(struct omap_dm_timer *timer);
+ int (*set_int_enable)(struct omap_dm_timer *timer,
+ unsigned int value);
+ int (*set_int_disable)(struct omap_dm_timer *timer, u32 mask);
+
+ struct clk *(*get_fclk)(struct omap_dm_timer *timer);
+
+ int (*start)(struct omap_dm_timer *timer);
+ int (*stop)(struct omap_dm_timer *timer);
+ int (*set_source)(struct omap_dm_timer *timer, int source);
+
+ int (*set_load)(struct omap_dm_timer *timer, int autoreload,
+ unsigned int value);
+ int (*set_match)(struct omap_dm_timer *timer, int enable,
+ unsigned int match);
+ int (*set_pwm)(struct omap_dm_timer *timer, int def_on,
+ int toggle, int trigger);
+ int (*set_prescaler)(struct omap_dm_timer *timer, int prescaler);
+
+ unsigned int (*read_counter)(struct omap_dm_timer *timer);
+ int (*write_counter)(struct omap_dm_timer *timer,
+ unsigned int value);
+ unsigned int (*read_status)(struct omap_dm_timer *timer);
+ int (*write_status)(struct omap_dm_timer *timer,
+ unsigned int value);
+};
+
struct dmtimer_platform_data {
/* set_timer_src - Only used for OMAP1 devices */
int (*set_timer_src)(struct platform_device *pdev, int source);
u32 timer_capability;
u32 timer_errata;
int (*get_context_loss_count)(struct device *);
+ const struct omap_dm_timer_ops *timer_ops;
};
#endif /* __PLATFORM_DATA_DMTIMER_OMAP_H__ */
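
The timer_ops table lets client drivers (PWM, clockevent users, ...) drive a dmtimer entirely through the timer's platform data instead of linking against omap_dm_timer_* symbols. A rough consumer sketch; the "ti,timers" phandle lookup and the surrounding probe flow are assumptions about the client binding, not something this header mandates:

/* Hypothetical client resolving a dmtimer through its ops table. */
#include <linux/of_platform.h>
#include <linux/platform_data/dmtimer-omap.h>

static int my_client_probe(struct platform_device *pdev)
{
        struct device_node *timer_np;
        struct platform_device *timer_pdev;
        struct dmtimer_platform_data *timer_pdata;
        const struct omap_dm_timer_ops *ops;
        struct omap_dm_timer *timer;

        timer_np = of_parse_phandle(pdev->dev.of_node, "ti,timers", 0);
        if (!timer_np)
                return -ENODEV;

        timer_pdev = of_find_device_by_node(timer_np);
        if (!timer_pdev) {
                of_node_put(timer_np);
                return -EPROBE_DEFER;
        }

        timer_pdata = dev_get_platdata(&timer_pdev->dev);
        if (!timer_pdata || !timer_pdata->timer_ops) {
                of_node_put(timer_np);
                return -EPROBE_DEFER;
        }

        ops = timer_pdata->timer_ops;
        timer = ops->request_by_node(timer_np);
        of_node_put(timer_np);
        if (!timer)
                return -EPROBE_DEFER;

        /* configure and run: ops->set_load(), ops->set_pwm(), ops->start() */
        ops->start(timer);

        return 0;
}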
diff --git a/include/linux/platform_data/gpio-htc-egpio.h b/include/linux/platform_data/gpio-htc-egpio.h
index b7baf1e42c55..9a3e78082883 100644
--- a/include/linux/platform_data/gpio-htc-egpio.h
+++ b/include/linux/platform_data/gpio-htc-egpio.h
@@ -6,8 +6,6 @@
#ifndef __HTC_EGPIO_H__
#define __HTC_EGPIO_H__
-#include <linux/gpio.h>
-
/* Descriptive values for all-in or all-out htc_egpio_chip descriptors. */
#define HTC_EGPIO_OUTPUT (~0)
#define HTC_EGPIO_INPUT 0
diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
index cb2618147c34..8612855691b2 100644
--- a/include/linux/platform_data/gpio-omap.h
+++ b/include/linux/platform_data/gpio-omap.h
@@ -157,11 +157,6 @@
#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr))
#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES)
-struct omap_gpio_dev_attr {
- int bank_width; /* GPIO bank width */
- bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
-};
-
struct omap_gpio_reg_offs {
u16 revision;
u16 direction;
diff --git a/include/linux/platform_data/mtd-nand-pxa3xx.h b/include/linux/platform_data/mtd-nand-pxa3xx.h
index b42ad83cbc20..4fd0f592a2d2 100644
--- a/include/linux/platform_data/mtd-nand-pxa3xx.h
+++ b/include/linux/platform_data/mtd-nand-pxa3xx.h
@@ -6,41 +6,22 @@
#include <linux/mtd/partitions.h>
/*
- * Current pxa3xx_nand controller has two chip select which
- * both be workable.
- *
- * Notice should be taken that:
- * When you want to use this feature, you should not enable the
- * keep configuration feature, for two chip select could be
- * attached with different nand chip. The different page size
- * and timing requirement make the keep configuration impossible.
+ * Current pxa3xx_nand controller has two chip select which both be workable but
+ * historically all platforms remaining on platform data used only one. Switch
+ * to device tree if you need more.
*/
-
-/* The max num of chip select current support */
-#define NUM_CHIP_SELECT (2)
struct pxa3xx_nand_platform_data {
-
- /* the data flash bus is shared between the Static Memory
- * Controller and the Data Flash Controller, the arbiter
- * controls the ownership of the bus
- */
- int enable_arbiter;
-
- /* allow platform code to keep OBM/bootloader defined NFC config */
- int keep_config;
-
- /* indicate how many chip selects will be used */
- int num_cs;
-
- /* use an flash-based bad block table */
- bool flash_bbt;
-
- /* requested ECC strength and ECC step size */
+ /* Keep OBM/bootloader NFC timing configuration */
+ bool keep_config;
+ /* Use a flash-based bad block table */
+ bool flash_bbt;
+ /* Requested ECC strength and ECC step size */
int ecc_strength, ecc_step_size;
-
- const struct mtd_partition *parts[NUM_CHIP_SELECT];
- unsigned int nr_parts[NUM_CHIP_SELECT];
+ /* Partitions */
+ const struct mtd_partition *parts;
+ unsigned int nr_parts;
};
extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info);
+
#endif /* __ASM_ARCH_PXA3XX_NAND_H */
diff --git a/include/linux/platform_data/phy-da8xx-usb.h b/include/linux/platform_data/phy-da8xx-usb.h
new file mode 100644
index 000000000000..85c2b99381b2
--- /dev/null
+++ b/include/linux/platform_data/phy-da8xx-usb.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-da8xx-usb - TI DaVinci DA8xx USB PHY driver
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__
+#define __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__
+
+#include <linux/regmap.h>
+
+/**
+ * da8xx_usb_phy_platform_data
+ * @cfgchip: CFGCHIP syscon regmap
+ */
+struct da8xx_usb_phy_platform_data {
+ struct regmap *cfgchip;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__ */
diff --git a/include/linux/platform_data/pm33xx.h b/include/linux/platform_data/pm33xx.h
new file mode 100644
index 000000000000..f9bed2a0af9d
--- /dev/null
+++ b/include/linux/platform_data/pm33xx.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TI pm33xx platform data
+ *
+ * Copyright (C) 2016-2018 Texas Instruments, Inc.
+ * Dave Gerlach <d-gerlach@ti.com>
+ */
+
+#ifndef _LINUX_PLATFORM_DATA_PM33XX_H
+#define _LINUX_PLATFORM_DATA_PM33XX_H
+
+#include <linux/kbuild.h>
+#include <linux/types.h>
+
+#ifndef __ASSEMBLER__
+struct am33xx_pm_sram_addr {
+ void (*do_wfi)(void);
+ unsigned long *do_wfi_sz;
+ unsigned long *resume_offset;
+ unsigned long *emif_sram_table;
+ unsigned long *ro_sram_data;
+};
+
+struct am33xx_pm_platform_data {
+ int (*init)(void);
+ int (*soc_suspend)(unsigned int state, int (*fn)(unsigned long));
+ struct am33xx_pm_sram_addr *(*get_sram_addrs)(void);
+};
+
+struct am33xx_pm_sram_data {
+ u32 wfi_flags;
+ u32 l2_aux_ctrl_val;
+ u32 l2_prefetch_ctrl_val;
+} __packed __aligned(8);
+
+struct am33xx_pm_ro_sram_data {
+ u32 amx3_pm_sram_data_virt;
+ u32 amx3_pm_sram_data_phys;
+} __packed __aligned(8);
+
+#endif /* __ASSEMBLER__ */
+#endif /* _LINUX_PLATFORM_DATA_PM33XX_H */
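
A hypothetical sketch of the SoC glue that would fill in am33xx_pm_platform_data; the helper bodies are placeholders, not the mach-omap2 implementation.

static struct am33xx_pm_sram_addr board_sram_addrs;	/* filled by the SRAM code */

static struct am33xx_pm_sram_addr *board_get_sram_addrs(void)
{
	return &board_sram_addrs;
}

static int board_pm_init(void)
{
	/* e.g. claim the SRAM pool and map the EMIF registers */
	return 0;
}

static int board_soc_suspend(unsigned int state, int (*fn)(unsigned long))
{
	/* hand control to the relocated SRAM code; placeholder only */
	return fn(0);
}

static struct am33xx_pm_platform_data board_pm_data = {
	.init		= board_pm_init,
	.soc_suspend	= board_soc_suspend,
	.get_sram_addrs	= board_get_sram_addrs,
};
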
diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h
index 13c83a25958a..0bf9fddb8306 100644
--- a/include/linux/platform_data/spi-omap2-mcspi.h
+++ b/include/linux/platform_data/spi-omap2-mcspi.h
@@ -2,10 +2,6 @@
#ifndef _OMAP2_MCSPI_H
#define _OMAP2_MCSPI_H
-#define OMAP2_MCSPI_REV 0
-#define OMAP3_MCSPI_REV 1
-#define OMAP4_MCSPI_REV 2
-
#define OMAP4_MCSPI_REG_OFFSET 0x100
#define MCSPI_PINDIR_D0_IN_D1_OUT 0
@@ -17,10 +13,6 @@ struct omap2_mcspi_platform_config {
unsigned int pin_dir:1;
};
-struct omap2_mcspi_dev_attr {
- unsigned short num_chipselect;
-};
-
struct omap2_mcspi_device_config {
unsigned turbo_mode:1;
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index 1be356330b96..80ce28d40832 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -16,6 +16,10 @@ enum ti_sysc_module_type {
TI_SYSC_OMAP4_USB_HOST_FS,
};
+struct ti_sysc_cookie {
+ void *data;
+};
+
/**
* struct sysc_regbits - TI OCP_SYSCONFIG register field offsets
* @midle_shift: Offset of the midle bit
@@ -41,6 +45,7 @@ struct sysc_regbits {
s8 emufree_shift;
};
+#define SYSC_QUIRK_LEGACY_IDLE BIT(8)
#define SYSC_QUIRK_RESET_STATUS BIT(7)
#define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6)
#define SYSC_QUIRK_NO_RESET_ON_INIT BIT(5)
@@ -83,4 +88,49 @@ struct sysc_config {
u32 quirks;
};
+enum sysc_registers {
+ SYSC_REVISION,
+ SYSC_SYSCONFIG,
+ SYSC_SYSSTATUS,
+ SYSC_MAX_REGS,
+};
+
+/**
+ * struct ti_sysc_module_data - ti-sysc to hwmod translation data for a module
+ * @name: legacy "ti,hwmods" module name
+ * @module_pa: physical address of the interconnect target module
+ * @module_size: size of the interconnect target module
+ * @offsets: array of register offsets as listed in enum sysc_registers
+ * @nr_offsets: number of registers
+ * @cap: interconnect target module capabilities
+ * @cfg: interconnect target module configuration
+ *
+ * This data is enough to allocate a new struct omap_hwmod_class_sysconfig
+ * based on device tree data parsed by ti-sysc driver.
+ */
+struct ti_sysc_module_data {
+ const char *name;
+ u64 module_pa;
+ u32 module_size;
+ int *offsets;
+ int nr_offsets;
+ const struct sysc_capabilities *cap;
+ struct sysc_config *cfg;
+};
+
+struct device;
+
+struct ti_sysc_platform_data {
+ struct of_dev_auxdata *auxdata;
+ int (*init_module)(struct device *dev,
+ const struct ti_sysc_module_data *data,
+ struct ti_sysc_cookie *cookie);
+ int (*enable_module)(struct device *dev,
+ const struct ti_sysc_cookie *cookie);
+ int (*idle_module)(struct device *dev,
+ const struct ti_sysc_cookie *cookie);
+ int (*shutdown_module)(struct device *dev,
+ const struct ti_sysc_cookie *cookie);
+};
+
#endif /* __TI_SYSC_DATA_H__ */
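
A sketch (not the mach-omap2 code) of the callbacks a SoC could plug into ti_sysc_platform_data so the ti-sysc driver can call back into legacy hwmod handling; the helper names are hypothetical.

static int board_sysc_init_module(struct device *dev,
				  const struct ti_sysc_module_data *data,
				  struct ti_sysc_cookie *cookie)
{
	/* Look up the legacy hwmod by data->name and stash it in the cookie */
	cookie->data = NULL;	/* e.g. a looked-up hwmod pointer */
	return 0;
}

static int board_sysc_enable_module(struct device *dev,
				    const struct ti_sysc_cookie *cookie)
{
	/* enable clocks/resets for the module remembered in cookie->data */
	return 0;
}

static struct ti_sysc_platform_data board_sysc_pdata = {
	.init_module	= board_sysc_init_module,
	.enable_module	= board_sysc_enable_module,
	/* .idle_module and .shutdown_module would be wired up the same way */
};
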
diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
index d8b187c3925d..7b81dad712de 100644
--- a/include/linux/power/smartreflex.h
+++ b/include/linux/power/smartreflex.h
@@ -143,6 +143,13 @@
#define OMAP3430_SR_ERRWEIGHT 0x04
#define OMAP3430_SR_ERRMAXLIMIT 0x02
+enum sr_instance {
+ OMAP_SR_MPU, /* shared with iva on omap3 */
+ OMAP_SR_CORE,
+ OMAP_SR_IVA,
+ OMAP_SR_NR,
+};
+
struct omap_sr {
char *name;
struct list_head node;
@@ -207,7 +214,6 @@ struct omap_smartreflex_dev_attr {
const char *sensor_voltdm_name;
};
-#ifdef CONFIG_POWER_AVS_OMAP
/*
* The smart reflex driver supports CLASS1 CLASS2 and CLASS3 SR.
* The smartreflex class driver should pass the class type.
@@ -290,6 +296,8 @@ struct omap_sr_data {
struct voltagedomain *voltdm;
};
+#ifdef CONFIG_POWER_AVS_OMAP
+
/* Smartreflex module enable/disable interface */
void omap_sr_enable(struct voltagedomain *voltdm);
void omap_sr_disable(struct voltagedomain *voltdm);
diff --git a/include/linux/printk.h b/include/linux/printk.h
index e9b603ee9953..6d7e800affd8 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -201,6 +201,7 @@ void __init setup_log_buf(int early);
__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
+extern asmlinkage void dump_stack(void) __cold;
extern void printk_safe_init(void);
extern void printk_safe_flush(void);
extern void printk_safe_flush_on_panic(void);
@@ -264,6 +265,10 @@ static inline void show_regs_print_info(const char *log_lvl)
{
}
+static inline asmlinkage void dump_stack(void)
+{
+}
+
static inline void printk_safe_init(void)
{
}
@@ -279,8 +284,6 @@ static inline void printk_safe_flush_on_panic(void)
extern int kptr_restrict;
-extern asmlinkage void dump_stack(void) __cold;
-
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
#endif
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 9395f06e8372..e6d226464838 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -39,6 +39,7 @@ struct persistent_ram_ecc_info {
int ecc_size;
int symsize;
int poly;
+ uint16_t *par;
};
struct persistent_ram_zone {
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 5ac9de4fcd6f..ca9772c8e48b 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -267,7 +267,6 @@ struct dqstats {
struct percpu_counter counter[_DQST_DQSTAT_LAST];
};
-extern struct dqstats *dqstats_pcpu;
extern struct dqstats dqstats;
static inline void dqstats_inc(unsigned int type)
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index a366cc314479..ea8505204fdf 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -106,6 +106,10 @@ extern const struct raid6_calls raid6_avx512x1;
extern const struct raid6_calls raid6_avx512x2;
extern const struct raid6_calls raid6_avx512x4;
extern const struct raid6_calls raid6_s390vx8;
+extern const struct raid6_calls raid6_vpermxor1;
+extern const struct raid6_calls raid6_vpermxor2;
+extern const struct raid6_calls raid6_vpermxor4;
+extern const struct raid6_calls raid6_vpermxor8;
struct raid6_recov_calls {
void (*data2)(int, size_t, int, int, void **);
diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h
index 31e1ff69efc8..ec8655514283 100644
--- a/include/linux/raid_class.h
+++ b/include/linux/raid_class.h
@@ -38,6 +38,7 @@ enum raid_level {
RAID_LEVEL_5,
RAID_LEVEL_50,
RAID_LEVEL_6,
+ RAID_LEVEL_JBOD,
};
struct raid_data {
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index adb88f8cefbc..9326d671b6e6 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -27,12 +27,38 @@ struct device_node;
struct of_phandle_args;
/**
+ * struct reset_control_lookup - represents a single lookup entry
+ *
+ * @list: internal list of all reset lookup entries
+ * @provider: name of the reset controller device controlling this reset line
+ * @index: ID of the reset controller in the reset controller device
+ * @dev_id: name of the device associated with this reset line
+ * @con_id: name of the reset line (can be NULL)
+ */
+struct reset_control_lookup {
+ struct list_head list;
+ const char *provider;
+ unsigned int index;
+ const char *dev_id;
+ const char *con_id;
+};
+
+#define RESET_LOOKUP(_provider, _index, _dev_id, _con_id) \
+ { \
+ .provider = _provider, \
+ .index = _index, \
+ .dev_id = _dev_id, \
+ .con_id = _con_id, \
+ }
+
+/**
* struct reset_controller_dev - reset controller entity that might
* provide multiple reset controls
* @ops: a pointer to device specific struct reset_control_ops
* @owner: kernel module of the reset controller driver
* @list: internal list of reset controller devices
* @reset_control_head: head of internal list of requested reset controls
+ * @dev: corresponding driver model device struct
* @of_node: corresponding device tree node as phandle target
* @of_reset_n_cells: number of cells in reset line specifiers
* @of_xlate: translation function to translate from specifier as found in the
@@ -44,6 +70,7 @@ struct reset_controller_dev {
struct module *owner;
struct list_head list;
struct list_head reset_control_head;
+ struct device *dev;
struct device_node *of_node;
int of_reset_n_cells;
int (*of_xlate)(struct reset_controller_dev *rcdev,
@@ -58,4 +85,7 @@ struct device;
int devm_reset_controller_register(struct device *dev,
struct reset_controller_dev *rcdev);
+void reset_controller_add_lookup(struct reset_control_lookup *lookup,
+ unsigned int num_entries);
+
#endif
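
A minimal sketch of how board code might register reset lookups for non-DT consumers with the new API; the provider and device names are made up.

#include <linux/reset-controller.h>

static struct reset_control_lookup board_reset_lookups[] = {
	RESET_LOOKUP("10000000.reset-controller", 3, "10010000.usb", NULL),
	RESET_LOOKUP("10000000.reset-controller", 7, "10020000.mmc", "phy"),
};

static void __init board_register_resets(void)
{
	reset_controller_add_lookup(board_reset_lookups,
				    ARRAY_SIZE(board_reset_lookups));
}
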
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index 478acf6efac6..e964bbd03fc2 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -36,12 +36,12 @@
#define CHECK_REG_CMD 2
#define RTSX_HDBAR 0x08
-#define SG_INT 0x04
-#define SG_END 0x02
-#define SG_VALID 0x01
-#define SG_NO_OP 0x00
-#define SG_TRANS_DATA (0x02 << 4)
-#define SG_LINK_DESC (0x03 << 4)
+#define RTSX_SG_INT 0x04
+#define RTSX_SG_END 0x02
+#define RTSX_SG_VALID 0x01
+#define RTSX_SG_NO_OP 0x00
+#define RTSX_SG_TRANS_DATA (0x02 << 4)
+#define RTSX_SG_LINK_DESC (0x03 << 4)
#define RTSX_HDBCTLR 0x0C
#define SDMA_MODE 0x00
#define ADMA_MODE (0x02 << 26)
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 0dcc60e820de..841585f6e5f2 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -171,6 +171,8 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
* starting from the last allocated bit. This is less efficient
* than the default behavior (false).
*
+ * This operation provides acquire barrier semantics if it succeeds.
+ *
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
@@ -300,6 +302,12 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}
+static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb,
+ unsigned int bitnr)
+{
+ clear_bit_unlock(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
+}
+
static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
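
A sketch of the intended pairing: sbitmap_get() gives acquire semantics on success and the new sbitmap_clear_bit_unlock() gives release semantics on free, so per-tag data written before the free is visible before the tag can be reallocated. The tag-pool structure below is hypothetical.

#include <linux/sbitmap.h>

struct tag_pool {
	struct sbitmap map;
	void *data[128];			/* one slot per tag */
};

static int tag_alloc(struct tag_pool *pool, void *payload)
{
	int tag = sbitmap_get(&pool->map, 0, false);	/* acquire on success */

	if (tag < 0)
		return -EBUSY;
	pool->data[tag] = payload;		/* ordered after the acquire */
	return tag;
}

static void tag_free(struct tag_pool *pool, int tag)
{
	pool->data[tag] = NULL;			/* ordered before the release */
	sbitmap_clear_bit_unlock(&pool->map, tag);
}
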
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index aa5d4eb725f5..51f52020ad5f 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -65,16 +65,18 @@ struct sg_table {
*/
#define SG_MAGIC 0x87654321
+#define SG_CHAIN 0x01UL
+#define SG_END 0x02UL
/*
* We overload the LSB of the page pointer to indicate whether it's
* a valid sg entry, or whether it points to the start of a new scatterlist.
* Those low bits are there for everyone! (thanks mason :-)
*/
-#define sg_is_chain(sg) ((sg)->page_link & 0x01)
-#define sg_is_last(sg) ((sg)->page_link & 0x02)
+#define sg_is_chain(sg) ((sg)->page_link & SG_CHAIN)
+#define sg_is_last(sg) ((sg)->page_link & SG_END)
#define sg_chain_ptr(sg) \
- ((struct scatterlist *) ((sg)->page_link & ~0x03))
+ ((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END)))
/**
* sg_assign_page - Assign a given page to an SG entry
@@ -88,13 +90,13 @@ struct sg_table {
**/
static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
{
- unsigned long page_link = sg->page_link & 0x3;
+ unsigned long page_link = sg->page_link & (SG_CHAIN | SG_END);
/*
* In order for the low bit stealing approach to work, pages
* must be aligned at a 32-bit boundary as a minimum.
*/
- BUG_ON((unsigned long) page & 0x03);
+ BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
@@ -130,7 +132,7 @@ static inline struct page *sg_page(struct scatterlist *sg)
BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
#endif
- return (struct page *)((sg)->page_link & ~0x3);
+ return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
}
/**
@@ -178,7 +180,8 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
* Set lowest bit to indicate a link pointer, and make sure to clear
* the termination bit if it happens to be set.
*/
- prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
+ prv[prv_nents - 1].page_link = ((unsigned long) sgl | SG_CHAIN)
+ & ~SG_END;
}
/**
@@ -198,8 +201,8 @@ static inline void sg_mark_end(struct scatterlist *sg)
/*
* Set termination bit, clear potential chain bit
*/
- sg->page_link |= 0x02;
- sg->page_link &= ~0x01;
+ sg->page_link |= SG_END;
+ sg->page_link &= ~SG_CHAIN;
}
/**
@@ -215,7 +218,7 @@ static inline void sg_unmark_end(struct scatterlist *sg)
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
- sg->page_link &= ~0x02;
+ sg->page_link &= ~SG_END;
}
/**
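
For clarity, a small illustration (an assumed helper, not part of the API) of what the named bits encode in page_link after this change: a 4-byte-aligned pointer plus the SG_CHAIN and SG_END flags in the two low bits.

static struct page *example_sg_decode(const struct scatterlist *sg, bool *last)
{
	*last = sg->page_link & SG_END;		/* entry terminates the list */
	if (sg->page_link & SG_CHAIN)		/* entry links to another table */
		return NULL;
	return (struct page *)(sg->page_link & ~(SG_CHAIN | SG_END));
}
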
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 23b4f9cb82db..a7ce74c74e49 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -319,7 +319,7 @@ extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
- const struct cred *, u32);
+ const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
new file mode 100644
index 000000000000..b458c87b866c
--- /dev/null
+++ b/include/linux/scmi_protocol.h
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCMI Message Protocol driver header
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+#include <linux/device.h>
+#include <linux/types.h>
+
+#define SCMI_MAX_STR_SIZE 16
+#define SCMI_MAX_NUM_RATES 16
+
+/**
+ * struct scmi_revision_info - version information structure
+ *
+ * @major_ver: Major ABI version. Change here implies risk of backward
+ * compatibility break.
+ * @minor_ver: Minor ABI version. Change here implies new feature addition,
+ * or compatible change in ABI.
+ * @num_protocols: Number of protocols that are implemented, excluding the
+ * base protocol.
+ * @num_agents: Number of agents in the system.
+ * @impl_ver: A vendor-specific implementation version.
+ * @vendor_id: A vendor identifier (NULL terminated ASCII string)
+ * @sub_vendor_id: A sub-vendor identifier (NULL terminated ASCII string)
+ */
+struct scmi_revision_info {
+ u16 major_ver;
+ u16 minor_ver;
+ u8 num_protocols;
+ u8 num_agents;
+ u32 impl_ver;
+ char vendor_id[SCMI_MAX_STR_SIZE];
+ char sub_vendor_id[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_clock_info {
+ char name[SCMI_MAX_STR_SIZE];
+ bool rate_discrete;
+ union {
+ struct {
+ int num_rates;
+ u64 rates[SCMI_MAX_NUM_RATES];
+ } list;
+ struct {
+ u64 min_rate;
+ u64 max_rate;
+ u64 step_size;
+ } range;
+ };
+};
+
+struct scmi_handle;
+
+/**
+ * struct scmi_clk_ops - represents the various operations provided
+ * by SCMI Clock Protocol
+ *
+ * @count_get: get the count of clocks provided by SCMI
+ * @info_get: get the information of the specified clock
+ * @rate_get: request the current clock rate of a clock
+ * @rate_set: set the clock rate of a clock
+ * @enable: enables the specified clock
+ * @disable: disables the specified clock
+ */
+struct scmi_clk_ops {
+ int (*count_get)(const struct scmi_handle *handle);
+
+ const struct scmi_clock_info *(*info_get)
+ (const struct scmi_handle *handle, u32 clk_id);
+ int (*rate_get)(const struct scmi_handle *handle, u32 clk_id,
+ u64 *rate);
+ int (*rate_set)(const struct scmi_handle *handle, u32 clk_id,
+ u32 config, u64 rate);
+ int (*enable)(const struct scmi_handle *handle, u32 clk_id);
+ int (*disable)(const struct scmi_handle *handle, u32 clk_id);
+};
+
+/**
+ * struct scmi_perf_ops - represents the various operations provided
+ * by SCMI Performance Protocol
+ *
+ * @limits_set: sets limits on the performance level of a domain
+ * @limits_get: gets limits on the performance level of a domain
+ * @level_set: sets the performance level of a domain
+ * @level_get: gets the performance level of a domain
+ * @device_domain_id: gets the scmi domain id for a given device
+ * @get_transition_latency: gets the DVFS transition latency for a given device
+ * @add_opps_to_device: adds all the OPPs for a given device
+ * @freq_set: sets the frequency for a given device using sustained frequency
+ * to sustained performance level mapping
+ * @freq_get: gets the frequency for a given device using sustained frequency
+ * to sustained performance level mapping
+ */
+struct scmi_perf_ops {
+ int (*limits_set)(const struct scmi_handle *handle, u32 domain,
+ u32 max_perf, u32 min_perf);
+ int (*limits_get)(const struct scmi_handle *handle, u32 domain,
+ u32 *max_perf, u32 *min_perf);
+ int (*level_set)(const struct scmi_handle *handle, u32 domain,
+ u32 level, bool poll);
+ int (*level_get)(const struct scmi_handle *handle, u32 domain,
+ u32 *level, bool poll);
+ int (*device_domain_id)(struct device *dev);
+ int (*get_transition_latency)(const struct scmi_handle *handle,
+ struct device *dev);
+ int (*add_opps_to_device)(const struct scmi_handle *handle,
+ struct device *dev);
+ int (*freq_set)(const struct scmi_handle *handle, u32 domain,
+ unsigned long rate, bool poll);
+ int (*freq_get)(const struct scmi_handle *handle, u32 domain,
+ unsigned long *rate, bool poll);
+};
+
+/**
+ * struct scmi_power_ops - represents the various operations provided
+ * by SCMI Power Protocol
+ *
+ * @num_domains_get: get the count of power domains provided by SCMI
+ * @name_get: gets the name of a power domain
+ * @state_set: sets the power state of a power domain
+ * @state_get: gets the power state of a power domain
+ */
+struct scmi_power_ops {
+ int (*num_domains_get)(const struct scmi_handle *handle);
+ char *(*name_get)(const struct scmi_handle *handle, u32 domain);
+#define SCMI_POWER_STATE_TYPE_SHIFT 30
+#define SCMI_POWER_STATE_ID_MASK (BIT(28) - 1)
+#define SCMI_POWER_STATE_PARAM(type, id) \
+ ((((type) & BIT(0)) << SCMI_POWER_STATE_TYPE_SHIFT) | \
+ ((id) & SCMI_POWER_STATE_ID_MASK))
+#define SCMI_POWER_STATE_GENERIC_ON SCMI_POWER_STATE_PARAM(0, 0)
+#define SCMI_POWER_STATE_GENERIC_OFF SCMI_POWER_STATE_PARAM(1, 0)
+ int (*state_set)(const struct scmi_handle *handle, u32 domain,
+ u32 state);
+ int (*state_get)(const struct scmi_handle *handle, u32 domain,
+ u32 *state);
+};
+
+struct scmi_sensor_info {
+ u32 id;
+ u8 type;
+ char name[SCMI_MAX_STR_SIZE];
+};
+
+/*
+ * Partial list from Distributed Management Task Force (DMTF) specification:
+ * DSP0249 (Platform Level Data Model specification)
+ */
+enum scmi_sensor_class {
+ NONE = 0x0,
+ TEMPERATURE_C = 0x2,
+ VOLTAGE = 0x5,
+ CURRENT = 0x6,
+ POWER = 0x7,
+ ENERGY = 0x8,
+};
+
+/**
+ * struct scmi_sensor_ops - represents the various operations provided
+ * by SCMI Sensor Protocol
+ *
+ * @count_get: get the count of sensors provided by SCMI
+ * @info_get: get the information of the specified sensor
+ * @configuration_set: control notifications on cross-over events for
+ * the trip-points
+ * @trip_point_set: selects and configures a trip-point of interest
+ * @reading_get: gets the current value of the sensor
+ */
+struct scmi_sensor_ops {
+ int (*count_get)(const struct scmi_handle *handle);
+
+ const struct scmi_sensor_info *(*info_get)
+ (const struct scmi_handle *handle, u32 sensor_id);
+ int (*configuration_set)(const struct scmi_handle *handle,
+ u32 sensor_id);
+ int (*trip_point_set)(const struct scmi_handle *handle, u32 sensor_id,
+ u8 trip_id, u64 trip_value);
+ int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id,
+ bool async, u64 *value);
+};
+
+/**
+ * struct scmi_handle - Handle returned to ARM SCMI clients for usage.
+ *
+ * @dev: pointer to the SCMI device
+ * @version: pointer to the structure containing SCMI version information
+ * @power_ops: pointer to set of power protocol operations
+ * @perf_ops: pointer to set of performance protocol operations
+ * @clk_ops: pointer to set of clock protocol operations
+ * @sensor_ops: pointer to set of sensor protocol operations
+ */
+struct scmi_handle {
+ struct device *dev;
+ struct scmi_revision_info *version;
+ struct scmi_perf_ops *perf_ops;
+ struct scmi_clk_ops *clk_ops;
+ struct scmi_power_ops *power_ops;
+ struct scmi_sensor_ops *sensor_ops;
+ /* for protocol internal use */
+ void *perf_priv;
+ void *clk_priv;
+ void *power_priv;
+ void *sensor_priv;
+};
+
+enum scmi_std_protocol {
+ SCMI_PROTOCOL_BASE = 0x10,
+ SCMI_PROTOCOL_POWER = 0x11,
+ SCMI_PROTOCOL_SYSTEM = 0x12,
+ SCMI_PROTOCOL_PERF = 0x13,
+ SCMI_PROTOCOL_CLOCK = 0x14,
+ SCMI_PROTOCOL_SENSOR = 0x15,
+};
+
+struct scmi_device {
+ u32 id;
+ u8 protocol_id;
+ struct device dev;
+ struct scmi_handle *handle;
+};
+
+#define to_scmi_dev(d) container_of(d, struct scmi_device, dev)
+
+struct scmi_device *
+scmi_device_create(struct device_node *np, struct device *parent, int protocol);
+void scmi_device_destroy(struct scmi_device *scmi_dev);
+
+struct scmi_device_id {
+ u8 protocol_id;
+};
+
+struct scmi_driver {
+ const char *name;
+ int (*probe)(struct scmi_device *sdev);
+ void (*remove)(struct scmi_device *sdev);
+ const struct scmi_device_id *id_table;
+
+ struct device_driver driver;
+};
+
+#define to_scmi_driver(d) container_of(d, struct scmi_driver, driver)
+
+#ifdef CONFIG_ARM_SCMI_PROTOCOL
+int scmi_driver_register(struct scmi_driver *driver,
+ struct module *owner, const char *mod_name);
+void scmi_driver_unregister(struct scmi_driver *driver);
+#else
+static inline int
+scmi_driver_register(struct scmi_driver *driver, struct module *owner,
+ const char *mod_name)
+{
+ return -EINVAL;
+}
+
+static inline void scmi_driver_unregister(struct scmi_driver *driver) {}
+#endif /* CONFIG_ARM_SCMI_PROTOCOL */
+
+#define scmi_register(driver) \
+ scmi_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+#define scmi_unregister(driver) \
+ scmi_driver_unregister(driver)
+
+/**
+ * module_scmi_driver() - Helper macro for registering a scmi driver
+ * @__scmi_driver: scmi_driver structure
+ *
+ * Helper macro for scmi drivers to set up proper module init / exit
+ * functions. Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_scmi_driver(__scmi_driver) \
+ module_driver(__scmi_driver, scmi_register, scmi_unregister)
+
+typedef int (*scmi_prot_init_fn_t)(struct scmi_handle *);
+int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn);
+void scmi_protocol_unregister(int protocol_id);
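
A minimal sketch of an SCMI client built on this new interface: bind to the clock protocol, take the handle from the scmi_device and use clk_ops through it. The driver name and the printed message are illustrative only.

#include <linux/module.h>
#include <linux/scmi_protocol.h>

static int example_scmi_probe(struct scmi_device *sdev)
{
	const struct scmi_handle *handle = sdev->handle;
	int nr_clks;

	if (!handle || !handle->clk_ops)
		return -ENODEV;

	nr_clks = handle->clk_ops->count_get(handle);
	dev_info(&sdev->dev, "SCMI platform exposes %d clocks\n", nr_clks);
	return 0;
}

static void example_scmi_remove(struct scmi_device *sdev)
{
}

static const struct scmi_device_id example_scmi_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK },
	{ },
};

static struct scmi_driver example_scmi_driver = {
	.name		= "example-scmi-clk",
	.probe		= example_scmi_probe,
	.remove		= example_scmi_remove,
	.id_table	= example_scmi_id_table,
};
module_scmi_driver(example_scmi_driver);

MODULE_LICENSE("GPL v2");
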
diff --git a/include/linux/security.h b/include/linux/security.h
index 128e1e4a5346..200920f521a1 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -112,6 +112,7 @@ struct xfrm_policy;
struct xfrm_state;
struct xfrm_user_sec_ctx;
struct seq_file;
+struct sctp_endpoint;
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
@@ -321,6 +322,7 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
void security_cred_free(struct cred *cred);
int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
void security_transfer_creds(struct cred *new, const struct cred *old);
+void security_cred_getsecid(const struct cred *c, u32 *secid);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
int security_kernel_module_request(char *kmod_name);
@@ -344,7 +346,7 @@ int security_task_setscheduler(struct task_struct *p);
int security_task_getscheduler(struct task_struct *p);
int security_task_movememory(struct task_struct *p);
int security_task_kill(struct task_struct *p, struct siginfo *info,
- int sig, u32 secid);
+ int sig, const struct cred *cred);
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
void security_task_to_inode(struct task_struct *p, struct inode *inode);
@@ -1007,7 +1009,7 @@ static inline int security_task_movememory(struct task_struct *p)
static inline int security_task_kill(struct task_struct *p,
struct siginfo *info, int sig,
- u32 secid)
+ const struct cred *cred)
{
return 0;
}
@@ -1226,6 +1228,11 @@ int security_tun_dev_create(void);
int security_tun_dev_attach_queue(void *security);
int security_tun_dev_attach(struct sock *sk, void *security);
int security_tun_dev_open(void *security);
+int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb);
+int security_sctp_bind_connect(struct sock *sk, int optname,
+ struct sockaddr *address, int addrlen);
+void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk,
+ struct sock *newsk);
#else /* CONFIG_SECURITY_NETWORK */
static inline int security_unix_stream_connect(struct sock *sock,
@@ -1418,6 +1425,25 @@ static inline int security_tun_dev_open(void *security)
{
return 0;
}
+
+static inline int security_sctp_assoc_request(struct sctp_endpoint *ep,
+ struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline int security_sctp_bind_connect(struct sock *sk, int optname,
+ struct sockaddr *address,
+ int addrlen)
+{
+ return 0;
+}
+
+static inline void security_sctp_sk_clone(struct sctp_endpoint *ep,
+ struct sock *sk,
+ struct sock *newsk)
+{
+}
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_INFINIBAND
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
index a7f004a3c177..463ed28d2b27 100644
--- a/include/linux/serial_s3c.h
+++ b/include/linux/serial_s3c.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Internal header file for Samsung S3C2410 serial ports (UART0-2)
*
@@ -10,21 +11,7 @@
* Internal header file for MX1ADS serial ports (UART1 & 2)
*
* Copyright (C) 2002 Shane Nay (shane@minirl.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
+ */
#ifndef __ASM_ARM_REGS_SERIAL_H
#define __ASM_ARM_REGS_SERIAL_H
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 231abc8976c5..81ebd71f8c03 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -125,7 +125,6 @@
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
(unsigned long)ZERO_SIZE_PTR)
-#include <linux/kmemleak.h>
#include <linux/kasan.h>
struct mem_cgroup;
@@ -137,12 +136,13 @@ bool slab_is_available(void);
extern bool usercopy_fallback;
-struct kmem_cache *kmem_cache_create(const char *name, size_t size,
- size_t align, slab_flags_t flags,
+struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
+ unsigned int align, slab_flags_t flags,
void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
- size_t size, size_t align, slab_flags_t flags,
- size_t useroffset, size_t usersize,
+ unsigned int size, unsigned int align,
+ slab_flags_t flags,
+ unsigned int useroffset, unsigned int usersize,
void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
@@ -308,7 +308,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
* 2 = 129 .. 192 bytes
* n = 2^(n-1)+1 .. 2^n
*/
-static __always_inline int kmalloc_index(size_t size)
+static __always_inline unsigned int kmalloc_index(size_t size)
{
if (!size)
return 0;
@@ -504,7 +504,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
if (!(flags & GFP_DMA)) {
- int index = kmalloc_index(size);
+ unsigned int index = kmalloc_index(size);
if (!index)
return ZERO_SIZE_PTR;
@@ -522,11 +522,11 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
* return size or 0 if a kmalloc cache for that
* size does not exist
*/
-static __always_inline int kmalloc_size(int n)
+static __always_inline unsigned int kmalloc_size(unsigned int n)
{
#ifndef CONFIG_SLOB
if (n > 2)
- return 1 << n;
+ return 1U << n;
if (n == 1 && KMALLOC_MIN_SIZE <= 32)
return 96;
@@ -542,7 +542,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
#ifndef CONFIG_SLOB
if (__builtin_constant_p(size) &&
size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
- int i = kmalloc_index(size);
+ unsigned int i = kmalloc_index(size);
if (!i)
return ZERO_SIZE_PTR;
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 7385547c04b1..d9228e4d0320 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -85,8 +85,8 @@ struct kmem_cache {
unsigned int *random_seq;
#endif
- size_t useroffset; /* Usercopy region offset */
- size_t usersize; /* Usercopy region size */
+ unsigned int useroffset; /* Usercopy region offset */
+ unsigned int usersize; /* Usercopy region size */
struct kmem_cache_node *node[MAX_NUMNODES];
};
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 8ad99c47b19c..3773e26c08c1 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -73,7 +73,7 @@ struct kmem_cache_cpu {
* given order would contain.
*/
struct kmem_cache_order_objects {
- unsigned long x;
+ unsigned int x;
};
/*
@@ -84,11 +84,12 @@ struct kmem_cache {
/* Used for retriving partial slabs etc */
slab_flags_t flags;
unsigned long min_partial;
- int size; /* The size of an object including meta data */
- int object_size; /* The size of an object without meta data */
- int offset; /* Free pointer offset. */
+ unsigned int size; /* The size of an object including meta data */
+ unsigned int object_size;/* The size of an object without meta data */
+ unsigned int offset; /* Free pointer offset. */
#ifdef CONFIG_SLUB_CPU_PARTIAL
- int cpu_partial; /* Number of per cpu partial objects to keep around */
+ /* Number of per cpu partial objects to keep around */
+ unsigned int cpu_partial;
#endif
struct kmem_cache_order_objects oo;
@@ -98,10 +99,10 @@ struct kmem_cache {
gfp_t allocflags; /* gfp flags to use on each alloc */
int refcount; /* Refcount for slab cache destroy */
void (*ctor)(void *);
- int inuse; /* Offset to metadata */
- int align; /* Alignment */
- int reserved; /* Reserved bytes at the end of slabs */
- int red_left_pad; /* Left redzone padding size */
+ unsigned int inuse; /* Offset to metadata */
+ unsigned int align; /* Alignment */
+ unsigned int reserved; /* Reserved bytes at the end of slabs */
+ unsigned int red_left_pad; /* Left redzone padding size */
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
#ifdef CONFIG_SYSFS
@@ -110,7 +111,8 @@ struct kmem_cache {
#endif
#ifdef CONFIG_MEMCG
struct memcg_cache_params memcg_params;
- int max_attr_size; /* for propagation, maximum size of a stored attr */
+ /* for propagation, maximum size of a stored attr */
+ unsigned int max_attr_size;
#ifdef CONFIG_SYSFS
struct kset *memcg_kset;
#endif
@@ -124,7 +126,7 @@ struct kmem_cache {
/*
* Defragmentation by allocating from a remote node.
*/
- int remote_node_defrag_ratio;
+ unsigned int remote_node_defrag_ratio;
#endif
#ifdef CONFIG_SLAB_FREELIST_RANDOM
@@ -135,8 +137,8 @@ struct kmem_cache {
struct kasan_cache kasan_info;
#endif
- size_t useroffset; /* Usercopy region offset */
- size_t usersize; /* Usercopy region size */
+ unsigned int useroffset; /* Usercopy region offset */
+ unsigned int usersize; /* Usercopy region size */
struct kmem_cache_node *node[MAX_NUMNODES];
};
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
index b0a507d356ef..fd25f0148566 100644
--- a/include/linux/soc/mediatek/infracfg.h
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -21,6 +21,10 @@
#define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22)
#define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23)
+#define MT2701_TOP_AXI_PROT_EN_MM_M0 BIT(1)
+#define MT2701_TOP_AXI_PROT_EN_CONN_M BIT(2)
+#define MT2701_TOP_AXI_PROT_EN_CONN_S BIT(8)
+
#define MT7622_TOP_AXI_PROT_EN_ETHSYS (BIT(3) | BIT(17))
#define MT7622_TOP_AXI_PROT_EN_HIF0 (BIT(24) | BIT(25))
#define MT7622_TOP_AXI_PROT_EN_HIF1 (BIT(26) | BIT(27) | \
diff --git a/include/linux/soc/samsung/exynos-pmu.h b/include/linux/soc/samsung/exynos-pmu.h
index e57eb4b6cc5a..fc0b445bb36b 100644
--- a/include/linux/soc/samsung/exynos-pmu.h
+++ b/include/linux/soc/samsung/exynos-pmu.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Header for EXYNOS PMU Driver support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_SOC_EXYNOS_PMU_H
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index bebdde5dccd6..66dcb9ec273a 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2010-2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* EXYNOS - Power management unit definition
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *
* Notice:
* This is not a list of all Exynos Power Management Unit SFRs.
* There are too many of them, not mentioning subtle differences
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 786ae2255f05..574368e8a16f 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -272,6 +272,7 @@ struct svc_rqst {
#define RQ_BUSY (6) /* request is busy */
#define RQ_DATA (7) /* request has data */
unsigned long rq_flags; /* flags field */
+ ktime_t rq_qtime; /* enqueue time */
void * rq_argp; /* decoded arguments */
void * rq_resp; /* xdr'd results */
@@ -283,6 +284,7 @@ struct svc_rqst {
int rq_reserved; /* space on socket outq
* reserved for this request
*/
+ ktime_t rq_stime; /* start time */
struct cache_req rq_chandle; /* handle passed to caches for
* request delaying
@@ -493,6 +495,10 @@ void svc_wake_up(struct svc_serv *);
void svc_reserve(struct svc_rqst *rqstp, int space);
struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu);
char * svc_print_addr(struct svc_rqst *, char *, size_t);
+unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
+ struct kvec *first, size_t total);
+char *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
+ struct kvec *first, size_t total);
#define RPC_MAX_ADDRBUFLEN (63U)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 4b731b046bcd..7337e1221590 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -132,9 +132,6 @@ struct svcxprt_rdma {
#define RDMAXPRT_CONN_PENDING 3
#define RPCRDMA_LISTEN_BACKLOG 10
-/* The default ORD value is based on two outstanding full-size writes with a
- * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */
-#define RPCRDMA_ORD (64/4)
#define RPCRDMA_MAX_REQUESTS 32
/* Typical ULP usage of BC requests is NFSv4.1 backchannel. Our
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 1caf7bc83306..c3d72066d4b1 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -25,7 +25,7 @@ struct svc_xprt_ops {
void (*xpo_release_rqst)(struct svc_rqst *);
void (*xpo_detach)(struct svc_xprt *);
void (*xpo_free)(struct svc_xprt *);
- int (*xpo_secure_port)(struct svc_rqst *);
+ void (*xpo_secure_port)(struct svc_rqst *rqstp);
void (*xpo_kill_temp_xprt)(struct svc_xprt *);
};
@@ -83,6 +83,7 @@ struct svc_xprt {
size_t xpt_locallen; /* length of address */
struct sockaddr_storage xpt_remote; /* remote peer's address */
size_t xpt_remotelen; /* length of address */
+ char xpt_remotebuf[INET6_ADDRSTRLEN + 10];
struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */
struct list_head xpt_users; /* callbacks on free */
@@ -152,7 +153,10 @@ static inline void svc_xprt_set_remote(struct svc_xprt *xprt,
{
memcpy(&xprt->xpt_remote, sa, salen);
xprt->xpt_remotelen = salen;
+ snprintf(xprt->xpt_remotebuf, sizeof(xprt->xpt_remotebuf) - 1,
+ "%pISpc", sa);
}
+
static inline unsigned short svc_addr_port(const struct sockaddr *sa)
{
const struct sockaddr_in *sin = (const struct sockaddr_in *)sa;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a1a3f4ed94ce..2417d288e016 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -400,7 +400,6 @@ int generic_swapfile_activate(struct swap_info_struct *, struct file *,
#define SWAP_ADDRESS_SPACE_SHIFT 14
#define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
-extern bool swap_vma_readahead;
#define swap_address_space(entry) \
(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
>> SWAP_ADDRESS_SPACE_SHIFT])
@@ -422,14 +421,10 @@ extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr,
bool *new_page_allocated);
-extern struct page *swapin_readahead(swp_entry_t, gfp_t,
- struct vm_area_struct *vma, unsigned long addr);
-
-extern struct page *swap_readahead_detect(struct vm_fault *vmf,
- struct vma_swap_readahead *swap_ra);
-extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
- struct vm_fault *vmf,
- struct vma_swap_readahead *swap_ra);
+extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
+ struct vm_fault *vmf);
+extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
+ struct vm_fault *vmf);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
@@ -437,11 +432,6 @@ extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);
-static inline bool swap_use_vma_readahead(void)
-{
- return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap);
-}
-
/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
@@ -537,26 +527,14 @@ static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}
-static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
- struct vm_area_struct *vma, unsigned long addr)
+static inline struct page *swap_cluster_readahead(swp_entry_t entry,
+ gfp_t gfp_mask, struct vm_fault *vmf)
{
return NULL;
}
-static inline bool swap_use_vma_readahead(void)
-{
- return false;
-}
-
-static inline struct page *swap_readahead_detect(
- struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
-{
- return NULL;
-}
-
-static inline struct page *do_swap_page_readahead(
- swp_entry_t fentry, gfp_t gfp_mask,
- struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
+static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
+ struct vm_fault *vmf)
{
return NULL;
}
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index bcdd3790e94d..06639fb6ab85 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -44,7 +44,7 @@ struct tpm_class_ops {
bool (*update_timeouts)(struct tpm_chip *chip,
unsigned long *timeout_cap);
int (*request_locality)(struct tpm_chip *chip, int loc);
- void (*relinquish_locality)(struct tpm_chip *chip, int loc);
+ int (*relinquish_locality)(struct tpm_chip *chip, int loc);
void (*clk_enable)(struct tpm_chip *chip, bool value);
};
diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h
index 3119d0ace7aa..aaafecf073ff 100644
--- a/include/linux/usb/audio-v2.h
+++ b/include/linux/usb/audio-v2.h
@@ -34,14 +34,14 @@
*
*/
-static inline bool uac2_control_is_readable(u32 bmControls, u8 control)
+static inline bool uac_v2v3_control_is_readable(u32 bmControls, u8 control)
{
- return (bmControls >> (control * 2)) & 0x1;
+ return (bmControls >> ((control - 1) * 2)) & 0x1;
}
-static inline bool uac2_control_is_writeable(u32 bmControls, u8 control)
+static inline bool uac_v2v3_control_is_writeable(u32 bmControls, u8 control)
{
- return (bmControls >> (control * 2)) & 0x2;
+ return (bmControls >> ((control - 1) * 2)) & 0x2;
}
/* 4.7.2 Class-Specific AC Interface Descriptor */
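
A worked example of why the shift changes to (control - 1) * 2: AudioControl controls are numbered from 1 and each occupies a two-bit pair in bmControls, so control 1 lives in bits 1..0. The helper below is purely for illustration.

static void example_uac_controls(void)
{
	u32 bm = 0x0D;			/* binary 00 00 11 01 */
	bool r1, w1, w2;

	r1 = uac_v2v3_control_is_readable(bm, 1);	/* pair 0b01 -> true  */
	w1 = uac_v2v3_control_is_writeable(bm, 1);	/* pair 0b01 -> false */
	w2 = uac_v2v3_control_is_writeable(bm, 2);	/* pair 0b11 -> true  */

	(void)r1; (void)w1; (void)w2;	/* the old "control * 2" shift would
					 * have tested the next control's pair */
}
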
diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h
new file mode 100644
index 000000000000..a8959aaba0ae
--- /dev/null
+++ b/include/linux/usb/audio-v3.h
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2017 Ruslan Bilovol <ruslan.bilovol@gmail.com>
+ *
+ * This file holds USB constants and structures defined
+ * by the USB DEVICE CLASS DEFINITION FOR AUDIO DEVICES Release 3.0.
+ */
+
+#ifndef __LINUX_USB_AUDIO_V3_H
+#define __LINUX_USB_AUDIO_V3_H
+
+#include <linux/types.h>
+
+/*
+ * v1.0, v2.0 and v3.0 of this standard have many things in common. For the rest
+ * of the definitions, please refer to audio.h and audio-v2.h
+ */
+
+/* All High Capability descriptors have these 2 fields at the beginning */
+struct uac3_hc_descriptor_header {
+ __le16 wLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __le16 wDescriptorID;
+} __attribute__ ((packed));
+
+/* 4.3.1 CLUSTER DESCRIPTOR HEADER */
+struct uac3_cluster_header_descriptor {
+ __le16 wLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __le16 wDescriptorID;
+ __u8 bNrChannels;
+} __attribute__ ((packed));
+
+/* 4.3.2.1 SEGMENTS */
+struct uac3_cluster_segment_descriptor {
+ __le16 wLength;
+ __u8 bSegmentType;
+ /* __u8[0]; segment-specific data */
+} __attribute__ ((packed));
+
+/* 4.3.2.1.1 END SEGMENT */
+struct uac3_cluster_end_segment_descriptor {
+ __le16 wLength;
+ __u8 bSegmentType; /* Constant END_SEGMENT */
+} __attribute__ ((packed));
+
+/* 4.3.2.1.3.1 INFORMATION SEGMENT */
+struct uac3_cluster_information_segment_descriptor {
+ __le16 wLength;
+ __u8 bSegmentType;
+ __u8 bChPurpose;
+ __u8 bChRelationship;
+ __u8 bChGroupID;
+} __attribute__ ((packed));
+
+/* 4.5.2 CLASS-SPECIFIC AC INTERFACE DESCRIPTOR */
+struct uac3_ac_header_descriptor {
+ __u8 bLength; /* 10 */
+ __u8 bDescriptorType; /* CS_INTERFACE descriptor type */
+ __u8 bDescriptorSubtype; /* HEADER descriptor subtype */
+ __u8 bCategory;
+
+ /* includes Clock Source, Unit, Terminal, and Power Domain desc. */
+ __le16 wTotalLength;
+
+ __le32 bmControls;
+} __attribute__ ((packed));
+
+/* 4.5.2.1 INPUT TERMINAL DESCRIPTOR */
+struct uac3_input_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bTerminalID;
+ __le16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 bCSourceID;
+ __le32 bmControls;
+ __le16 wClusterDescrID;
+ __le16 wExTerminalDescrID;
+ __le16 wConnectorsDescrID;
+ __le16 wTerminalDescrStr;
+} __attribute__((packed));
+
+/* 4.5.2.2 OUTPUT TERMINAL DESCRIPTOR */
+struct uac3_output_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bTerminalID;
+ __le16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 bSourceID;
+ __u8 bCSourceID;
+ __le32 bmControls;
+ __le16 wExTerminalDescrID;
+ __le16 wConnectorsDescrID;
+ __le16 wTerminalDescrStr;
+} __attribute__((packed));
+
+/* 4.5.2.7 FEATURE UNIT DESCRIPTOR */
+struct uac3_feature_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bUnitID;
+ __u8 bSourceID;
+ /* bmaControls is actually u32,
+ * but u8 is needed for the hybrid parser */
+ __u8 bmaControls[0]; /* variable length */
+ /* wFeatureDescrStr omitted */
+} __attribute__((packed));
+
+#define UAC3_DT_FEATURE_UNIT_SIZE(ch) (7 + ((ch) + 1) * 4)
+
+/* As above, but more useful for defining your own descriptors */
+#define DECLARE_UAC3_FEATURE_UNIT_DESCRIPTOR(ch) \
+struct uac3_feature_unit_descriptor_##ch { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubtype; \
+ __u8 bUnitID; \
+ __u8 bSourceID; \
+ __le32 bmaControls[ch + 1]; \
+ __le16 wFeatureDescrStr; \
+} __attribute__ ((packed))
+
+/* 4.5.2.12 CLOCK SOURCE DESCRIPTOR */
+struct uac3_clock_source_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bClockID;
+ __u8 bmAttributes;
+ __le32 bmControls;
+ __u8 bReferenceTerminal;
+ __le16 wClockSourceStr;
+} __attribute__((packed));
+
+/* bmAttribute fields */
+#define UAC3_CLOCK_SOURCE_TYPE_EXT 0x0
+#define UAC3_CLOCK_SOURCE_TYPE_INT 0x1
+#define UAC3_CLOCK_SOURCE_ASYNC (0 << 2)
+#define UAC3_CLOCK_SOURCE_SYNCED_TO_SOF (1 << 1)
+
+/* 4.5.2.13 CLOCK SELECTOR DESCRIPTOR */
+struct uac3_clock_selector_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bClockID;
+ __u8 bNrInPins;
+ __u8 baCSourceID[];
+ /* bmControls and wCSelectorDescrStr omitted */
+} __attribute__((packed));
+
+/* 4.5.2.14 CLOCK MULTIPLIER DESCRIPTOR */
+struct uac3_clock_multiplier_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bClockID;
+ __u8 bCSourceID;
+ __le32 bmControls;
+ __le16 wCMultiplierDescrStr;
+} __attribute__((packed));
+
+/* 4.5.2.15 POWER DOMAIN DESCRIPTOR */
+struct uac3_power_domain_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bPowerDomainID;
+ __le16 waRecoveryTime1;
+ __le16 waRecoveryTime2;
+ __u8 bNrEntities;
+ __u8 baEntityID[];
+ /* wPDomainDescrStr omitted */
+} __attribute__((packed));
+
+/* As above, but more useful for defining your own descriptors */
+#define DECLARE_UAC3_POWER_DOMAIN_DESCRIPTOR(n) \
+struct uac3_power_domain_descriptor_##n { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubtype; \
+ __u8 bPowerDomainID; \
+ __le16 waRecoveryTime1; \
+ __le16 waRecoveryTime2; \
+ __u8 bNrEntities; \
+ __u8 baEntityID[n]; \
+ __le16 wPDomainDescrStr; \
+} __attribute__ ((packed))
+
+/* 4.7.2 CLASS-SPECIFIC AS INTERFACE DESCRIPTOR */
+struct uac3_as_header_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bTerminalLink;
+ __le32 bmControls;
+ __le16 wClusterDescrID;
+ __le64 bmFormats;
+ __u8 bSubslotSize;
+ __u8 bBitResolution;
+ __le16 bmAuxProtocols;
+ __u8 bControlSize;
+} __attribute__((packed));
+
+#define UAC3_FORMAT_TYPE_I_RAW_DATA (1 << 6)
+
+/* 4.8.1.2 CLASS-SPECIFIC AS ISOCHRONOUS AUDIO DATA ENDPOINT DESCRIPTOR */
+struct uac3_iso_endpoint_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __le32 bmControls;
+ __u8 bLockDelayUnits;
+ __le16 wLockDelay;
+} __attribute__((packed));
+
+/* 6.1 INTERRUPT DATA MESSAGE */
+struct uac3_interrupt_data_msg {
+ __u8 bInfo;
+ __u8 bSourceType;
+ __le16 wValue;
+ __le16 wIndex;
+} __attribute__((packed));
+
+/* A.2 AUDIO FUNCTION SUBCLASS CODES */
+#define UAC3_FUNCTION_SUBCLASS_UNDEFINED 0x00
+#define UAC3_FUNCTION_SUBCLASS_FULL_ADC_3_0 0x01
+/* BADD profiles */
+#define UAC3_FUNCTION_SUBCLASS_GENERIC_IO 0x20
+#define UAC3_FUNCTION_SUBCLASS_HEADPHONE 0x21
+#define UAC3_FUNCTION_SUBCLASS_SPEAKER 0x22
+#define UAC3_FUNCTION_SUBCLASS_MICROPHONE 0x23
+#define UAC3_FUNCTION_SUBCLASS_HEADSET 0x24
+#define UAC3_FUNCTION_SUBCLASS_HEADSET_ADAPTER 0x25
+#define UAC3_FUNCTION_SUBCLASS_SPEAKERPHONE 0x26
+
+/* A.7 AUDIO FUNCTION CATEGORY CODES */
+#define UAC3_FUNCTION_SUBCLASS_UNDEFINED 0x00
+#define UAC3_FUNCTION_DESKTOP_SPEAKER 0x01
+#define UAC3_FUNCTION_HOME_THEATER 0x02
+#define UAC3_FUNCTION_MICROPHONE 0x03
+#define UAC3_FUNCTION_HEADSET 0x04
+#define UAC3_FUNCTION_TELEPHONE 0x05
+#define UAC3_FUNCTION_CONVERTER 0x06
+#define UAC3_FUNCTION_SOUND_RECORDER 0x07
+#define UAC3_FUNCTION_IO_BOX 0x08
+#define UAC3_FUNCTION_MUSICAL_INSTRUMENT 0x09
+#define UAC3_FUNCTION_PRO_AUDIO 0x0a
+#define UAC3_FUNCTION_AUDIO_VIDEO 0x0b
+#define UAC3_FUNCTION_CONTROL_PANEL 0x0c
+#define UAC3_FUNCTION_HEADPHONE 0x0d
+#define UAC3_FUNCTION_GENERIC_SPEAKER 0x0e
+#define UAC3_FUNCTION_HEADSET_ADAPTER 0x0f
+#define UAC3_FUNCTION_SPEAKERPHONE 0x10
+#define UAC3_FUNCTION_OTHER 0xff
+
+/* A.8 AUDIO CLASS-SPECIFIC DESCRIPTOR TYPES */
+#define UAC3_CS_UNDEFINED 0x20
+#define UAC3_CS_DEVICE 0x21
+#define UAC3_CS_CONFIGURATION 0x22
+#define UAC3_CS_STRING 0x23
+#define UAC3_CS_INTERFACE 0x24
+#define UAC3_CS_ENDPOINT 0x25
+#define UAC3_CS_CLUSTER 0x26
+
+/* A.10 CLUSTER DESCRIPTOR SEGMENT TYPES */
+#define UAC3_SEGMENT_UNDEFINED 0x00
+#define UAC3_CLUSTER_DESCRIPTION 0x01
+#define UAC3_CLUSTER_VENDOR_DEFINED 0x1F
+#define UAC3_CHANNEL_INFORMATION 0x20
+#define UAC3_CHANNEL_AMBISONIC 0x21
+#define UAC3_CHANNEL_DESCRIPTION 0x22
+#define UAC3_CHANNEL_VENDOR_DEFINED 0xFE
+#define UAC3_END_SEGMENT 0xFF
+
+/* A.11 CHANNEL PURPOSE DEFINITIONS */
+#define UAC3_PURPOSE_UNDEFINED 0x00
+#define UAC3_PURPOSE_GENERIC_AUDIO 0x01
+#define UAC3_PURPOSE_VOICE 0x02
+#define UAC3_PURPOSE_SPEECH 0x03
+#define UAC3_PURPOSE_AMBIENT 0x04
+#define UAC3_PURPOSE_REFERENCE 0x05
+#define UAC3_PURPOSE_ULTRASONIC 0x06
+#define UAC3_PURPOSE_VIBROKINETIC 0x07
+#define UAC3_PURPOSE_NON_AUDIO 0xFF
+
+/* A.12 CHANNEL RELATIONSHIP DEFINITIONS */
+#define UAC3_CH_RELATIONSHIP_UNDEFINED 0x00
+#define UAC3_CH_MONO 0x01
+#define UAC3_CH_LEFT 0x02
+#define UAC3_CH_RIGHT 0x03
+#define UAC3_CH_ARRAY 0x04
+#define UAC3_CH_PATTERN_X 0x20
+#define UAC3_CH_PATTERN_Y 0x21
+#define UAC3_CH_PATTERN_A 0x22
+#define UAC3_CH_PATTERN_B 0x23
+#define UAC3_CH_PATTERN_M 0x24
+#define UAC3_CH_PATTERN_S 0x25
+#define UAC3_CH_FRONT_LEFT 0x80
+#define UAC3_CH_FRONT_RIGHT 0x81
+#define UAC3_CH_FRONT_CENTER 0x82
+#define UAC3_CH_FRONT_LEFT_OF_CENTER 0x83
+#define UAC3_CH_FRONT_RIGHT_OF_CENTER 0x84
+#define UAC3_CH_FRONT_WIDE_LEFT 0x85
+#define UAC3_CH_FRONT_WIDE_RIGHT 0x86
+#define UAC3_CH_SIDE_LEFT 0x87
+#define UAC3_CH_SIDE_RIGHT 0x88
+#define UAC3_CH_SURROUND_ARRAY_LEFT 0x89
+#define UAC3_CH_SURROUND_ARRAY_RIGHT 0x8A
+#define UAC3_CH_BACK_LEFT 0x8B
+#define UAC3_CH_BACK_RIGHT 0x8C
+#define UAC3_CH_BACK_CENTER 0x8D
+#define UAC3_CH_BACK_LEFT_OF_CENTER 0x8E
+#define UAC3_CH_BACK_RIGHT_OF_CENTER 0x8F
+#define UAC3_CH_BACK_WIDE_LEFT 0x90
+#define UAC3_CH_BACK_WIDE_RIGHT 0x91
+#define UAC3_CH_TOP_CENTER 0x92
+#define UAC3_CH_TOP_FRONT_LEFT 0x93
+#define UAC3_CH_TOP_FRONT_RIGHT 0x94
+#define UAC3_CH_TOP_FRONT_CENTER 0x95
+#define UAC3_CH_TOP_FRONT_LOC 0x96
+#define UAC3_CH_TOP_FRONT_ROC 0x97
+#define UAC3_CH_TOP_FRONT_WIDE_LEFT 0x98
+#define UAC3_CH_TOP_FRONT_WIDE_RIGHT 0x99
+#define UAC3_CH_TOP_SIDE_LEFT 0x9A
+#define UAC3_CH_TOP_SIDE_RIGHT 0x9B
+#define UAC3_CH_TOP_SURR_ARRAY_LEFT 0x9C
+#define UAC3_CH_TOP_SURR_ARRAY_RIGHT 0x9D
+#define UAC3_CH_TOP_BACK_LEFT 0x9E
+#define UAC3_CH_TOP_BACK_RIGHT 0x9F
+#define UAC3_CH_TOP_BACK_CENTER 0xA0
+#define UAC3_CH_TOP_BACK_LOC 0xA1
+#define UAC3_CH_TOP_BACK_ROC 0xA2
+#define UAC3_CH_TOP_BACK_WIDE_LEFT 0xA3
+#define UAC3_CH_TOP_BACK_WIDE_RIGHT 0xA4
+#define UAC3_CH_BOTTOM_CENTER 0xA5
+#define UAC3_CH_BOTTOM_FRONT_LEFT 0xA6
+#define UAC3_CH_BOTTOM_FRONT_RIGHT 0xA7
+#define UAC3_CH_BOTTOM_FRONT_CENTER 0xA8
+#define UAC3_CH_BOTTOM_FRONT_LOC 0xA9
+#define UAC3_CH_BOTTOM_FRONT_ROC 0xAA
+#define UAC3_CH_BOTTOM_FRONT_WIDE_LEFT 0xAB
+#define UAC3_CH_BOTTOM_FRONT_WIDE_RIGHT 0xAC
+#define UAC3_CH_BOTTOM_SIDE_LEFT 0xAD
+#define UAC3_CH_BOTTOM_SIDE_RIGHT 0xAE
+#define UAC3_CH_BOTTOM_SURR_ARRAY_LEFT 0xAF
+#define UAC3_CH_BOTTOM_SURR_ARRAY_RIGHT 0xB0
+#define UAC3_CH_BOTTOM_BACK_LEFT 0xB1
+#define UAC3_CH_BOTTOM_BACK_RIGHT 0xB2
+#define UAC3_CH_BOTTOM_BACK_CENTER 0xB3
+#define UAC3_CH_BOTTOM_BACK_LOC 0xB4
+#define UAC3_CH_BOTTOM_BACK_ROC 0xB5
+#define UAC3_CH_BOTTOM_BACK_WIDE_LEFT 0xB6
+#define UAC3_CH_BOTTOM_BACK_WIDE_RIGHT 0xB7
+#define UAC3_CH_LOW_FREQUENCY_EFFECTS 0xB8
+#define UAC3_CH_LFE_LEFT 0xB9
+#define UAC3_CH_LFE_RIGHT 0xBA
+#define UAC3_CH_HEADPHONE_LEFT 0xBB
+#define UAC3_CH_HEADPHONE_RIGHT 0xBC
+
+/* A.15 AUDIO CLASS-SPECIFIC AC INTERFACE DESCRIPTOR SUBTYPES */
+/* see audio.h for the rest, which is identical to v1 */
+#define UAC3_EXTENDED_TERMINAL 0x04
+#define UAC3_MIXER_UNIT 0x05
+#define UAC3_SELECTOR_UNIT 0x06
+#define UAC3_FEATURE_UNIT 0x07
+#define UAC3_EFFECT_UNIT 0x08
+#define UAC3_PROCESSING_UNIT 0x09
+#define UAC3_EXTENSION_UNIT 0x0a
+#define UAC3_CLOCK_SOURCE 0x0b
+#define UAC3_CLOCK_SELECTOR 0x0c
+#define UAC3_CLOCK_MULTIPLIER 0x0d
+#define UAC3_SAMPLE_RATE_CONVERTER 0x0e
+#define UAC3_CONNECTORS 0x0f
+#define UAC3_POWER_DOMAIN 0x10
+
+/* A.22 AUDIO CLASS-SPECIFIC REQUEST CODES */
+/* see audio-v2.h for the rest, which is identical to v2 */
+#define UAC3_CS_REQ_INTEN 0x04
+#define UAC3_CS_REQ_STRING 0x05
+#define UAC3_CS_REQ_HIGH_CAPABILITY_DESCRIPTOR 0x06
+
+/* A.23.1 AUDIOCONTROL INTERFACE CONTROL SELECTORS */
+#define UAC3_AC_CONTROL_UNDEFINED 0x00
+#define UAC3_AC_ACTIVE_INTERFACE_CONTROL 0x01
+#define UAC3_AC_POWER_DOMAIN_CONTROL 0x02
+
+#endif /* __LINUX_USB_AUDIO_V3_H */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index e3424234b23a..847f423ad9b3 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -806,6 +806,7 @@ int usb_otg_descriptor_init(struct usb_gadget *gadget,
/* utility to simplify map/unmap of usb_requests to/from DMA */
+#ifdef CONFIG_HAS_DMA
extern int usb_gadget_map_request_by_dev(struct device *dev,
struct usb_request *req, int is_in);
extern int usb_gadget_map_request(struct usb_gadget *gadget,
@@ -815,6 +816,17 @@ extern void usb_gadget_unmap_request_by_dev(struct device *dev,
struct usb_request *req, int is_in);
extern void usb_gadget_unmap_request(struct usb_gadget *gadget,
struct usb_request *req, int is_in);
+#else /* !CONFIG_HAS_DMA */
+static inline int usb_gadget_map_request_by_dev(struct device *dev,
+ struct usb_request *req, int is_in) { return -ENOSYS; }
+static inline int usb_gadget_map_request(struct usb_gadget *gadget,
+ struct usb_request *req, int is_in) { return -ENOSYS; }
+
+static inline void usb_gadget_unmap_request_by_dev(struct device *dev,
+ struct usb_request *req, int is_in) { }
+static inline void usb_gadget_unmap_request(struct usb_gadget *gadget,
+ struct usb_request *req, int is_in) { }
+#endif /* !CONFIG_HAS_DMA */
/*-------------------------------------------------------------------------*/
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 57a8e98f2708..2219cce81ca4 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -47,6 +47,8 @@ void zs_destroy_pool(struct zs_pool *pool);
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
void zs_free(struct zs_pool *pool, unsigned long obj);
+size_t zs_huge_class_size(struct zs_pool *pool);
+
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
enum zs_mapmode mm);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
diff --git a/include/media/i2c/saa6588.h b/include/media/i2c/saa6588.h
index b5ec1aa60ed5..a0825f532f71 100644
--- a/include/media/i2c/saa6588.h
+++ b/include/media/i2c/saa6588.h
@@ -32,6 +32,7 @@ struct saa6588_command {
unsigned char __user *buffer;
struct file *instance;
poll_table *event_list;
+ __poll_t poll_mask;
};
/* These ioctls are internal to the kernel */
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index 725282095840..d2ea5863eedc 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -364,7 +364,7 @@ struct iw_handler_def {
* defined in struct iw_priv_args.
*
* For standard IOCTLs, things are quite different and we need to
- * use the stuctures below. Actually, this struct is also more
+ * use the structures below. Actually, this struct is also more
* efficient, but that's another story...
*/
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 72c5b8fc3232..28b996d63490 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -432,9 +432,11 @@ static inline int sctp_list_single_entry(struct list_head *head)
static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
{
struct sctp_sock *sp = sctp_sk(asoc->base.sk);
+ struct sctp_af *af = sp->pf->af;
int frag = pmtu;
- frag -= sp->pf->af->net_header_len;
+ frag -= af->ip_options_len(asoc->base.sk);
+ frag -= af->net_header_len;
frag -= sizeof(struct sctphdr) + sctp_datachk_len(&asoc->stream);
if (asoc->user_frag)
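
A worked example of the new calculation, assuming IPv4 with no IP options, a 1500-byte PMTU and a plain (non-interleaved) DATA chunk; the byte counts are the usual header sizes, not values taken from this patch.

/*
 *	frag = 1500	PMTU
 *	     -    0	af->ip_options_len(sk): no IP options configured
 *	     -   20	IPv4 network header (af->net_header_len)
 *	     -   12	sizeof(struct sctphdr)
 *	     -   16	DATA chunk header (sctp_datachk_len)
 *	     = 1452	bytes of user data per chunk
 */
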
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index c63249ea34c3..a0ec462bc1a9 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -491,6 +491,7 @@ struct sctp_af {
void (*ecn_capable)(struct sock *sk);
__u16 net_header_len;
int sockaddr_len;
+ int (*ip_options_len)(struct sock *sk);
sa_family_t sa_family;
struct list_head list;
};
@@ -515,6 +516,7 @@ struct sctp_pf {
int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr);
void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
+ void (*copy_ip_options)(struct sock *sk, struct sock *newsk);
struct sctp_af *af;
};
@@ -1320,6 +1322,16 @@ struct sctp_endpoint {
reconf_enable:1;
__u8 strreset_enable;
+
+ /* Security identifiers from incoming (INIT). These are set by
+ * security_sctp_assoc_request(). These will only be used by
+ * SCTP TCP type sockets and peeled off connections as they
+ * cause a new socket to be generated. security_sctp_sk_clone()
+ * will then plug these into the new socket.
+ */
+
+ u32 secid;
+ u32 peer_secid;
};
/* Recover the outer endpoint structure. */
diff --git a/include/net/sock.h b/include/net/sock.h
index 49bd2c1796b0..74d725fdbe0f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1114,8 +1114,8 @@ struct proto {
struct kmem_cache *slab;
unsigned int obj_size;
slab_flags_t slab_flags;
- size_t useroffset; /* Usercopy region offset */
- size_t usersize; /* Usercopy region size */
+ unsigned int useroffset; /* Usercopy region offset */
+ unsigned int usersize; /* Usercopy region size */
struct percpu_counter *orphan_count;
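
useroffset/usersize describe the hardened-usercopy whitelist inside the per-socket slab object; unsigned int is wide enough since obj_size is also unsigned int. A sketch of how a protocol fills them, with a hypothetical socket structure:

#include <net/sock.h>

struct example_sock {
	struct sock	sk;
	char		ex_user_buf[64];	/* region copied to/from user space */
};

static struct proto example_prot = {
	.name		= "EXAMPLE",
	.obj_size	= sizeof(struct example_sock),
	.useroffset	= offsetof(struct example_sock, ex_user_buf),
	.usersize	= sizeof(((struct example_sock *)0)->ex_user_buf),
};
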
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 415e09960017..a08cc7278980 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -119,10 +119,6 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
struct rdma_dev_addr *addr, void *context),
void *context);
-int rdma_resolve_ip_route(struct sockaddr *src_addr,
- const struct sockaddr *dst_addr,
- struct rdma_dev_addr *addr);
-
void rdma_addr_cancel(struct rdma_dev_addr *addr);
void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
@@ -133,11 +129,6 @@ int rdma_addr_size(struct sockaddr *addr);
int rdma_addr_size_in6(struct sockaddr_in6 *addr);
int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr);
-int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
- const union ib_gid *dgid,
- u8 *dmac, const struct net_device *ndev,
- int *hoplimit);
-
static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
{
return ((u16)dev_addr->broadcast[8] << 8) | (u16)dev_addr->broadcast[9];
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index 385ec88ee9e5..eb49cc8d1f95 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -55,20 +55,6 @@ int ib_get_cached_gid(struct ib_device *device,
union ib_gid *gid,
struct ib_gid_attr *attr);
-/**
- * ib_find_cached_gid - Returns the port number and GID table index where
- * a specified GID value occurs.
- * @device: The device to query.
- * @gid: The GID value to search for.
- * @gid_type: The GID type to search for.
- * @ndev: In RoCE, the net device of the device. NULL means ignore.
- * @port_num: The port number of the device where the GID value was found.
- * @index: The index into the cached GID table where the GID was found. This
- * parameter may be NULL.
- *
- * ib_find_cached_gid() searches for the specified GID value in
- * the local software cache.
- */
int ib_find_cached_gid(struct ib_device *device,
const union ib_gid *gid,
enum ib_gid_type gid_type,
@@ -76,21 +62,6 @@ int ib_find_cached_gid(struct ib_device *device,
u8 *port_num,
u16 *index);
-/**
- * ib_find_cached_gid_by_port - Returns the GID table index where a specified
- * GID value occurs
- * @device: The device to query.
- * @gid: The GID value to search for.
- * @gid_type: The GID type to search for.
- * @port_num: The port number of the device where the GID value sould be
- * searched.
- * @ndev: In RoCE, the net device of the device. Null means ignore.
- * @index: The index into the cached GID table where the GID was found. This
- * parameter may be NULL.
- *
- * ib_find_cached_gid() searches for the specified GID value in
- * the local software cache.
- */
int ib_find_cached_gid_by_port(struct ib_device *device,
const union ib_gid *gid,
enum ib_gid_type gid_type,
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 811cfcfcbe3d..bacb144f7780 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -163,7 +163,15 @@ struct sa_path_rec_ib {
u8 raw_traffic;
};
+/**
+ * struct sa_path_rec_roce - RoCE specific portion of the path record entry
+ * @route_resolved: When set, it indicates that this route is already
+ * resolved for this path record entry.
+ * @dmac: Destination mac address for the given DGID entry
+ * of the path record entry.
+ */
struct sa_path_rec_roce {
+ bool route_resolved;
u8 dmac[ETH_ALEN];
/* ignored in IB */
int ifindex;
@@ -590,6 +598,11 @@ static inline bool sa_path_is_roce(struct sa_path_rec *rec)
(rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2));
}
+static inline bool sa_path_is_opa(struct sa_path_rec *rec)
+{
+ return (rec->rec_type == SA_PATH_REC_TYPE_OPA);
+}
+
static inline void sa_path_set_slid(struct sa_path_rec *rec, u32 slid)
{
if (rec->rec_type == SA_PATH_REC_TYPE_IB)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 6eb174753acf..9fc8a825aa28 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -64,6 +64,8 @@
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/restrack.h>
+#include <uapi/rdma/rdma_user_ioctl.h>
+#include <uapi/rdma/ib_user_ioctl_verbs.h>
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
@@ -90,8 +92,11 @@ enum ib_gid_type {
#define ROCE_V2_UDP_DPORT 4791
struct ib_gid_attr {
- enum ib_gid_type gid_type;
struct net_device *ndev;
+ struct ib_device *device;
+ enum ib_gid_type gid_type;
+ u16 index;
+ u8 port_num;
};
enum rdma_node_type {
@@ -316,6 +321,18 @@ struct ib_cq_caps {
u16 max_cq_moderation_period;
};
+struct ib_dm_mr_attr {
+ u64 length;
+ u64 offset;
+ u32 access_flags;
+};
+
+struct ib_dm_alloc_attr {
+ u64 length;
+ u32 alignment;
+ u32 flags;
+};
+
struct ib_device_attr {
u64 fw_ver;
__be64 sys_image_guid;
@@ -367,6 +384,7 @@ struct ib_device_attr {
u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
struct ib_tm_caps tm_caps;
struct ib_cq_caps cq_caps;
+ u64 max_dm_size;
};
enum ib_mtu {
@@ -469,6 +487,9 @@ enum ib_port_speed {
/**
* struct rdma_hw_stats
+ * @lock - Mutex to protect parallel write access to lifespan and values
+ * of counters, which are 64 bits and not guaranteed to be written
+ * atomically on 32-bit systems.
* @timestamp - Used by the core code to track when the last update was
* @lifespan - Used by the core code to determine how old the counters
* should be before being updated again. Stored in jiffies, defaults
@@ -484,6 +505,7 @@ enum ib_port_speed {
* filled in by the drivers get_stats routine
*/
struct rdma_hw_stats {
+ struct mutex lock; /* Protect lifespan and values[] */
unsigned long timestamp;
unsigned long lifespan;
const char * const *names;
@@ -1755,6 +1777,14 @@ struct ib_qp {
struct rdma_restrack_entry res;
};
+struct ib_dm {
+ struct ib_device *device;
+ u32 length;
+ u32 flags;
+ struct ib_uobject *uobject;
+ atomic_t usecnt;
+};
+
struct ib_mr {
struct ib_device *device;
struct ib_pd *pd;
@@ -1768,6 +1798,13 @@ struct ib_mr {
struct ib_uobject *uobject; /* user */
struct list_head qp_entry; /* FR */
};
+
+ struct ib_dm *dm;
+
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
struct ib_mw {
@@ -1810,6 +1847,7 @@ enum ib_flow_spec_type {
/* L3 header*/
IB_FLOW_SPEC_IPV4 = 0x30,
IB_FLOW_SPEC_IPV6 = 0x31,
+ IB_FLOW_SPEC_ESP = 0x34,
/* L4 headers*/
IB_FLOW_SPEC_TCP = 0x40,
IB_FLOW_SPEC_UDP = 0x41,
@@ -1818,6 +1856,7 @@ enum ib_flow_spec_type {
/* Actions */
IB_FLOW_SPEC_ACTION_TAG = 0x1000,
IB_FLOW_SPEC_ACTION_DROP = 0x1001,
+ IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
};
#define IB_FLOW_SPEC_LAYER_MASK 0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 8
@@ -1835,7 +1874,8 @@ enum ib_flow_domain {
enum ib_flow_flags {
IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
- IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2 /* Must be last */
+ IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
+ IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3 /* Must be last */
};
struct ib_flow_eth_filter {
@@ -1940,6 +1980,20 @@ struct ib_flow_spec_tunnel {
struct ib_flow_tunnel_filter mask;
};
+struct ib_flow_esp_filter {
+ __be32 spi;
+ __be32 seq;
+ /* Must be last */
+ u8 real_sz[0];
+};
+
+struct ib_flow_spec_esp {
+ u32 type;
+ u16 size;
+ struct ib_flow_esp_filter val;
+ struct ib_flow_esp_filter mask;
+};
+
struct ib_flow_spec_action_tag {
enum ib_flow_spec_type type;
u16 size;
@@ -1951,6 +2005,12 @@ struct ib_flow_spec_action_drop {
u16 size;
};
+struct ib_flow_spec_action_handle {
+ enum ib_flow_spec_type type;
+ u16 size;
+ struct ib_flow_action *act;
+};
+
union ib_flow_spec {
struct {
u32 type;
@@ -1962,8 +2022,10 @@ union ib_flow_spec {
struct ib_flow_spec_tcp_udp tcp_udp;
struct ib_flow_spec_ipv6 ipv6;
struct ib_flow_spec_tunnel tunnel;
+ struct ib_flow_spec_esp esp;
struct ib_flow_spec_action_tag flow_tag;
struct ib_flow_spec_action_drop drop;
+ struct ib_flow_spec_action_handle action;
};
struct ib_flow_attr {
@@ -1984,6 +2046,64 @@ struct ib_flow {
struct ib_uobject *uobject;
};
+enum ib_flow_action_type {
+ IB_FLOW_ACTION_UNSPECIFIED,
+ IB_FLOW_ACTION_ESP = 1,
+};
+
+struct ib_flow_action_attrs_esp_keymats {
+ enum ib_uverbs_flow_action_esp_keymat protocol;
+ union {
+ struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
+ } keymat;
+};
+
+struct ib_flow_action_attrs_esp_replays {
+ enum ib_uverbs_flow_action_esp_replay protocol;
+ union {
+ struct ib_uverbs_flow_action_esp_replay_bmp bmp;
+ } replay;
+};
+
+enum ib_flow_action_attrs_esp_flags {
+ /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
+ * This is done in order to share the same flags between user-space and
+ * kernel and spare an unnecessary translation.
+ */
+
+ /* Kernel flags */
+ IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32,
+ IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33,
+};
+
+struct ib_flow_spec_list {
+ struct ib_flow_spec_list *next;
+ union ib_flow_spec spec;
+};
+
+struct ib_flow_action_attrs_esp {
+ struct ib_flow_action_attrs_esp_keymats *keymat;
+ struct ib_flow_action_attrs_esp_replays *replay;
+ struct ib_flow_spec_list *encap;
+ /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
+ * Value of 0 is a valid value.
+ */
+ u32 esn;
+ u32 spi;
+ u32 seq;
+ u32 tfc_pad;
+ /* Use enum ib_flow_action_attrs_esp_flags */
+ u64 flags;
+ u64 hard_limit_pkts;
+};
+
+struct ib_flow_action {
+ struct ib_device *device;
+ struct ib_uobject *uobject;
+ enum ib_flow_action_type type;
+ atomic_t usecnt;
+};
+
struct ib_mad_hdr;
struct ib_grh;
@@ -2060,6 +2180,8 @@ struct ib_port_pkey_list {
struct list_head pkey_list;
};
+struct uverbs_attr_bundle;
+
struct ib_device {
/* Do not access @dma_device directly from ULP nor from HW drivers. */
struct device *dma_device;
@@ -2127,37 +2249,36 @@ struct ib_device {
*/
struct net_device *(*get_netdev)(struct ib_device *device,
u8 port_num);
+ /* query_gid should return the GID value for @device when the @port_num
+ * link layer is either IB or iWARP. It is a no-op if the @port_num
+ * port is a RoCE link layer.
+ */
int (*query_gid)(struct ib_device *device,
u8 port_num, int index,
union ib_gid *gid);
- /* When calling add_gid, the HW vendor's driver should
- * add the gid of device @device at gid index @index of
- * port @port_num to be @gid. Meta-info of that gid (for example,
- * the network device related to this gid is available
- * at @attr. @context allows the HW vendor driver to store extra
- * information together with a GID entry. The HW vendor may allocate
- * memory to contain this information and store it in @context when a
- * new GID entry is written to. Params are consistent until the next
- * call of add_gid or delete_gid. The function should return 0 on
+ /* When calling add_gid, the HW vendor's driver should add the gid
+ * of the device, port and GID index available at @attr. Meta-info of
+ * that gid (for example, the network device related to this gid) is
+ * available at @attr. @context allows the HW vendor driver to store
+ * extra information together with a GID entry. The HW vendor driver may
+ * allocate memory to contain this information and store it in @context
+ * when a new GID entry is written to. Params are consistent until the
+ * next call of add_gid or delete_gid. The function should return 0 on
* success or error otherwise. The function could be called
- * concurrently for different ports. This function is only called
- * when roce_gid_table is used.
+ * concurrently for different ports. This function is only called when
+ * roce_gid_table is used.
*/
- int (*add_gid)(struct ib_device *device,
- u8 port_num,
- unsigned int index,
- const union ib_gid *gid,
+ int (*add_gid)(const union ib_gid *gid,
const struct ib_gid_attr *attr,
void **context);
/* When calling del_gid, the HW vendor's driver should delete the
- * gid of device @device at gid index @index of port @port_num.
+ * gid of device @device at gid index gid_index of port port_num
+ * available in @attr.
* Upon the deletion of a GID entry, the HW vendor must free any
* allocated memory. The caller will clear @context afterwards.
* This function is only called when roce_gid_table is used.
*/
- int (*del_gid)(struct ib_device *device,
- u8 port_num,
- unsigned int index,
+ int (*del_gid)(const struct ib_gid_attr *attr,
void **context);
int (*query_pkey)(struct ib_device *device,
u8 port_num, u16 index, u16 *pkey);
@@ -2315,6 +2436,21 @@ struct ib_device {
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
+ struct ib_flow_action * (*create_flow_action_esp)(struct ib_device *device,
+ const struct ib_flow_action_attrs_esp *attr,
+ struct uverbs_attr_bundle *attrs);
+ int (*destroy_flow_action)(struct ib_flow_action *action);
+ int (*modify_flow_action_esp)(struct ib_flow_action *action,
+ const struct ib_flow_action_attrs_esp *attr,
+ struct uverbs_attr_bundle *attrs);
+ struct ib_dm * (*alloc_dm)(struct ib_device *device,
+ struct ib_ucontext *context,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+ int (*dealloc_dm)(struct ib_dm *dm);
+ struct ib_mr * (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
+ struct ib_dm_mr_attr *attr,
+ struct uverbs_attr_bundle *attrs);
/**
* rdma netdev operation
*
@@ -2376,6 +2512,7 @@ struct ib_device {
int comp_vector);
struct uverbs_root_spec *specs_root;
+ enum rdma_driver_id driver_id;
};
struct ib_client {
@@ -2435,11 +2572,9 @@ static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len
return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
-static inline bool ib_is_udata_cleared(struct ib_udata *udata,
- size_t offset,
- size_t len)
+static inline bool ib_is_buffer_cleared(const void __user *p,
+ size_t len)
{
- const void __user *p = udata->inbuf + offset;
bool ret;
u8 *buf;
@@ -2455,6 +2590,13 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata,
return ret;
}
+static inline bool ib_is_udata_cleared(struct ib_udata *udata,
+ size_t offset,
+ size_t len)
+{
+ return ib_is_buffer_cleared(udata->inbuf + offset, len);
+}
+
/**
* ib_modify_qp_is_ok - Check that the supplied attribute mask
* contains all required attributes and no attributes not allowed for
@@ -2471,9 +2613,9 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata,
* transition from cur_state to next_state is allowed by the IB spec,
* and that the attribute mask supplied is allowed for the transition.
*/
-int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
- enum ib_qp_type type, enum ib_qp_attr_mask mask,
- enum rdma_link_layer ll);
+bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
+ enum ib_qp_type type, enum ib_qp_attr_mask mask,
+ enum rdma_link_layer ll);
void ib_register_event_handler(struct ib_event_handler *event_handler);
void ib_unregister_event_handler(struct ib_event_handler *event_handler);
@@ -2848,7 +2990,7 @@ int ib_modify_port(struct ib_device *device,
struct ib_port_modify *port_modify);
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- struct net_device *ndev, u8 *port_num, u16 *index);
+ u8 *port_num, u16 *index);
int ib_find_pkey(struct ib_device *device,
u8 port_num, u16 pkey, u16 *index);
@@ -3217,18 +3359,6 @@ static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
}
/**
- * ib_peek_cq - Returns the number of unreaped completions currently
- * on the specified CQ.
- * @cq: The CQ to peek.
- * @wc_cnt: A minimum number of unreaped completions to check for.
- *
- * If the number of unreaped completions is greater than or equal to wc_cnt,
- * this function returns wc_cnt, otherwise, it returns the actual number of
- * unreaped completions.
- */
-int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
-
-/**
* ib_req_notify_cq - Request completion notification on a CQ.
* @cq: The CQ to generate an event for.
* @flags:
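
ib_is_udata_cleared() is now a thin wrapper around ib_is_buffer_cleared(), so raw user pointers can be validated the same way. The usual provider-side check, shown as a sketch with a hypothetical helper name:

#include <rdma/ib_verbs.h>

static int example_check_trailing(struct ib_udata *udata, size_t known_len)
{
	/* Reject commands whose unknown trailing bytes are not zeroed. */
	if (udata->inlen > known_len &&
	    !ib_is_udata_cleared(udata, known_len, udata->inlen - known_len))
		return -EOPNOTSUPP;

	return 0;
}
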
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 6538a5cc27b6..690934733ba7 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -38,6 +38,7 @@
#include <linux/in6.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_sa.h>
+#include <uapi/rdma/rdma_user_cm.h>
/*
* Upon receiving a device removal event, users must destroy the associated
@@ -64,14 +65,6 @@ enum rdma_cm_event_type {
const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event);
-enum rdma_port_space {
- RDMA_PS_SDP = 0x0001,
- RDMA_PS_IPOIB = 0x0002,
- RDMA_PS_IB = 0x013F,
- RDMA_PS_TCP = 0x0106,
- RDMA_PS_UDP = 0x0111,
-};
-
#define RDMA_IB_IP_PS_MASK 0xFFFFFFFFFFFF0000ULL
#define RDMA_IB_IP_PS_TCP 0x0000000001060000ULL
#define RDMA_IB_IP_PS_UDP 0x0000000001110000ULL
@@ -120,20 +113,6 @@ struct rdma_cm_event {
} param;
};
-enum rdma_cm_state {
- RDMA_CM_IDLE,
- RDMA_CM_ADDR_QUERY,
- RDMA_CM_ADDR_RESOLVED,
- RDMA_CM_ROUTE_QUERY,
- RDMA_CM_ROUTE_RESOLVED,
- RDMA_CM_CONNECT,
- RDMA_CM_DISCONNECT,
- RDMA_CM_ADDR_BOUND,
- RDMA_CM_LISTEN,
- RDMA_CM_DEVICE_REMOVAL,
- RDMA_CM_DESTROYING
-};
-
struct rdma_cm_id;
/**
@@ -152,11 +131,17 @@ struct rdma_cm_id {
struct ib_qp *qp;
rdma_cm_event_handler event_handler;
struct rdma_route route;
- enum rdma_port_space ps;
+ enum rdma_ucm_port_space ps;
enum ib_qp_type qp_type;
u8 port_num;
};
+struct rdma_cm_id *__rdma_create_id(struct net *net,
+ rdma_cm_event_handler event_handler,
+ void *context, enum rdma_ucm_port_space ps,
+ enum ib_qp_type qp_type,
+ const char *caller);
+
/**
* rdma_create_id - Create an RDMA identifier.
*
@@ -169,10 +154,9 @@ struct rdma_cm_id {
*
* The id holds a reference on the network namespace until it is destroyed.
*/
-struct rdma_cm_id *rdma_create_id(struct net *net,
- rdma_cm_event_handler event_handler,
- void *context, enum rdma_port_space ps,
- enum ib_qp_type qp_type);
+#define rdma_create_id(net, event_handler, context, ps, qp_type) \
+ __rdma_create_id((net), (event_handler), (context), (ps), (qp_type), \
+ KBUILD_MODNAME)
/**
* rdma_destroy_id - Destroys an RDMA identifier.
@@ -284,6 +268,9 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
*/
int rdma_listen(struct rdma_cm_id *id, int backlog);
+int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ const char *caller);
+
/**
* rdma_accept - Called to accept a connection request or response.
* @id: Connection identifier associated with the request.
@@ -299,7 +286,8 @@ int rdma_listen(struct rdma_cm_id *id, int backlog);
* state of the qp associated with the id is modified to error, such that any
* previously posted receive buffers would be flushed.
*/
-int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
+#define rdma_accept(id, conn_param) \
+ __rdma_accept((id), (conn_param), KBUILD_MODNAME)
/**
* rdma_notify - Notifies the RDMA CM of an asynchronous event that has
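
Existing callers need no change: rdma_create_id() and rdma_accept() are now macros that forward to the __ variants with KBUILD_MODNAME, so the core can attribute the cm_id to its owner module. A sketch of an unchanged caller; the handler and context are placeholders:

#include <rdma/rdma_cm.h>

static struct rdma_cm_id *example_new_id(struct net *net,
					 rdma_cm_event_handler handler,
					 void *context)
{
	/* Expands to __rdma_create_id(..., KBUILD_MODNAME). */
	return rdma_create_id(net, handler, context, RDMA_PS_TCP, IB_QPT_RC);
}
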
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 4118324a0310..3f4c187e435d 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -538,7 +538,7 @@ static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
void rvt_dealloc_device(struct rvt_dev_info *rdi);
-int rvt_register_device(struct rvt_dev_info *rvd);
+int rvt_register_device(struct rvt_dev_info *rvd, u32 driver_id);
void rvt_unregister_device(struct rvt_dev_info *rvd);
int rvt_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index 2cdf8dcf4bdc..f3b3e3576f6a 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/kref.h>
#include <linux/completion.h>
+#include <linux/sched/task.h>
/**
* enum rdma_restrack_type - HW objects to track
@@ -29,6 +30,14 @@ enum rdma_restrack_type {
*/
RDMA_RESTRACK_QP,
/**
+ * @RDMA_RESTRACK_CM_ID: Connection Manager ID (CM_ID)
+ */
+ RDMA_RESTRACK_CM_ID,
+ /**
+ * @RDMA_RESTRACK_MR: Memory Region (MR)
+ */
+ RDMA_RESTRACK_MR,
+ /**
* @RDMA_RESTRACK_MAX: Last entry, used for array declarations
*/
RDMA_RESTRACK_MAX
@@ -146,8 +155,23 @@ static inline bool rdma_is_kernel_res(struct rdma_restrack_entry *res)
int __must_check rdma_restrack_get(struct rdma_restrack_entry *res);
/**
- * rdma_restrack_put() - relase resource
+ * rdma_restrack_put() - release resource
* @res: resource entry
*/
int rdma_restrack_put(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_restrack_set_task() - set the task for this resource
+ * @res: resource entry
+ * @task: task struct
+ */
+static inline void rdma_restrack_set_task(struct rdma_restrack_entry *res,
+ struct task_struct *task)
+{
+ if (res->task)
+ put_task_struct(res->task);
+ get_task_struct(task);
+ res->task = task;
+}
+
#endif /* _RDMA_RESTRACK_H_ */
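
rdma_restrack_set_task() takes its own reference on the task and drops any previously recorded one. A sketch of the expected call site when a user-owned resource is created; the wrapper name is an assumption:

#include <rdma/restrack.h>

static void example_attribute_resource(struct rdma_restrack_entry *res)
{
	/* Attribute the new resource to the creating process. */
	rdma_restrack_set_task(res, current);
}
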
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 38287d9d23a1..4a4201d997a7 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -37,6 +37,7 @@
#include <linux/uaccess.h>
#include <rdma/rdma_user_ioctl.h>
#include <rdma/ib_user_ioctl_verbs.h>
+#include <rdma/ib_user_ioctl_cmds.h>
/*
* =======================================
@@ -50,6 +51,7 @@ enum uverbs_attr_type {
UVERBS_ATTR_TYPE_PTR_OUT,
UVERBS_ATTR_TYPE_IDR,
UVERBS_ATTR_TYPE_FD,
+ UVERBS_ATTR_TYPE_ENUM_IN,
};
enum uverbs_obj_access {
@@ -61,15 +63,32 @@ enum uverbs_obj_access {
enum {
UVERBS_ATTR_SPEC_F_MANDATORY = 1U << 0,
- /* Support extending attributes by length */
- UVERBS_ATTR_SPEC_F_MIN_SZ = 1U << 1,
+ /* Support extending attributes by length, validate all unknown size == zero */
+ UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO = 1U << 1,
};
+/* Specification of a single attribute inside the ioctl message */
struct uverbs_attr_spec {
- enum uverbs_attr_type type;
union {
- u16 len;
+ /* Header shared by all following union members - to reduce space. */
struct {
+ enum uverbs_attr_type type;
+ /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
+ u8 flags;
+ };
+ struct {
+ enum uverbs_attr_type type;
+ /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
+ u8 flags;
+ /* Current known size to kernel */
+ u16 len;
+ /* User isn't allowed to provide something < min_len */
+ u16 min_len;
+ } ptr;
+ struct {
+ enum uverbs_attr_type type;
+ /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
+ u8 flags;
/*
* higher bits mean the namespace and lower bits mean
* the type id within the namespace.
@@ -77,9 +96,19 @@ struct uverbs_attr_spec {
u16 obj_type;
u8 access;
} obj;
+ struct {
+ enum uverbs_attr_type type;
+ /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
+ u8 flags;
+ u8 num_elems;
+ /*
+ * The enum attribute can select one of the attributes
+ * contained in the ids array. Currently only PTR_IN
+ * attributes are supported in the ids array.
+ */
+ const struct uverbs_attr_spec *ids;
+ } enum_def;
};
- /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
- u8 flags;
};
struct uverbs_attr_spec_hash {
@@ -164,30 +193,45 @@ struct uverbs_object_tree_def {
};
#define UA_FLAGS(_flags) .flags = _flags
-#define __UVERBS_ATTR0(_id, _len, _type, ...) \
+#define __UVERBS_ATTR0(_id, _type, _fld, _attr, ...) \
((const struct uverbs_attr_def) \
- {.id = _id, .attr = {.type = _type, {.len = _len}, .flags = 0, } })
-#define __UVERBS_ATTR1(_id, _len, _type, _flags) \
+ {.id = _id, .attr = {{._fld = {.type = _type, _attr, .flags = 0, } }, } })
+#define __UVERBS_ATTR1(_id, _type, _fld, _attr, _extra1, ...) \
((const struct uverbs_attr_def) \
- {.id = _id, .attr = {.type = _type, {.len = _len}, _flags, } })
-#define __UVERBS_ATTR(_id, _len, _type, _flags, _n, ...) \
- __UVERBS_ATTR##_n(_id, _len, _type, _flags)
+ {.id = _id, .attr = {{._fld = {.type = _type, _attr, _extra1 } },} })
+#define __UVERBS_ATTR2(_id, _type, _fld, _attr, _extra1, _extra2) \
+ ((const struct uverbs_attr_def) \
+ {.id = _id, .attr = {{._fld = {.type = _type, _attr, _extra1, _extra2 } },} })
+#define __UVERBS_ATTR(_id, _type, _fld, _attr, _extra1, _extra2, _n, ...) \
+ __UVERBS_ATTR##_n(_id, _type, _fld, _attr, _extra1, _extra2)
+
+#define UVERBS_ATTR_TYPE(_type) \
+ .min_len = sizeof(_type), .len = sizeof(_type)
+#define UVERBS_ATTR_STRUCT(_type, _last) \
+ .min_len = ((uintptr_t)(&((_type *)0)->_last + 1)), .len = sizeof(_type)
+#define UVERBS_ATTR_SIZE(_min_len, _len) \
+ .min_len = _min_len, .len = _len
+
/*
* In new compiler, UVERBS_ATTR could be simplified by declaring it as
* [_id] = {.type = _type, .len = _len, ##__VA_ARGS__}
* But since we support older compilers too, we need the more complex code.
*/
-#define UVERBS_ATTR(_id, _len, _type, ...) \
- __UVERBS_ATTR(_id, _len, _type, ##__VA_ARGS__, 1, 0)
+#define UVERBS_ATTR(_id, _type, _fld, _attr, ...) \
+ __UVERBS_ATTR(_id, _type, _fld, _attr, ##__VA_ARGS__, 2, 1, 0)
#define UVERBS_ATTR_PTR_IN_SZ(_id, _len, ...) \
- UVERBS_ATTR(_id, _len, UVERBS_ATTR_TYPE_PTR_IN, ##__VA_ARGS__)
+ UVERBS_ATTR(_id, UVERBS_ATTR_TYPE_PTR_IN, ptr, _len, ##__VA_ARGS__)
/* If sizeof(_type) <= sizeof(u64), this will be inlined rather than a pointer */
#define UVERBS_ATTR_PTR_IN(_id, _type, ...) \
- UVERBS_ATTR_PTR_IN_SZ(_id, sizeof(_type), ##__VA_ARGS__)
+ UVERBS_ATTR_PTR_IN_SZ(_id, _type, ##__VA_ARGS__)
#define UVERBS_ATTR_PTR_OUT_SZ(_id, _len, ...) \
- UVERBS_ATTR(_id, _len, UVERBS_ATTR_TYPE_PTR_OUT, ##__VA_ARGS__)
+ UVERBS_ATTR(_id, UVERBS_ATTR_TYPE_PTR_OUT, ptr, _len, ##__VA_ARGS__)
#define UVERBS_ATTR_PTR_OUT(_id, _type, ...) \
- UVERBS_ATTR_PTR_OUT_SZ(_id, sizeof(_type), ##__VA_ARGS__)
+ UVERBS_ATTR_PTR_OUT_SZ(_id, _type, ##__VA_ARGS__)
+#define UVERBS_ATTR_ENUM_IN(_id, _enum_arr, ...) \
+ UVERBS_ATTR(_id, UVERBS_ATTR_TYPE_ENUM_IN, enum_def, \
+ .ids = (_enum_arr), \
+ .num_elems = ARRAY_SIZE(_enum_arr), ##__VA_ARGS__)
/*
* In new compiler, UVERBS_ATTR_IDR (and FD) could be simplified by declaring
@@ -202,15 +246,13 @@ struct uverbs_object_tree_def {
#define ___UVERBS_ATTR_OBJ0(_id, _obj_class, _obj_type, _access, ...)\
((const struct uverbs_attr_def) \
{.id = _id, \
- .attr = {.type = _obj_class, \
- {.obj = {.obj_type = _obj_type, .access = _access } },\
- .flags = 0} })
+ .attr = { {.obj = {.type = _obj_class, .obj_type = _obj_type, \
+ .access = _access, .flags = 0 } }, } })
#define ___UVERBS_ATTR_OBJ1(_id, _obj_class, _obj_type, _access, _flags)\
((const struct uverbs_attr_def) \
{.id = _id, \
- .attr = {.type = _obj_class, \
- {.obj = {.obj_type = _obj_type, .access = _access} }, \
- _flags} })
+ .attr = { {.obj = {.type = _obj_class, .obj_type = _obj_type, \
+ .access = _access, _flags} }, } })
#define ___UVERBS_ATTR_OBJ(_id, _obj_class, _obj_type, _access, _flags, \
_n, ...) \
___UVERBS_ATTR_OBJ##_n(_id, _obj_class, _obj_type, _access, _flags)
@@ -229,6 +271,11 @@ struct uverbs_object_tree_def {
#define DECLARE_UVERBS_ATTR_SPEC(_name, ...) \
const struct uverbs_attr_def _name = __VA_ARGS__
+#define DECLARE_UVERBS_ENUM(_name, ...) \
+ const struct uverbs_enum_spec _name = { \
+ .len = ARRAY_SIZE(((struct uverbs_attr_spec[]){__VA_ARGS__})),\
+ .ids = {__VA_ARGS__}, \
+ }
#define _UVERBS_METHOD_ATTRS_SZ(...) \
(sizeof((const struct uverbs_attr_def * const []){__VA_ARGS__}) /\
sizeof(const struct uverbs_attr_def *))
@@ -280,6 +327,7 @@ struct uverbs_ptr_attr {
u16 len;
/* Combination of bits from enum UVERBS_ATTR_F_XXXX */
u16 flags;
+ u8 enum_id;
};
struct uverbs_obj_attr {
@@ -336,6 +384,8 @@ static inline bool uverbs_attr_is_valid(const struct uverbs_attr_bundle *attrs_b
idx & ~UVERBS_ID_NS_MASK);
}
+#define IS_UVERBS_COPY_ERR(_ret) ((_ret) && (_ret) != -ENOENT)
+
static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr_bundle *attrs_bundle,
u16 idx)
{
@@ -347,6 +397,29 @@ static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr
return &attrs_bundle->hash[idx_bucket].attrs[idx & ~UVERBS_ID_NS_MASK];
}
+static inline int uverbs_attr_get_enum_id(const struct uverbs_attr_bundle *attrs_bundle,
+ u16 idx)
+{
+ const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
+
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ return attr->ptr_attr.enum_id;
+}
+
+static inline void *uverbs_attr_get_obj(const struct uverbs_attr_bundle *attrs_bundle,
+ u16 idx)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get(attrs_bundle, idx)->obj_attr.uobject;
+
+ if (IS_ERR(uobj))
+ return uobj;
+
+ return uobj->object;
+}
+
static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle,
size_t idx, const void *from, size_t size)
{
@@ -385,8 +458,8 @@ static inline int _uverbs_copy_from(void *to,
/*
* Validation ensures attr->ptr_attr.len >= size. If the caller is
- * using UVERBS_ATTR_SPEC_F_MIN_SZ then it must call copy_from with
- * the right size.
+ * using UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO then it must call
+ * uverbs_copy_from_or_zero.
*/
if (unlikely(size < attr->ptr_attr.len))
return -EINVAL;
@@ -400,9 +473,37 @@ static inline int _uverbs_copy_from(void *to,
return 0;
}
+static inline int _uverbs_copy_from_or_zero(void *to,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx,
+ size_t size)
+{
+ const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
+ size_t min_size;
+
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ min_size = min_t(size_t, size, attr->ptr_attr.len);
+
+ if (uverbs_attr_ptr_is_inline(attr))
+ memcpy(to, &attr->ptr_attr.data, min_size);
+ else if (copy_from_user(to, u64_to_user_ptr(attr->ptr_attr.data),
+ min_size))
+ return -EFAULT;
+
+ if (size > min_size)
+ memset(to + min_size, 0, size - min_size);
+
+ return 0;
+}
+
#define uverbs_copy_from(to, attrs_bundle, idx) \
_uverbs_copy_from(to, attrs_bundle, idx, sizeof(*to))
+#define uverbs_copy_from_or_zero(to, attrs_bundle, idx) \
+ _uverbs_copy_from_or_zero(to, attrs_bundle, idx, sizeof(*to))
+
/* =================================================
* Definitions -> Specs infrastructure
* =================================================
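
With UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO, a handler should read extensible attributes through uverbs_copy_from_or_zero(), which copies what user space supplied and zero-fills the rest of the kernel structure. A sketch with a hypothetical attribute layout:

#include <rdma/uverbs_ioctl.h>

/* Hypothetical extensible attribute; older user space may send only a prefix. */
struct example_ext_attr {
	__u32		flags;
	__u32		reserved;
	__aligned_u64	value;
};

static int example_parse(struct uverbs_attr_bundle *attrs, u16 attr_id,
			 struct example_ext_attr *out)
{
	/* Shorter user buffers are accepted; the tail of *out is zeroed. */
	return uverbs_copy_from_or_zero(out, attrs, attr_id);
}
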
diff --git a/include/rdma/uverbs_named_ioctl.h b/include/rdma/uverbs_named_ioctl.h
new file mode 100644
index 000000000000..c5bb4ebdb0b0
--- /dev/null
+++ b/include/rdma/uverbs_named_ioctl.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _UVERBS_NAMED_IOCTL_
+#define _UVERBS_NAMED_IOCTL_
+
+#include <rdma/uverbs_ioctl.h>
+
+#ifndef UVERBS_MODULE_NAME
+#error "Please #define UVERBS_MODULE_NAME before including rdma/uverbs_named_ioctl.h"
+#endif
+
+#define _UVERBS_PASTE(x, y) x ## y
+#define _UVERBS_NAME(x, y) _UVERBS_PASTE(x, y)
+#define UVERBS_METHOD(id) _UVERBS_NAME(UVERBS_MODULE_NAME, _method_##id)
+#define UVERBS_HANDLER(id) _UVERBS_NAME(UVERBS_MODULE_NAME, _handler_##id)
+
+#define DECLARE_UVERBS_NAMED_METHOD(id, ...) \
+ DECLARE_UVERBS_METHOD(UVERBS_METHOD(id), id, UVERBS_HANDLER(id), ##__VA_ARGS__)
+
+#define DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(id, handler, ...) \
+ DECLARE_UVERBS_METHOD(UVERBS_METHOD(id), id, handler, ##__VA_ARGS__)
+
+#define DECLARE_UVERBS_NAMED_METHOD_NO_OVERRIDE(id, handler, ...) \
+ DECLARE_UVERBS_METHOD(UVERBS_METHOD(id), id, NULL, ##__VA_ARGS__)
+
+#define DECLARE_UVERBS_NAMED_OBJECT(id, ...) \
+ DECLARE_UVERBS_OBJECT(UVERBS_OBJECT(id), id, ##__VA_ARGS__)
+
+#define _UVERBS_COMP_NAME(x, y, z) _UVERBS_NAME(_UVERBS_NAME(x, y), z)
+
+#define UVERBS_NO_OVERRIDE NULL
+
+/* This declares a parsing tree with one object and one method. This is usually
+ * used for merging driver attributes to the common attributes. The driver has
+ * a chance to override the handler and type attrs of the original object.
+ * The __VA_ARGS__ just contains a list of attributes.
+ */
+#define ADD_UVERBS_ATTRIBUTES(_name, _object, _method, _type_attrs, _handler, ...) \
+static DECLARE_UVERBS_METHOD(_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \
+ _method_, _name), \
+ _method, _handler, ##__VA_ARGS__); \
+ \
+static DECLARE_UVERBS_OBJECT(_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \
+ _object_, _name), \
+ _object, _type_attrs, \
+ &_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \
+ _method_, _name)); \
+ \
+static DECLARE_UVERBS_OBJECT_TREE(_name, \
+ &_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \
+ _object_, _name))
+
+/* A very common use case is that the driver doesn't override the handler and
+ * type_attrs. Therefore, we provide a simplified macro for this common case.
+ */
+#define ADD_UVERBS_ATTRIBUTES_SIMPLE(_name, _object, _method, ...) \
+ ADD_UVERBS_ATTRIBUTES(_name, _object, _method, UVERBS_NO_OVERRIDE, \
+ UVERBS_NO_OVERRIDE, ##__VA_ARGS__)
+
+#endif
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 5f8e20bbd67c..9d56cdb84655 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -37,26 +37,10 @@
#include <rdma/uverbs_ioctl.h>
#include <rdma/ib_user_ioctl_verbs.h>
+#define UVERBS_OBJECT(id) uverbs_object_##id
+
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
-extern const struct uverbs_object_def uverbs_object_comp_channel;
-extern const struct uverbs_object_def uverbs_object_cq;
-extern const struct uverbs_object_def uverbs_object_qp;
-extern const struct uverbs_object_def uverbs_object_rwq_ind_table;
-extern const struct uverbs_object_def uverbs_object_wq;
-extern const struct uverbs_object_def uverbs_object_srq;
-extern const struct uverbs_object_def uverbs_object_ah;
-extern const struct uverbs_object_def uverbs_object_flow;
-extern const struct uverbs_object_def uverbs_object_mr;
-extern const struct uverbs_object_def uverbs_object_mw;
-extern const struct uverbs_object_def uverbs_object_pd;
-extern const struct uverbs_object_def uverbs_object_xrcd;
-extern const struct uverbs_object_def uverbs_object_device;
-
-extern const struct uverbs_object_tree_def uverbs_default_objects;
-static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(void)
-{
- return &uverbs_default_objects;
-}
+const struct uverbs_object_tree_def *uverbs_default_get_objects(void);
#else
static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(void)
{
@@ -72,22 +56,22 @@ static inline struct ib_uobject *__uobj_get(const struct uverbs_obj_type *type,
return rdma_lookup_get_uobject(type, ucontext, id, write);
}
-#define uobj_get_type(_object) uverbs_object_##_object.type_attrs
+#define uobj_get_type(_object) UVERBS_OBJECT(_object).type_attrs
#define uobj_get_read(_type, _id, _ucontext) \
- __uobj_get(_type, false, _ucontext, _id)
+ __uobj_get(uobj_get_type(_type), false, _ucontext, _id)
-#define uobj_get_obj_read(_object, _id, _ucontext) \
+#define uobj_get_obj_read(_object, _type, _id, _ucontext) \
({ \
struct ib_uobject *__uobj = \
- __uobj_get(uverbs_object_##_object.type_attrs, \
+ __uobj_get(uobj_get_type(_type), \
false, _ucontext, _id); \
\
(struct ib_##_object *)(IS_ERR(__uobj) ? NULL : __uobj->object);\
})
#define uobj_get_write(_type, _id, _ucontext) \
- __uobj_get(_type, true, _ucontext, _id)
+ __uobj_get(uobj_get_type(_type), true, _ucontext, _id)
static inline void uobj_put_read(struct ib_uobject *uobj)
{
@@ -124,7 +108,7 @@ static inline struct ib_uobject *__uobj_alloc(const struct uverbs_obj_type *type
}
#define uobj_alloc(_type, ucontext) \
- __uobj_alloc(_type, ucontext)
+ __uobj_alloc(uobj_get_type(_type), ucontext)
#endif
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index cb85eddb47ea..eb7853c1a23b 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -47,6 +47,8 @@ static inline int scsi_status_is_good(int status)
*/
status &= 0xfe;
return ((status == SAM_STAT_GOOD) ||
+ (status == SAM_STAT_CONDITION_MET) ||
+ /* Next two "intermediate" statuses are obsolete in SAM-4 */
(status == SAM_STAT_INTERMEDIATE) ||
(status == SAM_STAT_INTERMEDIATE_CONDITION_MET) ||
/* FIXME: this is obsolete in SAM-3 */
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 2280b2351739..aaf1e971c6a3 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -174,8 +174,13 @@ extern void scsi_kunmap_atomic_sg(void *virt);
extern int scsi_init_io(struct scsi_cmnd *cmd);
+#ifdef CONFIG_SCSI_DMA
extern int scsi_dma_map(struct scsi_cmnd *cmd);
extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
+#else /* !CONFIG_SCSI_DMA */
+static inline int scsi_dma_map(struct scsi_cmnd *cmd) { return -ENOSYS; }
+static inline void scsi_dma_unmap(struct scsi_cmnd *cmd) { }
+#endif /* !CONFIG_SCSI_DMA */
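
With !CONFIG_SCSI_DMA these stubs keep LLDD code compiling, and any mapping attempt fails with -ENOSYS. The usual queuecommand-side pattern, as a sketch with the controller programming elided:

#include <scsi/scsi_cmnd.h>

static int example_map_data(struct scsi_cmnd *cmd)
{
	int nseg = scsi_dma_map(cmd);

	if (nseg < 0)
		return nseg;	/* includes -ENOSYS from the !CONFIG_SCSI_DMA stub */

	/* ... build nseg scatter/gather descriptors for the hardware ... */

	return 0;
}
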
static inline unsigned scsi_sg_count(struct scsi_cmnd *cmd)
{
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 9c1e4bad6581..12f454cb6f61 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -52,21 +52,6 @@ struct scsi_host_template {
const char *name;
/*
- * Used to initialize old-style drivers. For new-style drivers
- * just perform all work in your module initialization function.
- *
- * Status: OBSOLETE
- */
- int (* detect)(struct scsi_host_template *);
-
- /*
- * Used as unload callback for hosts with old-style drivers.
- *
- * Status: OBSOLETE
- */
- int (* release)(struct Scsi_Host *);
-
- /*
* The info function will return whatever useful information the
* developer sees fit. If not provided, then the name field will
* be used instead.
@@ -480,13 +465,10 @@ struct scsi_host_template {
struct device_attribute **sdev_attrs;
/*
- * List of hosts per template.
- *
- * This is only for use by scsi_module.c for legacy templates.
- * For these access to it is synchronized implicitly by
- * module_init/module_exit.
+ * Pointer to the SCSI device attribute groups for this host,
+ * NULL terminated.
*/
- struct list_head legacy_hosts;
+ const struct attribute_group **sdev_groups;
/*
* Vendor Identifier associated with the host
@@ -709,15 +691,6 @@ struct Scsi_Host {
struct device shost_gendev, shost_dev;
/*
- * List of hosts per template.
- *
- * This is only for use by scsi_module.c for legacy templates.
- * For these access to it is synchronized implicitly by
- * module_init/module_exit.
- */
- struct list_head sht_legacy_list;
-
- /*
* Points to the transport data (if any) which is allocated
* separately
*/
@@ -917,9 +890,6 @@ static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
return shost->prot_guard_type;
}
-/* legacy interfaces */
-extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
-extern void scsi_unregister(struct Scsi_Host *);
extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);
#endif /* _SCSI_SCSI_HOST_H */
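
sdev_groups replaces the legacy per-template host list with a NULL-terminated set of attribute groups that the midlayer attaches to each scsi_device. A sketch of a hypothetical LLD filling it in:

#include <linux/sysfs.h>
#include <scsi/scsi_host.h>

static struct attribute *example_sdev_attrs[] = {
	/* &dev_attr_example.attr, */
	NULL,
};

static const struct attribute_group example_sdev_group = {
	.attrs = example_sdev_attrs,
};

static const struct attribute_group *example_sdev_groups[] = {
	&example_sdev_group,
	NULL,
};

static struct scsi_host_template example_sht = {
	.name		= "example",
	.sdev_groups	= example_sdev_groups,
};
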
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index cb979ad90401..50df5b28d2c9 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -63,6 +63,7 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_GET_MIN_VOLTAGE = 0x00030008,
RPI_FIRMWARE_GET_TURBO = 0x00030009,
RPI_FIRMWARE_GET_MAX_TEMPERATURE = 0x0003000a,
+ RPI_FIRMWARE_GET_STC = 0x0003000b,
RPI_FIRMWARE_ALLOCATE_MEMORY = 0x0003000c,
RPI_FIRMWARE_LOCK_MEMORY = 0x0003000d,
RPI_FIRMWARE_UNLOCK_MEMORY = 0x0003000e,
@@ -72,12 +73,22 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_SET_ENABLE_QPU = 0x00030012,
RPI_FIRMWARE_GET_DISPMANX_RESOURCE_MEM_HANDLE = 0x00030014,
RPI_FIRMWARE_GET_EDID_BLOCK = 0x00030020,
+ RPI_FIRMWARE_GET_CUSTOMER_OTP = 0x00030021,
RPI_FIRMWARE_GET_DOMAIN_STATE = 0x00030030,
RPI_FIRMWARE_SET_CLOCK_STATE = 0x00038001,
RPI_FIRMWARE_SET_CLOCK_RATE = 0x00038002,
RPI_FIRMWARE_SET_VOLTAGE = 0x00038003,
RPI_FIRMWARE_SET_TURBO = 0x00038009,
+ RPI_FIRMWARE_SET_CUSTOMER_OTP = 0x00038021,
RPI_FIRMWARE_SET_DOMAIN_STATE = 0x00038030,
+ RPI_FIRMWARE_GET_GPIO_STATE = 0x00030041,
+ RPI_FIRMWARE_SET_GPIO_STATE = 0x00038041,
+ RPI_FIRMWARE_SET_SDHOST_CLOCK = 0x00038042,
+ RPI_FIRMWARE_GET_GPIO_CONFIG = 0x00030043,
+ RPI_FIRMWARE_SET_GPIO_CONFIG = 0x00038043,
+ RPI_FIRMWARE_GET_PERIPH_REG = 0x00030045,
+ RPI_FIRMWARE_SET_PERIPH_REG = 0x00038045,
+
/* Dispmanx TAGS */
RPI_FIRMWARE_FRAMEBUFFER_ALLOCATE = 0x00040001,
@@ -91,6 +102,8 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_FRAMEBUFFER_GET_VIRTUAL_OFFSET = 0x00040009,
RPI_FIRMWARE_FRAMEBUFFER_GET_OVERSCAN = 0x0004000a,
RPI_FIRMWARE_FRAMEBUFFER_GET_PALETTE = 0x0004000b,
+ RPI_FIRMWARE_FRAMEBUFFER_GET_TOUCHBUF = 0x0004000f,
+ RPI_FIRMWARE_FRAMEBUFFER_GET_GPIOVIRTBUF = 0x00040010,
RPI_FIRMWARE_FRAMEBUFFER_RELEASE = 0x00048001,
RPI_FIRMWARE_FRAMEBUFFER_TEST_PHYSICAL_WIDTH_HEIGHT = 0x00044003,
RPI_FIRMWARE_FRAMEBUFFER_TEST_VIRTUAL_WIDTH_HEIGHT = 0x00044004,
@@ -100,6 +113,7 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_FRAMEBUFFER_TEST_VIRTUAL_OFFSET = 0x00044009,
RPI_FIRMWARE_FRAMEBUFFER_TEST_OVERSCAN = 0x0004400a,
RPI_FIRMWARE_FRAMEBUFFER_TEST_PALETTE = 0x0004400b,
+ RPI_FIRMWARE_FRAMEBUFFER_TEST_VSYNC = 0x0004400e,
RPI_FIRMWARE_FRAMEBUFFER_SET_PHYSICAL_WIDTH_HEIGHT = 0x00048003,
RPI_FIRMWARE_FRAMEBUFFER_SET_VIRTUAL_WIDTH_HEIGHT = 0x00048004,
RPI_FIRMWARE_FRAMEBUFFER_SET_DEPTH = 0x00048005,
@@ -108,6 +122,10 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_FRAMEBUFFER_SET_VIRTUAL_OFFSET = 0x00048009,
RPI_FIRMWARE_FRAMEBUFFER_SET_OVERSCAN = 0x0004800a,
RPI_FIRMWARE_FRAMEBUFFER_SET_PALETTE = 0x0004800b,
+ RPI_FIRMWARE_FRAMEBUFFER_SET_TOUCHBUF = 0x0004801f,
+ RPI_FIRMWARE_FRAMEBUFFER_SET_GPIOVIRTBUF = 0x00048020,
+ RPI_FIRMWARE_FRAMEBUFFER_SET_VSYNC = 0x0004800e,
+ RPI_FIRMWARE_FRAMEBUFFER_SET_BACKLIGHT = 0x0004800f,
RPI_FIRMWARE_VCHIQ_INIT = 0x00048010,
diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h
index aeae4466dd25..e69e4c4d80ae 100644
--- a/include/soc/tegra/bpmp.h
+++ b/include/soc/tegra/bpmp.h
@@ -75,8 +75,8 @@ struct tegra_bpmp {
struct mbox_chan *channel;
} mbox;
- struct tegra_bpmp_channel *channels;
- unsigned int num_channels;
+ spinlock_t atomic_tx_lock;
+ struct tegra_bpmp_channel *tx_channel, *rx_channel, *threaded_channels;
struct {
unsigned long *allocated;
diff --git a/include/sound/da7219.h b/include/sound/da7219.h
index 409ef1397fd3..1bfcb16f2d10 100644
--- a/include/sound/da7219.h
+++ b/include/sound/da7219.h
@@ -36,6 +36,8 @@ struct da7219_aad_pdata;
struct da7219_pdata {
bool wakeup_source;
+ const char *dai_clks_name;
+
/* Mic */
enum da7219_micbias_voltage micbias_lvl;
enum da7219_mic_amp_in_sel mic_amp_in_sel;
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index 67be2445941a..e3481eebdd98 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -118,6 +118,8 @@ void snd_dmaengine_pcm_set_config_from_dai_data(
* PCM substream. Will be called from the PCM drivers hwparams callback.
* @compat_request_channel: Callback to request a DMA channel for platforms
* which do not use devicetree.
+ * @process: Callback used to apply processing on samples transferred from/to
+ * user space.
* @compat_filter_fn: Will be used as the filter function when requesting a
* channel for platforms which do not use devicetree. The filter parameter
* will be the DAI's DMA data.
@@ -140,6 +142,9 @@ struct snd_dmaengine_pcm_config {
struct dma_chan *(*compat_request_channel)(
struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_substream *substream);
+ int (*process)(struct snd_pcm_substream *substream,
+ int channel, unsigned long hwoff,
+ void *buf, unsigned long bytes);
dma_filter_fn compat_filter_fn;
struct device *dma_dev;
const char *chan_names[SNDRV_PCM_STREAM_LAST + 1];
@@ -161,4 +166,6 @@ int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct dma_slave_config *slave_config);
+#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm"
+
#endif
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 4f42affe777c..5ebcc51c0a6a 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -1710,6 +1710,7 @@ struct snd_emu10k1 {
unsigned int ecard_ctrl; /* ecard control bits */
unsigned int address_mode; /* address mode */
unsigned long dma_mask; /* PCI DMA mask */
+ bool iommu_workaround; /* IOMMU workaround needed */
unsigned int delay_pcm_irq; /* in samples */
int max_cache_pages; /* max memory size / PAGE_SIZE */
struct snd_dma_buffer silent_page; /* silent page */
@@ -1718,7 +1719,6 @@ struct snd_emu10k1 {
struct snd_dma_buffer p16v_buffer;
struct snd_util_memhdr *memhdr; /* page allocation list */
- struct snd_emu10k1_memblk *reserved_page; /* reserved page */
struct list_head mapped_link_head;
struct list_head mapped_order_link_head;
@@ -1878,6 +1878,8 @@ void snd_p16v_resume(struct snd_emu10k1 *emu);
/* memory allocation */
struct snd_util_memblk *snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream);
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk);
+int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
+ struct snd_dma_buffer *dmab);
struct snd_util_memblk *snd_emu10k1_synth_alloc(struct snd_emu10k1 *emu, unsigned int size);
int snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *blk);
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk, int offset, int size);
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 5b2ed12f58ce..06536e01ed94 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -146,6 +146,8 @@ int snd_hdac_codec_write(struct hdac_device *hdac, hda_nid_t nid,
int flags, unsigned int verb, unsigned int parm);
bool snd_hdac_check_power_state(struct hdac_device *hdac,
hda_nid_t nid, unsigned int target_state);
+unsigned int snd_hdac_sync_power_state(struct hdac_device *hdac,
+ hda_nid_t nid, unsigned int target_state);
/**
* snd_hdac_read_parm - read a codec parameter
* @codec: the codec object
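
snd_hdac_sync_power_state() polls the widget until it actually reports the requested state (or gives up) and returns the last state read, unlike the one-shot snd_hdac_check_power_state(). A set-and-wait sketch; the function name is made up:

#include <sound/hdaudio.h>
#include <sound/hda_verbs.h>

static void example_power_up(struct hdac_device *hdac, hda_nid_t nid)
{
	snd_hdac_codec_write(hdac, nid, 0, AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
	/* Wait for the transition to settle before touching the widget. */
	snd_hdac_sync_power_state(hdac, nid, AC_PWRST_D0);
}
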
diff --git a/include/sound/pcm_oss.h b/include/sound/pcm_oss.h
index 760c969d885d..12bbf8c81112 100644
--- a/include/sound/pcm_oss.h
+++ b/include/sound/pcm_oss.h
@@ -57,6 +57,7 @@ struct snd_pcm_oss_runtime {
char *buffer; /* vmallocated period */
size_t buffer_used; /* used length from period buffer */
struct mutex params_lock;
+ atomic_t rw_ref; /* concurrent read/write accesses */
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
struct snd_pcm_plugin *plugin_first;
struct snd_pcm_plugin *plugin_last;
diff --git a/include/sound/rt5651.h b/include/sound/rt5651.h
deleted file mode 100644
index 18b79a761f10..000000000000
--- a/include/sound/rt5651.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * linux/sound/rt286.h -- Platform data for RT286
- *
- * Copyright 2013 Realtek Microelectronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __LINUX_SND_RT5651_H
-#define __LINUX_SND_RT5651_H
-
-enum rt5651_jd_src {
- RT5651_JD_NULL,
- RT5651_JD1_1,
- RT5651_JD1_2,
- RT5651_JD2,
-};
-
-struct rt5651_platform_data {
- /* IN2 can optionally be differential */
- bool in2_diff;
-
- bool dmic_en;
- enum rt5651_jd_src jd_src;
-};
-
-#endif
diff --git a/include/sound/rt5659.h b/include/sound/rt5659.h
index 656c4d58948d..9012e2b25360 100644
--- a/include/sound/rt5659.h
+++ b/include/sound/rt5659.h
@@ -30,6 +30,7 @@ enum rt5659_dmic2_data_pin {
enum rt5659_jd_src {
RT5659_JD_NULL,
RT5659_JD3,
+ RT5659_JD_HDA_HEADER,
};
struct rt5659_platform_data {
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 344b96c206a3..a6ce2de4e20a 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -269,6 +269,13 @@ struct device;
.reg = SND_SOC_NOPM, .shift = wdelay, .event = dapm_regulator_event, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \
.on_val = wflags}
+#define SND_SOC_DAPM_PINCTRL(wname, active, sleep) \
+{ .id = snd_soc_dapm_pinctrl, .name = wname, \
+ .priv = (&(struct snd_soc_dapm_pinctrl_priv) \
+ { .active_state = active, .sleep_state = sleep,}), \
+ .reg = SND_SOC_NOPM, .event = dapm_pinctrl_event, \
+ .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD }
+
/* dapm kcontrol types */
@@ -374,6 +381,8 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event);
int dapm_clock_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event);
+int dapm_pinctrl_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event);
/* dapm controls */
int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
@@ -500,6 +509,7 @@ enum snd_soc_dapm_type {
snd_soc_dapm_pre, /* machine specific pre widget - exec first */
snd_soc_dapm_post, /* machine specific post widget - exec last */
snd_soc_dapm_supply, /* power/clock supply */
+ snd_soc_dapm_pinctrl, /* pinctrl */
snd_soc_dapm_regulator_supply, /* external regulator */
snd_soc_dapm_clock_supply, /* external clock */
snd_soc_dapm_aif_in, /* audio interface input */
@@ -581,6 +591,7 @@ struct snd_soc_dapm_widget {
void *priv; /* widget specific data */
struct regulator *regulator; /* attached regulator */
+ struct pinctrl *pinctrl; /* attached pinctrl */
const struct snd_soc_pcm_stream *params; /* params for dai links */
unsigned int num_params; /* number of params for dai links */
unsigned int params_select; /* currently selected param for dai link */
@@ -683,6 +694,11 @@ struct snd_soc_dapm_stats {
int neighbour_checks;
};
+struct snd_soc_dapm_pinctrl_priv {
+ const char *active_state;
+ const char *sleep_state;
+};
+
/**
* snd_soc_dapm_init_bias_level() - Initialize DAPM bias level
* @dapm: The DAPM context to initialize
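
The new snd_soc_dapm_pinctrl widget type switches the attached pinctrl between two named states as the widget powers up and down. A declaration sketch for a hypothetical codec driver; the pin and state names are made up:

#include <sound/soc-dapm.h>

static const struct snd_soc_dapm_widget example_widgets[] = {
	/* Selects "dmic-active" on power-up and "dmic-sleep" on power-down. */
	SND_SOC_DAPM_PINCTRL("DMIC Pins", "dmic-active", "dmic-sleep"),
};
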
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 747fd583b9dc..ad266d7e9553 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -586,10 +586,17 @@ int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg,
unsigned int mask, unsigned int value);
#ifdef CONFIG_SND_SOC_AC97_BUS
-struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec);
-struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
+#define snd_soc_alloc_ac97_codec(codec) \
+ snd_soc_alloc_ac97_component(&codec->component)
+#define snd_soc_new_ac97_codec(codec, id, id_mask) \
+ snd_soc_new_ac97_component(&codec->component, id, id_mask)
+#define snd_soc_free_ac97_codec(ac97) \
+ snd_soc_free_ac97_component(ac97)
+
+struct snd_ac97 *snd_soc_alloc_ac97_component(struct snd_soc_component *component);
+struct snd_ac97 *snd_soc_new_ac97_component(struct snd_soc_component *component,
unsigned int id, unsigned int id_mask);
-void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
+void snd_soc_free_ac97_component(struct snd_ac97 *ac97);
int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
@@ -1807,6 +1814,7 @@ int snd_soc_of_get_dai_name(struct device_node *of_node,
int snd_soc_of_get_dai_link_codecs(struct device *dev,
struct device_node *of_node,
struct snd_soc_dai_link *dai_link);
+void snd_soc_of_put_dai_link_codecs(struct snd_soc_dai_link *dai_link);
int snd_soc_add_dai_link(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link);
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
new file mode 100644
index 000000000000..aa86e7dba511
--- /dev/null
+++ b/include/trace/events/cachefiles.h
@@ -0,0 +1,325 @@
+/* CacheFiles tracepoints
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cachefiles
+
+#if !defined(_TRACE_CACHEFILES_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CACHEFILES_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Define enums for tracing information.
+ */
+#ifndef __CACHEFILES_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __CACHEFILES_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+enum cachefiles_obj_ref_trace {
+ cachefiles_obj_put_wait_retry = fscache_obj_ref__nr_traces,
+ cachefiles_obj_put_wait_timeo,
+ cachefiles_obj_ref__nr_traces
+};
+
+#endif
+
+/*
+ * Define enum -> string mappings for display.
+ */
+#define cachefiles_obj_kill_traces \
+ EM(FSCACHE_OBJECT_IS_STALE, "stale") \
+ EM(FSCACHE_OBJECT_NO_SPACE, "no_space") \
+ EM(FSCACHE_OBJECT_WAS_RETIRED, "was_retired") \
+ E_(FSCACHE_OBJECT_WAS_CULLED, "was_culled")
+
+#define cachefiles_obj_ref_traces \
+ EM(fscache_obj_get_add_to_deps, "GET add_to_deps") \
+ EM(fscache_obj_get_queue, "GET queue") \
+ EM(fscache_obj_put_alloc_fail, "PUT alloc_fail") \
+ EM(fscache_obj_put_attach_fail, "PUT attach_fail") \
+ EM(fscache_obj_put_drop_obj, "PUT drop_obj") \
+ EM(fscache_obj_put_enq_dep, "PUT enq_dep") \
+ EM(fscache_obj_put_queue, "PUT queue") \
+ EM(fscache_obj_put_work, "PUT work") \
+ EM(cachefiles_obj_put_wait_retry, "PUT wait_retry") \
+ E_(cachefiles_obj_put_wait_timeo, "PUT wait_timeo")
+
+/*
+ * Export enum symbols via userspace.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define E_(a, b) TRACE_DEFINE_ENUM(a);
+
+cachefiles_obj_kill_traces;
+cachefiles_obj_ref_traces;
+
+/*
+ * Now redefine the EM() and E_() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) { a, b },
+#define E_(a, b) { a, b }
+
+
+TRACE_EVENT(cachefiles_ref,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct fscache_cookie *cookie,
+ enum cachefiles_obj_ref_trace why,
+ int usage),
+
+ TP_ARGS(obj, cookie, why, usage),
+
+ /* Note that obj may be NULL */
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct fscache_cookie *, cookie )
+ __field(enum cachefiles_obj_ref_trace, why )
+ __field(int, usage )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->cookie = cookie;
+ __entry->usage = usage;
+ __entry->why = why;
+ ),
+
+ TP_printk("c=%p o=%p u=%d %s",
+ __entry->cookie, __entry->obj, __entry->usage,
+ __print_symbolic(__entry->why, cachefiles_obj_ref_traces))
+ );
+
+TRACE_EVENT(cachefiles_lookup,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de,
+ struct inode *inode),
+
+ TP_ARGS(obj, de, inode),
+
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ __field(struct inode *, inode )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ __entry->inode = inode;
+ ),
+
+ TP_printk("o=%p d=%p i=%p",
+ __entry->obj, __entry->de, __entry->inode)
+ );
+
+TRACE_EVENT(cachefiles_mkdir,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de, int ret),
+
+ TP_ARGS(obj, de, ret),
+
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ __field(int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("o=%p d=%p r=%u",
+ __entry->obj, __entry->de, __entry->ret)
+ );
+
+TRACE_EVENT(cachefiles_create,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de, int ret),
+
+ TP_ARGS(obj, de, ret),
+
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ __field(int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("o=%p d=%p r=%u",
+ __entry->obj, __entry->de, __entry->ret)
+ );
+
+TRACE_EVENT(cachefiles_unlink,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de,
+ enum fscache_why_object_killed why),
+
+ TP_ARGS(obj, de, why),
+
+ /* Note that obj may be NULL */
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ __field(enum fscache_why_object_killed, why )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ __entry->why = why;
+ ),
+
+ TP_printk("o=%p d=%p w=%s",
+ __entry->obj, __entry->de,
+ __print_symbolic(__entry->why, cachefiles_obj_kill_traces))
+ );
+
+TRACE_EVENT(cachefiles_rename,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de,
+ struct dentry *to,
+ enum fscache_why_object_killed why),
+
+ TP_ARGS(obj, de, to, why),
+
+ /* Note that obj may be NULL */
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ __field(struct dentry *, to )
+ __field(enum fscache_why_object_killed, why )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ __entry->to = to;
+ __entry->why = why;
+ ),
+
+ TP_printk("o=%p d=%p t=%p w=%s",
+ __entry->obj, __entry->de, __entry->to,
+ __print_symbolic(__entry->why, cachefiles_obj_kill_traces))
+ );
+
+TRACE_EVENT(cachefiles_mark_active,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de),
+
+ TP_ARGS(obj, de),
+
+ /* Note that obj may be NULL */
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ ),
+
+ TP_printk("o=%p d=%p",
+ __entry->obj, __entry->de)
+ );
+
+TRACE_EVENT(cachefiles_wait_active,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de,
+ struct cachefiles_object *xobj),
+
+ TP_ARGS(obj, de, xobj),
+
+ /* Note that obj may be NULL */
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ __field(struct cachefiles_object *, xobj )
+ __field(u16, flags )
+ __field(u16, fsc_flags )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ __entry->xobj = xobj;
+ __entry->flags = xobj->flags;
+ __entry->fsc_flags = xobj->fscache.flags;
+ ),
+
+ TP_printk("o=%p d=%p wo=%p wf=%x wff=%x",
+ __entry->obj, __entry->de, __entry->xobj,
+ __entry->flags, __entry->fsc_flags)
+ );
+
+TRACE_EVENT(cachefiles_mark_inactive,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de,
+ struct inode *inode),
+
+ TP_ARGS(obj, de, inode),
+
+ /* Note that obj may be NULL */
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ __field(struct inode *, inode )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ __entry->inode = inode;
+ ),
+
+ TP_printk("o=%p d=%p i=%p",
+ __entry->obj, __entry->de, __entry->inode)
+ );
+
+TRACE_EVENT(cachefiles_mark_buried,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de,
+ enum fscache_why_object_killed why),
+
+ TP_ARGS(obj, de, why),
+
+ /* Note that obj may be NULL */
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ __field(enum fscache_why_object_killed, why )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ __entry->why = why;
+ ),
+
+ TP_printk("o=%p d=%p w=%s",
+ __entry->obj, __entry->de,
+ __print_symbolic(__entry->why, cachefiles_obj_kill_traces))
+ );
+
+#endif /* _TRACE_CACHEFILES_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h
new file mode 100644
index 000000000000..686cfe997ed2
--- /dev/null
+++ b/include/trace/events/fscache.h
@@ -0,0 +1,537 @@
+/* FS-Cache tracepoints
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fscache
+
+#if !defined(_TRACE_FSCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FSCACHE_H
+
+#include <linux/fscache.h>
+#include <linux/tracepoint.h>
+
+/*
+ * Define enums for tracing information.
+ */
+#ifndef __FSCACHE_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __FSCACHE_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+enum fscache_cookie_trace {
+ fscache_cookie_collision,
+ fscache_cookie_discard,
+ fscache_cookie_get_acquire_parent,
+ fscache_cookie_get_attach_object,
+ fscache_cookie_get_reacquire,
+ fscache_cookie_get_register_netfs,
+ fscache_cookie_put_acquire_nobufs,
+ fscache_cookie_put_dup_netfs,
+ fscache_cookie_put_relinquish,
+ fscache_cookie_put_object,
+ fscache_cookie_put_parent,
+};
+
+enum fscache_page_trace {
+ fscache_page_cached,
+ fscache_page_inval,
+ fscache_page_maybe_release,
+ fscache_page_radix_clear_store,
+ fscache_page_radix_delete,
+ fscache_page_radix_insert,
+ fscache_page_radix_pend2store,
+ fscache_page_radix_set_pend,
+ fscache_page_uncache,
+ fscache_page_write,
+ fscache_page_write_end,
+ fscache_page_write_end_pend,
+ fscache_page_write_end_noc,
+ fscache_page_write_wait,
+ fscache_page_trace__nr
+};
+
+enum fscache_op_trace {
+ fscache_op_cancel,
+ fscache_op_cancel_all,
+ fscache_op_cancelled,
+ fscache_op_completed,
+ fscache_op_enqueue_async,
+ fscache_op_enqueue_mythread,
+ fscache_op_gc,
+ fscache_op_init,
+ fscache_op_put,
+ fscache_op_run,
+ fscache_op_signal,
+ fscache_op_submit,
+ fscache_op_submit_ex,
+ fscache_op_work,
+ fscache_op_trace__nr
+};
+
+enum fscache_page_op_trace {
+ fscache_page_op_alloc_one,
+ fscache_page_op_attr_changed,
+ fscache_page_op_check_consistency,
+ fscache_page_op_invalidate,
+ fscache_page_op_retr_multi,
+ fscache_page_op_retr_one,
+ fscache_page_op_write_one,
+ fscache_page_op_trace__nr
+};
+
+#endif
+
+/*
+ * Declare tracing information enums and their string mappings for display.
+ */
+#define fscache_cookie_traces \
+ EM(fscache_cookie_collision, "*COLLISION*") \
+ EM(fscache_cookie_discard, "DISCARD") \
+ EM(fscache_cookie_get_acquire_parent, "GET prn") \
+ EM(fscache_cookie_get_attach_object, "GET obj") \
+ EM(fscache_cookie_get_reacquire, "GET raq") \
+ EM(fscache_cookie_get_register_netfs, "GET net") \
+ EM(fscache_cookie_put_acquire_nobufs, "PUT nbf") \
+ EM(fscache_cookie_put_dup_netfs, "PUT dnt") \
+ EM(fscache_cookie_put_relinquish, "PUT rlq") \
+ EM(fscache_cookie_put_object, "PUT obj") \
+ E_(fscache_cookie_put_parent, "PUT prn")
+
+#define fscache_page_traces \
+ EM(fscache_page_cached, "Cached ") \
+ EM(fscache_page_inval, "InvalPg") \
+ EM(fscache_page_maybe_release, "MayRels") \
+ EM(fscache_page_uncache, "Uncache") \
+ EM(fscache_page_radix_clear_store, "RxCStr ") \
+ EM(fscache_page_radix_delete, "RxDel ") \
+ EM(fscache_page_radix_insert, "RxIns ") \
+ EM(fscache_page_radix_pend2store, "RxP2S ") \
+ EM(fscache_page_radix_set_pend, "RxSPend ") \
+ EM(fscache_page_write, "WritePg") \
+ EM(fscache_page_write_end, "EndPgWr") \
+ EM(fscache_page_write_end_pend, "EndPgWP") \
+ EM(fscache_page_write_end_noc, "EndPgNC") \
+ E_(fscache_page_write_wait, "WtOnWrt")
+
+#define fscache_op_traces \
+ EM(fscache_op_cancel, "Cancel1") \
+ EM(fscache_op_cancel_all, "CancelA") \
+ EM(fscache_op_cancelled, "Canclld") \
+ EM(fscache_op_completed, "Complet") \
+ EM(fscache_op_enqueue_async, "EnqAsyn") \
+ EM(fscache_op_enqueue_mythread, "EnqMyTh") \
+ EM(fscache_op_gc, "GC ") \
+ EM(fscache_op_init, "Init ") \
+ EM(fscache_op_put, "Put ") \
+ EM(fscache_op_run, "Run ") \
+ EM(fscache_op_signal, "Signal ") \
+ EM(fscache_op_submit, "Submit ") \
+ EM(fscache_op_submit_ex, "SubmitX") \
+ E_(fscache_op_work, "Work ")
+
+#define fscache_page_op_traces \
+ EM(fscache_page_op_alloc_one, "Alloc1 ") \
+ EM(fscache_page_op_attr_changed, "AttrChg") \
+ EM(fscache_page_op_check_consistency, "CheckCn") \
+ EM(fscache_page_op_invalidate, "Inval ") \
+ EM(fscache_page_op_retr_multi, "RetrMul") \
+ EM(fscache_page_op_retr_one, "Retr1 ") \
+ E_(fscache_page_op_write_one, "Write1 ")
+
+/*
+ * Export enum symbols to userspace.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define E_(a, b) TRACE_DEFINE_ENUM(a);
+
+fscache_cookie_traces;
+
+/*
+ * Now redefine the EM() and E_() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) { a, b },
+#define E_(a, b) { a, b }
+
+
+TRACE_EVENT(fscache_cookie,
+ TP_PROTO(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where,
+ int usage),
+
+ TP_ARGS(cookie, where, usage),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(struct fscache_cookie *, parent )
+ __field(enum fscache_cookie_trace, where )
+ __field(int, usage )
+ __field(int, n_children )
+ __field(int, n_active )
+ __field(u8, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->parent = cookie->parent;
+ __entry->where = where;
+ __entry->usage = usage;
+ __entry->n_children = atomic_read(&cookie->n_children);
+ __entry->n_active = atomic_read(&cookie->n_active);
+ __entry->flags = cookie->flags;
+ ),
+
+ TP_printk("%s c=%p u=%d p=%p Nc=%d Na=%d f=%02x",
+ __print_symbolic(__entry->where, fscache_cookie_traces),
+ __entry->cookie, __entry->usage,
+ __entry->parent, __entry->n_children, __entry->n_active,
+ __entry->flags)
+ );
+
+TRACE_EVENT(fscache_netfs,
+ TP_PROTO(struct fscache_netfs *netfs),
+
+ TP_ARGS(netfs),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __array(char, name, 8 )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = netfs->primary_index;
+ strncpy(__entry->name, netfs->name, 8);
+ __entry->name[7] = 0;
+ ),
+
+ TP_printk("c=%p n=%s",
+ __entry->cookie, __entry->name)
+ );
+
+TRACE_EVENT(fscache_acquire,
+ TP_PROTO(struct fscache_cookie *cookie),
+
+ TP_ARGS(cookie),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(struct fscache_cookie *, parent )
+ __array(char, name, 8 )
+ __field(int, p_usage )
+ __field(int, p_n_children )
+ __field(u8, p_flags )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->parent = cookie->parent;
+ __entry->p_usage = atomic_read(&cookie->parent->usage);
+ __entry->p_n_children = atomic_read(&cookie->parent->n_children);
+ __entry->p_flags = cookie->parent->flags;
+ memcpy(__entry->name, cookie->def->name, 8);
+ __entry->name[7] = 0;
+ ),
+
+ TP_printk("c=%p p=%p pu=%d pc=%d pf=%02x n=%s",
+ __entry->cookie, __entry->parent, __entry->p_usage,
+ __entry->p_n_children, __entry->p_flags, __entry->name)
+ );
+
+TRACE_EVENT(fscache_relinquish,
+ TP_PROTO(struct fscache_cookie *cookie, bool retire),
+
+ TP_ARGS(cookie, retire),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(struct fscache_cookie *, parent )
+ __field(int, usage )
+ __field(int, n_children )
+ __field(int, n_active )
+ __field(u8, flags )
+ __field(bool, retire )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->parent = cookie->parent;
+ __entry->usage = atomic_read(&cookie->usage);
+ __entry->n_children = atomic_read(&cookie->n_children);
+ __entry->n_active = atomic_read(&cookie->n_active);
+ __entry->flags = cookie->flags;
+ __entry->retire = retire;
+ ),
+
+ TP_printk("c=%p u=%d p=%p Nc=%d Na=%d f=%02x r=%u",
+ __entry->cookie, __entry->usage,
+ __entry->parent, __entry->n_children, __entry->n_active,
+ __entry->flags, __entry->retire)
+ );
+
+TRACE_EVENT(fscache_enable,
+ TP_PROTO(struct fscache_cookie *cookie),
+
+ TP_ARGS(cookie),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(int, usage )
+ __field(int, n_children )
+ __field(int, n_active )
+ __field(u8, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->usage = atomic_read(&cookie->usage);
+ __entry->n_children = atomic_read(&cookie->n_children);
+ __entry->n_active = atomic_read(&cookie->n_active);
+ __entry->flags = cookie->flags;
+ ),
+
+ TP_printk("c=%p u=%d Nc=%d Na=%d f=%02x",
+ __entry->cookie, __entry->usage,
+ __entry->n_children, __entry->n_active, __entry->flags)
+ );
+
+TRACE_EVENT(fscache_disable,
+ TP_PROTO(struct fscache_cookie *cookie),
+
+ TP_ARGS(cookie),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(int, usage )
+ __field(int, n_children )
+ __field(int, n_active )
+ __field(u8, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->usage = atomic_read(&cookie->usage);
+ __entry->n_children = atomic_read(&cookie->n_children);
+ __entry->n_active = atomic_read(&cookie->n_active);
+ __entry->flags = cookie->flags;
+ ),
+
+ TP_printk("c=%p u=%d Nc=%d Na=%d f=%02x",
+ __entry->cookie, __entry->usage,
+ __entry->n_children, __entry->n_active, __entry->flags)
+ );
+
+TRACE_EVENT(fscache_osm,
+ TP_PROTO(struct fscache_object *object,
+ const struct fscache_state *state,
+ bool wait, bool oob, s8 event_num),
+
+ TP_ARGS(object, state, wait, oob, event_num),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(struct fscache_object *, object )
+ __array(char, state, 8 )
+ __field(bool, wait )
+ __field(bool, oob )
+ __field(s8, event_num )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = object->cookie;
+ __entry->object = object;
+ __entry->wait = wait;
+ __entry->oob = oob;
+ __entry->event_num = event_num;
+ memcpy(__entry->state, state->short_name, 8);
+ ),
+
+ TP_printk("c=%p o=%p %s %s%sev=%d",
+ __entry->cookie,
+ __entry->object,
+ __entry->state,
+ __print_symbolic(__entry->wait,
+ { true, "WAIT" },
+ { false, "WORK" }),
+ __print_symbolic(__entry->oob,
+ { true, " OOB " },
+ { false, " " }),
+ __entry->event_num)
+ );
+
+TRACE_EVENT(fscache_page,
+ TP_PROTO(struct fscache_cookie *cookie, struct page *page,
+ enum fscache_page_trace why),
+
+ TP_ARGS(cookie, page, why),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(pgoff_t, page )
+ __field(enum fscache_page_trace, why )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->page = page->index;
+ __entry->why = why;
+ ),
+
+ TP_printk("c=%p %s pg=%lx",
+ __entry->cookie,
+ __print_symbolic(__entry->why, fscache_page_traces),
+ __entry->page)
+ );
+
+TRACE_EVENT(fscache_check_page,
+ TP_PROTO(struct fscache_cookie *cookie, struct page *page,
+ void *val, int n),
+
+ TP_ARGS(cookie, page, val, n),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(void *, page )
+ __field(void *, val )
+ __field(int, n )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->page = page;
+ __entry->val = val;
+ __entry->n = n;
+ ),
+
+ TP_printk("c=%p pg=%p val=%p n=%d",
+ __entry->cookie, __entry->page, __entry->val, __entry->n)
+ );
+
+TRACE_EVENT(fscache_wake_cookie,
+ TP_PROTO(struct fscache_cookie *cookie),
+
+ TP_ARGS(cookie),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ ),
+
+ TP_printk("c=%p", __entry->cookie)
+ );
+
+TRACE_EVENT(fscache_op,
+ TP_PROTO(struct fscache_cookie *cookie, struct fscache_operation *op,
+ enum fscache_op_trace why),
+
+ TP_ARGS(cookie, op, why),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(struct fscache_operation *, op )
+ __field(enum fscache_op_trace, why )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->op = op;
+ __entry->why = why;
+ ),
+
+ TP_printk("c=%p op=%p %s",
+ __entry->cookie, __entry->op,
+ __print_symbolic(__entry->why, fscache_op_traces))
+ );
+
+TRACE_EVENT(fscache_page_op,
+ TP_PROTO(struct fscache_cookie *cookie, struct page *page,
+ struct fscache_operation *op, enum fscache_page_op_trace what),
+
+ TP_ARGS(cookie, page, op, what),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(pgoff_t, page )
+ __field(struct fscache_operation *, op )
+ __field(enum fscache_page_op_trace, what )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->page = page ? page->index : 0;
+ __entry->op = op;
+ __entry->what = what;
+ ),
+
+ TP_printk("c=%p %s pg=%lx op=%p",
+ __entry->cookie,
+ __print_symbolic(__entry->what, fscache_page_op_traces),
+ __entry->page, __entry->op)
+ );
+
+TRACE_EVENT(fscache_wrote_page,
+ TP_PROTO(struct fscache_cookie *cookie, struct page *page,
+ struct fscache_operation *op, int ret),
+
+ TP_ARGS(cookie, page, op, ret),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(pgoff_t, page )
+ __field(struct fscache_operation *, op )
+ __field(int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->page = page->index;
+ __entry->op = op;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("c=%p pg=%lx op=%p ret=%d",
+ __entry->cookie, __entry->page, __entry->op, __entry->ret)
+ );
+
+TRACE_EVENT(fscache_gang_lookup,
+ TP_PROTO(struct fscache_cookie *cookie, struct fscache_operation *op,
+ void **results, int n, pgoff_t store_limit),
+
+ TP_ARGS(cookie, op, results, n, store_limit),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(struct fscache_operation *, op )
+ __field(pgoff_t, results0 )
+ __field(int, n )
+ __field(pgoff_t, store_limit )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->op = op;
+ __entry->results0 = results[0] ? ((struct page *)results[0])->index : (pgoff_t)-1;
+ __entry->n = n;
+ __entry->store_limit = store_limit;
+ ),
+
+ TP_printk("c=%p op=%p r0=%lx n=%d sl=%lx",
+ __entry->cookie, __entry->op, __entry->results0, __entry->n,
+ __entry->store_limit)
+ );
+
+#endif /* _TRACE_FSCACHE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index bcf4daccd6be..711372845945 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -20,7 +20,7 @@
EM( MR_SYSCALL, "syscall_or_cpuset") \
EM( MR_MEMPOLICY_MBIND, "mempolicy_mbind") \
EM( MR_NUMA_MISPLACED, "numa_misplaced") \
- EMe(MR_CMA, "cma")
+ EMe(MR_CONTIG_RANGE, "contig_range")
/*
* First define the enums in the above macros to be exported to userspace
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 970c91a83173..922cb8968fb2 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -485,31 +485,55 @@ TRACE_EVENT(xs_tcp_data_recv,
{ (1UL << RQ_BUSY), "RQ_BUSY"})
TRACE_EVENT(svc_recv,
- TP_PROTO(struct svc_rqst *rqst, int status),
+ TP_PROTO(struct svc_rqst *rqst, int len),
- TP_ARGS(rqst, status),
+ TP_ARGS(rqst, len),
TP_STRUCT__entry(
__field(u32, xid)
- __field(int, status)
+ __field(int, len)
__field(unsigned long, flags)
- __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
+ __string(addr, rqst->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xid = status > 0 ? be32_to_cpu(rqst->rq_xid) : 0;
- __entry->status = status;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->len = len;
__entry->flags = rqst->rq_flags;
- memcpy(__get_dynamic_array(addr),
- &rqst->rq_addr, rqst->rq_addrlen);
+ __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
),
- TP_printk("addr=%pIScp xid=0x%08x status=%d flags=%s",
- (struct sockaddr *)__get_dynamic_array(addr),
- __entry->xid, __entry->status,
+ TP_printk("addr=%s xid=0x%08x len=%d flags=%s",
+ __get_str(addr), __entry->xid, __entry->len,
show_rqstp_flags(__entry->flags))
);
+TRACE_EVENT(svc_process,
+ TP_PROTO(const struct svc_rqst *rqst, const char *name),
+
+ TP_ARGS(rqst, name),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(u32, vers)
+ __field(u32, proc)
+ __string(service, name)
+ __string(addr, rqst->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->vers = rqst->rq_vers;
+ __entry->proc = rqst->rq_proc;
+ __assign_str(service, name);
+ __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%u",
+ __get_str(addr), __entry->xid,
+ __get_str(service), __entry->vers, __entry->proc)
+);
+
DECLARE_EVENT_CLASS(svc_rqst_event,
TP_PROTO(struct svc_rqst *rqst),
@@ -519,20 +543,18 @@ DECLARE_EVENT_CLASS(svc_rqst_event,
TP_STRUCT__entry(
__field(u32, xid)
__field(unsigned long, flags)
- __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
+ __string(addr, rqst->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
__entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->flags = rqst->rq_flags;
- memcpy(__get_dynamic_array(addr),
- &rqst->rq_addr, rqst->rq_addrlen);
+ __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
),
- TP_printk("addr=%pIScp rq_xid=0x%08x flags=%s",
- (struct sockaddr *)__get_dynamic_array(addr),
- __entry->xid,
- show_rqstp_flags(__entry->flags))
+ TP_printk("addr=%s xid=0x%08x flags=%s",
+ __get_str(addr), __entry->xid,
+ show_rqstp_flags(__entry->flags))
);
DEFINE_EVENT(svc_rqst_event, svc_defer,
@@ -553,27 +575,21 @@ DECLARE_EVENT_CLASS(svc_rqst_status,
__field(u32, xid)
__field(int, status)
__field(unsigned long, flags)
- __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
+ __string(addr, rqst->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
__entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->status = status;
__entry->flags = rqst->rq_flags;
- memcpy(__get_dynamic_array(addr),
- &rqst->rq_addr, rqst->rq_addrlen);
+ __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
),
- TP_printk("addr=%pIScp rq_xid=0x%08x status=%d flags=%s",
- (struct sockaddr *)__get_dynamic_array(addr),
- __entry->xid,
- __entry->status, show_rqstp_flags(__entry->flags))
+ TP_printk("addr=%s xid=0x%08x status=%d flags=%s",
+ __get_str(addr), __entry->xid,
+ __entry->status, show_rqstp_flags(__entry->flags))
);
-DEFINE_EVENT(svc_rqst_status, svc_process,
- TP_PROTO(struct svc_rqst *rqst, int status),
- TP_ARGS(rqst, status));
-
DEFINE_EVENT(svc_rqst_status, svc_send,
TP_PROTO(struct svc_rqst *rqst, int status),
TP_ARGS(rqst, status));
@@ -591,7 +607,9 @@ DEFINE_EVENT(svc_rqst_status, svc_send,
{ (1UL << XPT_OLD), "XPT_OLD"}, \
{ (1UL << XPT_LISTENER), "XPT_LISTENER"}, \
{ (1UL << XPT_CACHE_AUTH), "XPT_CACHE_AUTH"}, \
- { (1UL << XPT_LOCAL), "XPT_LOCAL"})
+ { (1UL << XPT_LOCAL), "XPT_LOCAL"}, \
+ { (1UL << XPT_KILL_TEMP), "XPT_KILL_TEMP"}, \
+ { (1UL << XPT_CONG_CTRL), "XPT_CONG_CTRL"})
TRACE_EVENT(svc_xprt_do_enqueue,
TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst),
@@ -602,26 +620,19 @@ TRACE_EVENT(svc_xprt_do_enqueue,
__field(struct svc_xprt *, xprt)
__field(int, pid)
__field(unsigned long, flags)
- __dynamic_array(unsigned char, addr, xprt != NULL ?
- xprt->xpt_remotelen : 0)
+ __string(addr, xprt->xpt_remotebuf)
),
TP_fast_assign(
__entry->xprt = xprt;
__entry->pid = rqst? rqst->rq_task->pid : 0;
- if (xprt) {
- memcpy(__get_dynamic_array(addr),
- &xprt->xpt_remote,
- xprt->xpt_remotelen);
- __entry->flags = xprt->xpt_flags;
- } else
- __entry->flags = 0;
- ),
-
- TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
- __get_dynamic_array_len(addr) != 0 ?
- (struct sockaddr *)__get_dynamic_array(addr) : NULL,
- __entry->pid, show_svc_xprt_flags(__entry->flags))
+ __entry->flags = xprt->xpt_flags;
+ __assign_str(addr, xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("xprt=%p addr=%s pid=%d flags=%s",
+ __entry->xprt, __get_str(addr),
+ __entry->pid, show_svc_xprt_flags(__entry->flags))
);
DECLARE_EVENT_CLASS(svc_xprt_event,
@@ -632,35 +643,50 @@ DECLARE_EVENT_CLASS(svc_xprt_event,
TP_STRUCT__entry(
__field(struct svc_xprt *, xprt)
__field(unsigned long, flags)
- __dynamic_array(unsigned char, addr, xprt != NULL ?
- xprt->xpt_remotelen : 0)
+ __string(addr, xprt->xpt_remotebuf)
),
TP_fast_assign(
__entry->xprt = xprt;
- if (xprt) {
- memcpy(__get_dynamic_array(addr),
- &xprt->xpt_remote,
- xprt->xpt_remotelen);
- __entry->flags = xprt->xpt_flags;
- } else
- __entry->flags = 0;
- ),
-
- TP_printk("xprt=0x%p addr=%pIScp flags=%s", __entry->xprt,
- __get_dynamic_array_len(addr) != 0 ?
- (struct sockaddr *)__get_dynamic_array(addr) : NULL,
- show_svc_xprt_flags(__entry->flags))
-);
+ __entry->flags = xprt->xpt_flags;
+ __assign_str(addr, xprt->xpt_remotebuf);
+ ),
-DEFINE_EVENT(svc_xprt_event, svc_xprt_dequeue,
- TP_PROTO(struct svc_xprt *xprt),
- TP_ARGS(xprt));
+ TP_printk("xprt=%p addr=%s flags=%s",
+ __entry->xprt, __get_str(addr),
+ show_svc_xprt_flags(__entry->flags))
+);
DEFINE_EVENT(svc_xprt_event, svc_xprt_no_write_space,
TP_PROTO(struct svc_xprt *xprt),
TP_ARGS(xprt));
+TRACE_EVENT(svc_xprt_dequeue,
+ TP_PROTO(struct svc_rqst *rqst),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ __field(struct svc_xprt *, xprt)
+ __field(unsigned long, flags)
+ __field(unsigned long, wakeup)
+ __string(addr, rqst->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->xprt = rqst->rq_xprt;
+ __entry->flags = rqst->rq_xprt->xpt_flags;
+ __entry->wakeup = ktime_to_us(ktime_sub(ktime_get(),
+ rqst->rq_qtime));
+ __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("xprt=%p addr=%s flags=%s wakeup-us=%lu",
+ __entry->xprt, __get_str(addr),
+ show_svc_xprt_flags(__entry->flags),
+ __entry->wakeup)
+);
+
TRACE_EVENT(svc_wake_up,
TP_PROTO(int pid),
@@ -686,28 +712,42 @@ TRACE_EVENT(svc_handle_xprt,
__field(struct svc_xprt *, xprt)
__field(int, len)
__field(unsigned long, flags)
- __dynamic_array(unsigned char, addr, xprt != NULL ?
- xprt->xpt_remotelen : 0)
+ __string(addr, xprt->xpt_remotebuf)
),
TP_fast_assign(
__entry->xprt = xprt;
__entry->len = len;
- if (xprt) {
- memcpy(__get_dynamic_array(addr),
- &xprt->xpt_remote,
- xprt->xpt_remotelen);
- __entry->flags = xprt->xpt_flags;
- } else
- __entry->flags = 0;
- ),
-
- TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
- __get_dynamic_array_len(addr) != 0 ?
- (struct sockaddr *)__get_dynamic_array(addr) : NULL,
+ __entry->flags = xprt->xpt_flags;
+ __assign_str(addr, xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("xprt=%p addr=%s len=%d flags=%s",
+ __entry->xprt, __get_str(addr),
__entry->len, show_svc_xprt_flags(__entry->flags))
);
+TRACE_EVENT(svc_stats_latency,
+ TP_PROTO(const struct svc_rqst *rqst),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(unsigned long, execute)
+ __string(addr, rqst->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->execute = ktime_to_us(ktime_sub(ktime_get(),
+ rqst->rq_stime));
+ __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s xid=0x%08x execute-us=%lu",
+ __get_str(addr), __entry->xid, __entry->execute)
+);
DECLARE_EVENT_CLASS(svc_deferred_event,
TP_PROTO(struct svc_deferred_req *dr),
@@ -716,18 +756,16 @@ DECLARE_EVENT_CLASS(svc_deferred_event,
TP_STRUCT__entry(
__field(u32, xid)
- __dynamic_array(unsigned char, addr, dr->addrlen)
+ __string(addr, dr->xprt->xpt_remotebuf)
),
TP_fast_assign(
__entry->xid = be32_to_cpu(*(__be32 *)(dr->args +
(dr->xprt_hlen>>2)));
- memcpy(__get_dynamic_array(addr), &dr->addr, dr->addrlen);
+ __assign_str(addr, dr->xprt->xpt_remotebuf);
),
- TP_printk("addr=%pIScp xid=0x%08x",
- (struct sockaddr *)__get_dynamic_array(addr),
- __entry->xid)
+ TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid)
);
DEFINE_EVENT(svc_deferred_event, svc_drop_deferred,
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index e0b8b9173e1c..6570c5b45ba1 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -78,26 +78,29 @@ TRACE_EVENT(mm_vmscan_kswapd_wake,
TRACE_EVENT(mm_vmscan_wakeup_kswapd,
- TP_PROTO(int nid, int zid, int order),
+ TP_PROTO(int nid, int zid, int order, gfp_t gfp_flags),
- TP_ARGS(nid, zid, order),
+ TP_ARGS(nid, zid, order, gfp_flags),
TP_STRUCT__entry(
- __field( int, nid )
- __field( int, zid )
- __field( int, order )
+ __field( int, nid )
+ __field( int, zid )
+ __field( int, order )
+ __field( gfp_t, gfp_flags )
),
TP_fast_assign(
__entry->nid = nid;
__entry->zid = zid;
__entry->order = order;
+ __entry->gfp_flags = gfp_flags;
),
- TP_printk("nid=%d zid=%d order=%d",
+ TP_printk("nid=%d zid=%d order=%d gfp_flags=%s",
__entry->nid,
__entry->zid,
- __entry->order)
+ __entry->order,
+ show_gfp_flags(__entry->gfp_flags))
);
DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index 6088bca89917..544208fd3db1 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -94,6 +94,9 @@ typedef struct siginfo {
unsigned int _flags; /* see ia64 si_flags */
unsigned long _isr; /* isr */
#endif
+
+#define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? \
+ sizeof(short) : __alignof__(void *))
union {
/*
* used when si_code=BUS_MCEERR_AR or
@@ -102,13 +105,13 @@ typedef struct siginfo {
short _addr_lsb; /* LSB of the reported address */
/* used when si_code=SEGV_BNDERR */
struct {
- void *_dummy_bnd;
+ char _dummy_bnd[__ADDR_BND_PKEY_PAD];
void __user *_lower;
void __user *_upper;
} _addr_bnd;
/* used when si_code=SEGV_PKUERR */
struct {
- void *_dummy_pkey;
+ char _dummy_pkey[__ADDR_BND_PKEY_PAD];
__u32 _pkey;
} _addr_pkey;
};
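To make the padding rule above concrete, a small illustration of my own (assuming an LP64 ABI where __alignof__(void *) == 8 and sizeof(short) == 2):

	/*
	 * __ADDR_BND_PKEY_PAD == max(__alignof__(void *), sizeof(short)) == 8,
	 * so within the union:
	 *   _addr_lsb            occupies offsets 0..1
	 *   _addr_bnd._dummy_bnd occupies offsets 0..7
	 *   _addr_bnd._lower     starts at offset 8
	 *   _addr_bnd._upper     starts at offset 16
	 *   _addr_pkey._pkey     starts at offset 8
	 * i.e. the same offsets the old 'void *' placeholders produced; on a
	 * 32-bit ABI the pad evaluates to 4 and the offsets again match the
	 * pointer-based layout.
	 */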
diff --git a/include/uapi/linux/blktrace_api.h b/include/uapi/linux/blktrace_api.h
index 3c50e07ee833..690621b610e5 100644
--- a/include/uapi/linux/blktrace_api.h
+++ b/include/uapi/linux/blktrace_api.h
@@ -101,7 +101,7 @@ enum blktrace_notify {
struct blk_io_trace {
__u32 magic; /* MAGIC << 8 | version */
__u32 sequence; /* event number */
- __u64 time; /* in microseconds */
+ __u64 time; /* in nanoseconds */
__u64 sector; /* disk offset */
__u32 bytes; /* transfer length */
__u32 action; /* what happened */
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 14c44ec8b622..d1e49514977b 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -270,9 +270,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 37
+#define DM_VERSION_MINOR 39
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2017-09-20)"
+#define DM_VERSION_EXTRA "-ioctl (2018-04-03)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/inotify.h b/include/uapi/linux/inotify.h
index 5474461683db..4800bf2a531d 100644
--- a/include/uapi/linux/inotify.h
+++ b/include/uapi/linux/inotify.h
@@ -71,5 +71,13 @@ struct inotify_event {
#define IN_CLOEXEC O_CLOEXEC
#define IN_NONBLOCK O_NONBLOCK
+/*
+ * ioctl numbers: inotify uses 'I' prefix for all ioctls,
+ * except historical FIONREAD, which is based on 'T'.
+ *
+ * INOTIFY_IOC_SETNEXTWD: set the desired number of the next created
+ * watch descriptor.
+ */
+#define INOTIFY_IOC_SETNEXTWD _IOW('I', 0, __s32)
#endif /* _UAPI_LINUX_INOTIFY_H */
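A quick userspace sketch of the new ioctl (my own, not from the patch; it assumes the kernel-side handler, which is not part of this header change, takes the value directly as the ioctl argument):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/inotify.h>
	#include <linux/types.h>

	#ifndef INOTIFY_IOC_SETNEXTWD
	#define INOTIFY_IOC_SETNEXTWD	_IOW('I', 0, __s32)	/* mirrors the patch */
	#endif

	int main(void)
	{
		int fd = inotify_init1(IN_CLOEXEC);

		if (fd < 0)
			return 1;
		/* Request that the next watch created on this instance gets wd 1000. */
		if (ioctl(fd, INOTIFY_IOC_SETNEXTWD, 1000) < 0)
			perror("INOTIFY_IOC_SETNEXTWD");
		return 0;
	}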
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 7b26d4b0b052..1065006c9bf5 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -396,6 +396,10 @@ struct kvm_run {
char padding[256];
};
+ /* 2048 is the size of the char array used to bound/pad the size
+ * of the union that holds sync regs.
+ */
+ #define SYNC_REGS_SIZE_BYTES 2048
/*
* shared registers between kvm and userspace.
* kvm_valid_regs specifies the register classes set by the host
@@ -407,7 +411,7 @@ struct kvm_run {
__u64 kvm_dirty_regs;
union {
struct kvm_sync_regs regs;
- char padding[2048];
+ char padding[SYNC_REGS_SIZE_BYTES];
} s;
};
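Giving the padding a name also makes it possible to bound the union at compile time; a hedged sketch of the kind of check an architecture could carry (not part of this patch):

	BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES);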
@@ -925,7 +929,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_GS 140
#define KVM_CAP_S390_AIS 141
#define KVM_CAP_SPAPR_TCE_VFIO 142
-#define KVM_CAP_X86_GUEST_MWAIT 143
+#define KVM_CAP_X86_DISABLE_EXITS 143
#define KVM_CAP_ARM_USER_IRQ 144
#define KVM_CAP_S390_CMMA_MIGRATION 145
#define KVM_CAP_PPC_FWNMI 146
@@ -936,6 +940,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_PPC_GET_CPU_CHAR 151
#define KVM_CAP_S390_BPB 152
#define KVM_CAP_GET_MSR_FEATURES 153
+#define KVM_CAP_HYPERV_EVENTFD 154
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1375,6 +1380,10 @@ struct kvm_enc_region {
#define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region)
#define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region)
+/* Available with KVM_CAP_HYPERV_EVENTFD */
+#define KVM_HYPERV_EVENTFD _IOW(KVMIO, 0xbd, struct kvm_hyperv_eventfd)
+
+
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
@@ -1515,4 +1524,14 @@ struct kvm_assigned_msix_entry {
#define KVM_ARM_DEV_EL1_PTIMER (1 << 1)
#define KVM_ARM_DEV_PMU (1 << 2)
+struct kvm_hyperv_eventfd {
+ __u32 conn_id;
+ __s32 fd;
+ __u32 flags;
+ __u32 padding[3];
+};
+
+#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
+#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)
+
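A minimal userspace sketch of the new capability (assuming, as for comparable KVM ioctls, that it is issued on the VM file descriptor; vm_fd, conn_id and efd are hypothetical and would come from KVM_CREATE_VM, the guest-visible connection id and eventfd() respectively):

	struct kvm_hyperv_eventfd he = {
		.conn_id = conn_id & KVM_HYPERV_CONN_ID_MASK,
		.fd      = efd,	/* eventfd to be signalled */
		.flags   = 0,	/* or KVM_HYPERV_EVENTFD_DEASSIGN to tear down */
	};

	if (ioctl(vm_fd, KVM_HYPERV_EVENTFD, &he) < 0)
		perror("KVM_HYPERV_EVENTFD");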
#endif /* __LINUX_KVM_H */
diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h
index a45d0754102e..fde753735aba 100644
--- a/include/uapi/linux/msdos_fs.h
+++ b/include/uapi/linux/msdos_fs.h
@@ -10,7 +10,9 @@
* The MS-DOS filesystem constants/structures
*/
+#ifndef SECTOR_SIZE
#define SECTOR_SIZE 512 /* sector size (bytes) */
+#endif
#define SECTOR_BITS 9 /* log2(SECTOR_SIZE) */
#define MSDOS_DPB (MSDOS_DPS) /* dir entries per block */
#define MSDOS_DPB_BITS 4 /* log2(MSDOS_DPB) */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 0c79eac5e9b8..103ba797a8f3 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -520,6 +520,7 @@
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
+#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */
#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
#define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */
@@ -547,6 +548,7 @@
#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 /* Current Link Speed 2.5GT/s */
#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 /* Current Link Speed 5.0GT/s */
#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 /* Current Link Speed 8.0GT/s */
+#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */
#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */
#define PCI_EXP_LNKSTA_NLW_X1 0x0010 /* Current Link Width x1 */
#define PCI_EXP_LNKSTA_NLW_X2 0x0020 /* Current Link Width x2 */
@@ -648,8 +650,9 @@
#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints without link end here */
#define PCI_EXP_LNKCAP2 44 /* Link Capabilities 2 */
#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
-#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5.0GT/s */
-#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8.0GT/s */
+#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */
+#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */
+#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
diff --git a/include/uapi/linux/qemu_fw_cfg.h b/include/uapi/linux/qemu_fw_cfg.h
new file mode 100644
index 000000000000..e089c0159ec2
--- /dev/null
+++ b/include/uapi/linux/qemu_fw_cfg.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+#ifndef _LINUX_FW_CFG_H
+#define _LINUX_FW_CFG_H
+
+#include <linux/types.h>
+
+#define FW_CFG_ACPI_DEVICE_ID "QEMU0002"
+
+/* selector key values for "well-known" fw_cfg entries */
+#define FW_CFG_SIGNATURE 0x00
+#define FW_CFG_ID 0x01
+#define FW_CFG_UUID 0x02
+#define FW_CFG_RAM_SIZE 0x03
+#define FW_CFG_NOGRAPHIC 0x04
+#define FW_CFG_NB_CPUS 0x05
+#define FW_CFG_MACHINE_ID 0x06
+#define FW_CFG_KERNEL_ADDR 0x07
+#define FW_CFG_KERNEL_SIZE 0x08
+#define FW_CFG_KERNEL_CMDLINE 0x09
+#define FW_CFG_INITRD_ADDR 0x0a
+#define FW_CFG_INITRD_SIZE 0x0b
+#define FW_CFG_BOOT_DEVICE 0x0c
+#define FW_CFG_NUMA 0x0d
+#define FW_CFG_BOOT_MENU 0x0e
+#define FW_CFG_MAX_CPUS 0x0f
+#define FW_CFG_KERNEL_ENTRY 0x10
+#define FW_CFG_KERNEL_DATA 0x11
+#define FW_CFG_INITRD_DATA 0x12
+#define FW_CFG_CMDLINE_ADDR 0x13
+#define FW_CFG_CMDLINE_SIZE 0x14
+#define FW_CFG_CMDLINE_DATA 0x15
+#define FW_CFG_SETUP_ADDR 0x16
+#define FW_CFG_SETUP_SIZE 0x17
+#define FW_CFG_SETUP_DATA 0x18
+#define FW_CFG_FILE_DIR 0x19
+
+#define FW_CFG_FILE_FIRST 0x20
+#define FW_CFG_FILE_SLOTS_MIN 0x10
+
+#define FW_CFG_WRITE_CHANNEL 0x4000
+#define FW_CFG_ARCH_LOCAL 0x8000
+#define FW_CFG_ENTRY_MASK (~(FW_CFG_WRITE_CHANNEL | FW_CFG_ARCH_LOCAL))
+
+#define FW_CFG_INVALID 0xffff
+
+/* width in bytes of fw_cfg control register */
+#define FW_CFG_CTL_SIZE 0x02
+
+/* fw_cfg "file name" is up to 56 characters (including terminating nul) */
+#define FW_CFG_MAX_FILE_PATH 56
+
+/* size in bytes of fw_cfg signature */
+#define FW_CFG_SIG_SIZE 4
+
+/* FW_CFG_ID bits */
+#define FW_CFG_VERSION 0x01
+#define FW_CFG_VERSION_DMA 0x02
+
+/* fw_cfg file directory entry type */
+struct fw_cfg_file {
+ __be32 size;
+ __be16 select;
+ __u16 reserved;
+ char name[FW_CFG_MAX_FILE_PATH];
+};
+
+/* FW_CFG_DMA_CONTROL bits */
+#define FW_CFG_DMA_CTL_ERROR 0x01
+#define FW_CFG_DMA_CTL_READ 0x02
+#define FW_CFG_DMA_CTL_SKIP 0x04
+#define FW_CFG_DMA_CTL_SELECT 0x08
+#define FW_CFG_DMA_CTL_WRITE 0x10
+
+#define FW_CFG_DMA_SIGNATURE 0x51454d5520434647ULL /* "QEMU CFG" */
+
+/* Having control as the first field allows for different structures to be
+ * selected by this field, which might be useful in the future.
+ */
+struct fw_cfg_dma_access {
+ __be32 control;
+ __be32 length;
+ __be64 address;
+};
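For orientation, a sketch of how a guest driver might fill this descriptor for a DMA read (this assumes the QEMU fw_cfg DMA convention in which the selector rides in the upper 16 bits of control when SELECT is set; sel, len and buf_phys are hypothetical):

	struct fw_cfg_dma_access d = {
		.control = cpu_to_be32((sel << 16) |
				       FW_CFG_DMA_CTL_SELECT | FW_CFG_DMA_CTL_READ),
		.length  = cpu_to_be32(len),
		.address = cpu_to_be64(buf_phys),	/* guest-physical buffer */
	};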
+
+#define FW_CFG_VMCOREINFO_FILENAME "etc/vmcoreinfo"
+
+#define FW_CFG_VMCOREINFO_FORMAT_NONE 0x0
+#define FW_CFG_VMCOREINFO_FORMAT_ELF 0x1
+
+struct fw_cfg_vmcoreinfo {
+ __le16 host_format;
+ __le16 guest_format;
+ __le32 size;
+ __le64 paddr;
+};
+
+#endif
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index afd4346386e0..b64d583bf053 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -127,6 +127,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_STREAM_SCHEDULER 123
#define SCTP_STREAM_SCHEDULER_VALUE 124
#define SCTP_INTERLEAVING_SUPPORTED 125
+#define SCTP_SENDMSG_CONNECT 126
/* PR-SCTP policies */
#define SCTP_PR_SCTP_NONE 0x0000
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index da3315ed1bcd..3a78e7145689 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -27,6 +27,7 @@
/* bInterfaceProtocol values to denote the version of the standard used */
#define UAC_VERSION_1 0x00
#define UAC_VERSION_2 0x20
+#define UAC_VERSION_3 0x30
/* A.2 Audio Interface Subclass Codes */
#define USB_SUBCLASS_AUDIOCONTROL 0x01
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index c74372163ed2..1aa7b82e8169 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -575,6 +575,33 @@ struct vfio_device_gfx_plane_info {
#define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15)
+/**
+ * VFIO_DEVICE_IOEVENTFD - _IOW(VFIO_TYPE, VFIO_BASE + 16,
+ * struct vfio_device_ioeventfd)
+ *
+ * Perform a write to the device at the specified device fd offset, with
+ * the specified data and width when the provided eventfd is triggered.
+ * vfio bus drivers may not support this for all regions, for all widths,
+ * or at all. vfio-pci currently only enables support for BAR regions,
+ * excluding the MSI-X vector table.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_device_ioeventfd {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_DEVICE_IOEVENTFD_8 (1 << 0) /* 1-byte write */
+#define VFIO_DEVICE_IOEVENTFD_16 (1 << 1) /* 2-byte write */
+#define VFIO_DEVICE_IOEVENTFD_32 (1 << 2) /* 4-byte write */
+#define VFIO_DEVICE_IOEVENTFD_64 (1 << 3) /* 8-byte write */
+#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf)
+ __u64 offset; /* device fd offset of write */
+ __u64 data; /* data to be written */
+ __s32 fd; /* -1 for de-assignment */
+};
+
+#define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16)
+
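A hedged userspace sketch of driving the new ioctl (device_fd, bar_offset, doorbell_value and efd are hypothetical; how the BAR region offset is obtained is outside this header):

	struct vfio_device_ioeventfd ie = {
		.argsz  = sizeof(ie),
		.flags  = VFIO_DEVICE_IOEVENTFD_32,	/* 4-byte write */
		.offset = bar_offset + 0x40,		/* hypothetical doorbell register */
		.data   = doorbell_value,
		.fd     = efd,				/* -1 would de-assign */
	};

	if (ioctl(device_fd, VFIO_DEVICE_IOEVENTFD, &ie) < 0)
		perror("VFIO_DEVICE_IOEVENTFD");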
/* -------- API for Type1 VFIO IOMMU -------- */
/**
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index db54115be044..a7a6111e50c7 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -53,15 +53,20 @@ struct bnxt_re_uctx_resp {
__u32 rsvd;
};
+/*
+ * This struct is placed after the ib_uverbs_alloc_pd_resp struct, which is
+ * not 8 byte aligned. To avoid undesired padding in various cases we have to
+ * mark this struct packed.
+ */
struct bnxt_re_pd_resp {
__u32 pdid;
__u32 dpi;
__u64 dbr;
-};
+} __attribute__((packed, aligned(4)));
struct bnxt_re_cq_req {
- __u64 cq_va;
- __u64 cq_handle;
+ __aligned_u64 cq_va;
+ __aligned_u64 cq_handle;
};
struct bnxt_re_cq_resp {
@@ -72,9 +77,9 @@ struct bnxt_re_cq_resp {
};
struct bnxt_re_qp_req {
- __u64 qpsva;
- __u64 qprva;
- __u64 qp_handle;
+ __aligned_u64 qpsva;
+ __aligned_u64 qprva;
+ __aligned_u64 qp_handle;
};
struct bnxt_re_qp_resp {
@@ -83,8 +88,8 @@ struct bnxt_re_qp_resp {
};
struct bnxt_re_srq_req {
- __u64 srqva;
- __u64 srq_handle;
+ __aligned_u64 srqva;
+ __aligned_u64 srq_handle;
};
struct bnxt_re_srq_resp {
diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h
index d5745e43ae85..9acb4b7a6246 100644
--- a/include/uapi/rdma/cxgb3-abi.h
+++ b/include/uapi/rdma/cxgb3-abi.h
@@ -41,21 +41,21 @@
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
+ * In particular do not use pointer types -- pass pointers in __aligned_u64
* instead.
*/
struct iwch_create_cq_req {
- __u64 user_rptr_addr;
+ __aligned_u64 user_rptr_addr;
};
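The __aligned_u64 convention means userspace hands pointers across as 64-bit integers rather than as pointer types; a small hedged sketch (rptr is a hypothetical application pointer):

	struct iwch_create_cq_req req = {
		.user_rptr_addr = (__aligned_u64)(uintptr_t)rptr,
	};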
struct iwch_create_cq_resp_v0 {
- __u64 key;
+ __aligned_u64 key;
__u32 cqid;
__u32 size_log2;
};
struct iwch_create_cq_resp {
- __u64 key;
+ __aligned_u64 key;
__u32 cqid;
__u32 size_log2;
__u32 memsize;
@@ -63,8 +63,8 @@ struct iwch_create_cq_resp {
};
struct iwch_create_qp_resp {
- __u64 key;
- __u64 db_key;
+ __aligned_u64 key;
+ __aligned_u64 db_key;
__u32 qpid;
__u32 size_log2;
__u32 sq_size_log2;
@@ -74,4 +74,9 @@ struct iwch_create_qp_resp {
struct iwch_reg_user_mr_resp {
__u32 pbl_addr;
};
+
+struct iwch_alloc_pd_resp {
+ __u32 pdid;
+};
+
#endif /* CXGB3_ABI_USER_H */
diff --git a/include/uapi/rdma/cxgb4-abi.h b/include/uapi/rdma/cxgb4-abi.h
index 05f71f1bc119..1fefd0140c26 100644
--- a/include/uapi/rdma/cxgb4-abi.h
+++ b/include/uapi/rdma/cxgb4-abi.h
@@ -41,13 +41,13 @@
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
+ * In particular do not use pointer types -- pass pointers in __aligned_u64
* instead.
*/
struct c4iw_create_cq_resp {
- __u64 key;
- __u64 gts_key;
- __u64 memsize;
+ __aligned_u64 key;
+ __aligned_u64 gts_key;
+ __aligned_u64 memsize;
__u32 cqid;
__u32 size;
__u32 qid_mask;
@@ -59,13 +59,13 @@ enum {
};
struct c4iw_create_qp_resp {
- __u64 ma_sync_key;
- __u64 sq_key;
- __u64 rq_key;
- __u64 sq_db_gts_key;
- __u64 rq_db_gts_key;
- __u64 sq_memsize;
- __u64 rq_memsize;
+ __aligned_u64 ma_sync_key;
+ __aligned_u64 sq_key;
+ __aligned_u64 rq_key;
+ __aligned_u64 sq_db_gts_key;
+ __aligned_u64 rq_db_gts_key;
+ __aligned_u64 sq_memsize;
+ __aligned_u64 rq_memsize;
__u32 sqid;
__u32 rqid;
__u32 sq_size;
@@ -75,8 +75,13 @@ struct c4iw_create_qp_resp {
};
struct c4iw_alloc_ucontext_resp {
- __u64 status_page_key;
+ __aligned_u64 status_page_key;
__u32 status_page_size;
__u32 reserved; /* explicit padding (optional for i386) */
};
+
+struct c4iw_alloc_pd_resp {
+ __u32 pdid;
+};
+
#endif /* CXGB4_ABI_USER_H */
diff --git a/include/uapi/rdma/hfi/hfi1_ioctl.h b/include/uapi/rdma/hfi/hfi1_ioctl.h
index 9de78c5ee913..8f3d9fe7b141 100644
--- a/include/uapi/rdma/hfi/hfi1_ioctl.h
+++ b/include/uapi/rdma/hfi/hfi1_ioctl.h
@@ -79,7 +79,7 @@ struct hfi1_user_info {
};
struct hfi1_ctxt_info {
- __u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */
+ __aligned_u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */
__u32 rcvegr_size; /* size of each eager buffer */
__u16 num_active; /* number of active units */
__u16 unit; /* unit (chip) assigned to caller */
@@ -98,9 +98,9 @@ struct hfi1_ctxt_info {
struct hfi1_tid_info {
/* virtual address of first page in transfer */
- __u64 vaddr;
+ __aligned_u64 vaddr;
/* pointer to tid array. this array is big enough */
- __u64 tidlist;
+ __aligned_u64 tidlist;
/* number of tids programmed by this request */
__u32 tidcnt;
/* length of transfer buffer programmed by this request */
@@ -131,23 +131,23 @@ struct hfi1_base_info {
*/
__u32 bthqp;
/* PIO credit return address, */
- __u64 sc_credits_addr;
+ __aligned_u64 sc_credits_addr;
/*
* Base address of write-only pio buffers for this process.
* Each buffer has sendpio_credits*64 bytes.
*/
- __u64 pio_bufbase_sop;
+ __aligned_u64 pio_bufbase_sop;
/*
* Base address of write-only pio buffers for this process.
* Each buffer has sendpio_credits*64 bytes.
*/
- __u64 pio_bufbase;
+ __aligned_u64 pio_bufbase;
/* address where receive buffer queue is mapped into */
- __u64 rcvhdr_bufbase;
+ __aligned_u64 rcvhdr_bufbase;
/* base address of Eager receive buffers. */
- __u64 rcvegr_bufbase;
+ __aligned_u64 rcvegr_bufbase;
/* base address of SDMA completion ring */
- __u64 sdma_comp_bufbase;
+ __aligned_u64 sdma_comp_bufbase;
/*
* User register base for init code, not to be used directly by
* protocol or applications. Always maps real chip register space.
@@ -155,20 +155,20 @@ struct hfi1_base_info {
* ur_rcvhdrhead, ur_rcvhdrtail, ur_rcvegrhead, ur_rcvegrtail,
* ur_rcvtidflow
*/
- __u64 user_regbase;
+ __aligned_u64 user_regbase;
/* notification events */
- __u64 events_bufbase;
+ __aligned_u64 events_bufbase;
/* status page */
- __u64 status_bufbase;
+ __aligned_u64 status_bufbase;
/* rcvhdrtail update */
- __u64 rcvhdrtail_base;
+ __aligned_u64 rcvhdrtail_base;
/*
* shared memory pages for subctxts if ctxt is shared; these cover
* all the processes in the group sharing a single context.
* all have enough space for the num_subcontexts value on this job.
*/
- __u64 subctxt_uregbase;
- __u64 subctxt_rcvegrbuf;
- __u64 subctxt_rcvhdrbuf;
+ __aligned_u64 subctxt_uregbase;
+ __aligned_u64 subctxt_rcvegrbuf;
+ __aligned_u64 subctxt_rcvhdrbuf;
};
#endif /* _LINIUX__HFI1_IOCTL_H */
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index 791bea2f8297..c6a984c0c881 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -177,8 +177,8 @@ struct hfi1_sdma_comp_entry {
* Device status and notifications from driver to user-space.
*/
struct hfi1_status {
- __u64 dev; /* device/hw status bits */
- __u64 port; /* port state and status bits */
+ __aligned_u64 dev; /* device/hw status bits */
+ __aligned_u64 port; /* port state and status bits */
char freezemsg[0];
};
@@ -219,7 +219,7 @@ struct sdma_req_info {
* in charge of managing its own ring.
*/
__u16 comp_idx;
-} __packed;
+} __attribute__((__packed__));
/*
* SW KDETH header.
@@ -230,7 +230,7 @@ struct hfi1_kdeth_header {
__le16 jkey;
__le16 hcrc;
__le32 swdata[7];
-} __packed;
+} __attribute__((__packed__));
/*
* Structure describing the headers that User space uses. The
@@ -241,7 +241,7 @@ struct hfi1_pkt_header {
__be16 lrh[4];
__be32 bth[3];
struct hfi1_kdeth_header kdeth;
-} __packed;
+} __attribute__((__packed__));
/*
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index a9c03b0eed57..7092c8de4bd8 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -37,19 +37,35 @@
#include <linux/types.h>
struct hns_roce_ib_create_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
+};
+
+struct hns_roce_ib_create_cq_resp {
+ __aligned_u64 cqn; /* Only 32 bits used, 64 for compat */
+ __aligned_u64 cap_flags;
};
struct hns_roce_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u8 log_sq_bb_count;
__u8 log_sq_stride;
__u8 sq_no_prefetch;
__u8 reserved[5];
};
+struct hns_roce_ib_create_qp_resp {
+ __aligned_u64 cap_flags;
+};
+
struct hns_roce_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
+ __u32 reserved;
};
+
+struct hns_roce_ib_alloc_pd_resp {
+ __u32 pdn;
+};
+
#endif /* HNS_ABI_USER_H */
diff --git a/include/uapi/rdma/i40iw-abi.h b/include/uapi/rdma/i40iw-abi.h
new file mode 100644
index 000000000000..79890baa6fdb
--- /dev/null
+++ b/include/uapi/rdma/i40iw-abi.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2006 - 2016 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef I40IW_ABI_H
+#define I40IW_ABI_H
+
+#include <linux/types.h>
+
+#define I40IW_ABI_VER 5
+
+struct i40iw_alloc_ucontext_req {
+ __u32 reserved32;
+ __u8 userspace_ver;
+ __u8 reserved8[3];
+};
+
+struct i40iw_alloc_ucontext_resp {
+ __u32 max_pds; /* maximum pds allowed for this user process */
+ __u32 max_qps; /* maximum qps allowed for this user process */
+ __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
+ __u8 kernel_ver;
+ __u8 reserved[3];
+};
+
+struct i40iw_alloc_pd_resp {
+ __u32 pd_id;
+ __u8 reserved[4];
+};
+
+struct i40iw_create_cq_req {
+ __aligned_u64 user_cq_buffer;
+ __aligned_u64 user_shadow_area;
+};
+
+struct i40iw_create_qp_req {
+ __aligned_u64 user_wqe_buffers;
+ __aligned_u64 user_compl_ctx;
+
+ /* UDA QP PHB */
+ __aligned_u64 user_sq_phb; /* place for VA of the sq phb buff */
+ __aligned_u64 user_rq_phb; /* place for VA of the rq phb buff */
+};
+
+enum i40iw_memreg_type {
+ IW_MEMREG_TYPE_MEM = 0x0000,
+ IW_MEMREG_TYPE_QP = 0x0001,
+ IW_MEMREG_TYPE_CQ = 0x0002,
+};
+
+struct i40iw_mem_reg_req {
+ __u16 reg_type; /* Memory, QP or CQ */
+ __u16 cq_pages;
+ __u16 rq_pages;
+ __u16 sq_pages;
+};
+
+struct i40iw_create_cq_resp {
+ __u32 cq_id;
+ __u32 cq_size;
+ __u32 mmap_db_index;
+ __u32 reserved;
+};
+
+struct i40iw_create_qp_resp {
+ __u32 qp_id;
+ __u32 actual_sq_size;
+ __u32 actual_rq_size;
+ __u32 i40iw_drv_opt;
+ __u16 push_idx;
+ __u8 lsmm;
+ __u8 rsvd2;
+};
+
+#endif
diff --git a/include/uapi/rdma/ib_user_cm.h b/include/uapi/rdma/ib_user_cm.h
index f4041bdc4d08..4a8f9562f7cd 100644
--- a/include/uapi/rdma/ib_user_cm.h
+++ b/include/uapi/rdma/ib_user_cm.h
@@ -73,8 +73,8 @@ struct ib_ucm_cmd_hdr {
};
struct ib_ucm_create_id {
- __u64 uid;
- __u64 response;
+ __aligned_u64 uid;
+ __aligned_u64 response;
};
struct ib_ucm_create_id_resp {
@@ -82,7 +82,7 @@ struct ib_ucm_create_id_resp {
};
struct ib_ucm_destroy_id {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 reserved;
};
@@ -92,7 +92,7 @@ struct ib_ucm_destroy_id_resp {
};
struct ib_ucm_attr_id {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 reserved;
};
@@ -105,7 +105,7 @@ struct ib_ucm_attr_id_resp {
};
struct ib_ucm_init_qp_attr {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 qp_state;
};
@@ -123,7 +123,7 @@ struct ib_ucm_notify {
};
struct ib_ucm_private_data {
- __u64 data;
+ __aligned_u64 data;
__u32 id;
__u8 len;
__u8 reserved[3];
@@ -135,9 +135,9 @@ struct ib_ucm_req {
__u32 qp_type;
__u32 psn;
__be64 sid;
- __u64 data;
- __u64 primary_path;
- __u64 alternate_path;
+ __aligned_u64 data;
+ __aligned_u64 primary_path;
+ __aligned_u64 alternate_path;
__u8 len;
__u8 peer_to_peer;
__u8 responder_resources;
@@ -153,8 +153,8 @@ struct ib_ucm_req {
};
struct ib_ucm_rep {
- __u64 uid;
- __u64 data;
+ __aligned_u64 uid;
+ __aligned_u64 data;
__u32 id;
__u32 qpn;
__u32 psn;
@@ -172,15 +172,15 @@ struct ib_ucm_rep {
struct ib_ucm_info {
__u32 id;
__u32 status;
- __u64 info;
- __u64 data;
+ __aligned_u64 info;
+ __aligned_u64 data;
__u8 info_len;
__u8 data_len;
__u8 reserved[6];
};
struct ib_ucm_mra {
- __u64 data;
+ __aligned_u64 data;
__u32 id;
__u8 len;
__u8 timeout;
@@ -188,8 +188,8 @@ struct ib_ucm_mra {
};
struct ib_ucm_lap {
- __u64 path;
- __u64 data;
+ __aligned_u64 path;
+ __aligned_u64 data;
__u32 id;
__u8 len;
__u8 reserved[3];
@@ -199,8 +199,8 @@ struct ib_ucm_sidr_req {
__u32 id;
__u32 timeout;
__be64 sid;
- __u64 data;
- __u64 path;
+ __aligned_u64 data;
+ __aligned_u64 path;
__u16 reserved_pkey;
__u8 len;
__u8 max_cm_retries;
@@ -212,8 +212,8 @@ struct ib_ucm_sidr_rep {
__u32 qpn;
__u32 qkey;
__u32 status;
- __u64 info;
- __u64 data;
+ __aligned_u64 info;
+ __aligned_u64 data;
__u8 info_len;
__u8 data_len;
__u8 reserved[6];
@@ -222,9 +222,9 @@ struct ib_ucm_sidr_rep {
* event notification ABI structures.
*/
struct ib_ucm_event_get {
- __u64 response;
- __u64 data;
- __u64 info;
+ __aligned_u64 response;
+ __aligned_u64 data;
+ __aligned_u64 info;
__u8 data_len;
__u8 info_len;
__u8 reserved[6];
@@ -303,7 +303,7 @@ struct ib_ucm_sidr_rep_event_resp {
#define IB_UCM_PRES_ALTERNATE 0x08
struct ib_ucm_event_resp {
- __u64 uid;
+ __aligned_u64 uid;
__u32 id;
__u32 event;
__u32 present;
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
new file mode 100644
index 000000000000..83e3890eef20
--- /dev/null
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef IB_USER_IOCTL_CMDS_H
+#define IB_USER_IOCTL_CMDS_H
+
+#define UVERBS_ID_NS_MASK 0xF000
+#define UVERBS_ID_NS_SHIFT 12
+
+#define UVERBS_UDATA_DRIVER_DATA_NS 1
+#define UVERBS_UDATA_DRIVER_DATA_FLAG (1UL << UVERBS_ID_NS_SHIFT)
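
A minimal user-space sketch (helper name invented, not part of the patch) of how these masks partition the 16-bit attribute ID space: the low 12 bits are method-local, the top nibble selects a namespace, so driver-private attributes start at UVERBS_UDATA_DRIVER_DATA_FLAG.

#include <rdma/ib_user_ioctl_cmds.h>
#include <stdio.h>

/* invented helper: which namespace does an attribute ID live in? */
static unsigned int uverbs_attr_ns(unsigned short attr_id)
{
        return (attr_id & UVERBS_ID_NS_MASK) >> UVERBS_ID_NS_SHIFT;
}

int main(void)
{
        printf("%u\n", uverbs_attr_ns(UVERBS_ATTR_CREATE_CQ_CQE)); /* 0: generic namespace */
        printf("%u\n", uverbs_attr_ns(UVERBS_ATTR_UHW_IN));        /* 1: driver data namespace */
        return 0;
}
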
+
+enum uverbs_default_objects {
+ UVERBS_OBJECT_DEVICE, /* No instances of DEVICE are allowed */
+ UVERBS_OBJECT_PD,
+ UVERBS_OBJECT_COMP_CHANNEL,
+ UVERBS_OBJECT_CQ,
+ UVERBS_OBJECT_QP,
+ UVERBS_OBJECT_SRQ,
+ UVERBS_OBJECT_AH,
+ UVERBS_OBJECT_MR,
+ UVERBS_OBJECT_MW,
+ UVERBS_OBJECT_FLOW,
+ UVERBS_OBJECT_XRCD,
+ UVERBS_OBJECT_RWQ_IND_TBL,
+ UVERBS_OBJECT_WQ,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_OBJECT_DM,
+};
+
+enum {
+ UVERBS_ATTR_UHW_IN = UVERBS_UDATA_DRIVER_DATA_FLAG,
+ UVERBS_ATTR_UHW_OUT,
+};
+
+enum uverbs_attrs_create_cq_cmd_attr_ids {
+ UVERBS_ATTR_CREATE_CQ_HANDLE,
+ UVERBS_ATTR_CREATE_CQ_CQE,
+ UVERBS_ATTR_CREATE_CQ_USER_HANDLE,
+ UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL,
+ UVERBS_ATTR_CREATE_CQ_COMP_VECTOR,
+ UVERBS_ATTR_CREATE_CQ_FLAGS,
+ UVERBS_ATTR_CREATE_CQ_RESP_CQE,
+};
+
+enum uverbs_attrs_destroy_cq_cmd_attr_ids {
+ UVERBS_ATTR_DESTROY_CQ_HANDLE,
+ UVERBS_ATTR_DESTROY_CQ_RESP,
+};
+
+enum uverbs_attrs_create_flow_action_esp {
+ UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE,
+ UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
+ UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
+ UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
+ UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
+ UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
+};
+
+enum uverbs_attrs_destroy_flow_action_esp {
+ UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
+};
+
+enum uverbs_methods_cq {
+ UVERBS_METHOD_CQ_CREATE,
+ UVERBS_METHOD_CQ_DESTROY,
+};
+
+enum uverbs_methods_actions_flow_action_ops {
+ UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
+ UVERBS_METHOD_FLOW_ACTION_DESTROY,
+ UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY,
+};
+
+enum uverbs_attrs_alloc_dm_cmd_attr_ids {
+ UVERBS_ATTR_ALLOC_DM_HANDLE,
+ UVERBS_ATTR_ALLOC_DM_LENGTH,
+ UVERBS_ATTR_ALLOC_DM_ALIGNMENT,
+};
+
+enum uverbs_attrs_free_dm_cmd_attr_ids {
+ UVERBS_ATTR_FREE_DM_HANDLE,
+};
+
+enum uverbs_methods_dm {
+ UVERBS_METHOD_DM_ALLOC,
+ UVERBS_METHOD_DM_FREE,
+};
+
+enum uverbs_attrs_reg_dm_mr_cmd_attr_ids {
+ UVERBS_ATTR_REG_DM_MR_HANDLE,
+ UVERBS_ATTR_REG_DM_MR_OFFSET,
+ UVERBS_ATTR_REG_DM_MR_LENGTH,
+ UVERBS_ATTR_REG_DM_MR_PD_HANDLE,
+ UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS,
+ UVERBS_ATTR_REG_DM_MR_DM_HANDLE,
+ UVERBS_ATTR_REG_DM_MR_RESP_LKEY,
+ UVERBS_ATTR_REG_DM_MR_RESP_RKEY,
+};
+
+enum uverbs_methods_mr {
+ UVERBS_METHOD_DM_MR_REG,
+};
+
+#endif
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index 842792eae383..04e46ea517d3 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
/*
- * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2017-2018, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -33,52 +34,69 @@
#ifndef IB_USER_IOCTL_VERBS_H
#define IB_USER_IOCTL_VERBS_H
-#include <rdma/rdma_user_ioctl.h>
-
-#define UVERBS_UDATA_DRIVER_DATA_NS 1
-#define UVERBS_UDATA_DRIVER_DATA_FLAG (1UL << UVERBS_ID_NS_SHIFT)
-
-enum uverbs_default_objects {
- UVERBS_OBJECT_DEVICE, /* No instances of DEVICE are allowed */
- UVERBS_OBJECT_PD,
- UVERBS_OBJECT_COMP_CHANNEL,
- UVERBS_OBJECT_CQ,
- UVERBS_OBJECT_QP,
- UVERBS_OBJECT_SRQ,
- UVERBS_OBJECT_AH,
- UVERBS_OBJECT_MR,
- UVERBS_OBJECT_MW,
- UVERBS_OBJECT_FLOW,
- UVERBS_OBJECT_XRCD,
- UVERBS_OBJECT_RWQ_IND_TBL,
- UVERBS_OBJECT_WQ,
- UVERBS_OBJECT_LAST,
+#include <linux/types.h>
+
+#ifndef RDMA_UAPI_PTR
+#define RDMA_UAPI_PTR(_type, _name) __aligned_u64 _name
+#endif
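
A user-space sketch (struct and helper names invented) of what the RDMA_UAPI_PTR fallback means: the field is a plain __aligned_u64, so a pointer is stored by widening it through uintptr_t.

#include <linux/types.h>
#include <stdint.h>

struct example_cmd {                    /* invented command layout */
        __aligned_u64 buf;              /* RDMA_UAPI_PTR(void *, buf) expands to this */
        __u32 len;
        __u32 reserved;
};

static void example_set_buf(struct example_cmd *cmd, void *p, __u32 len)
{
        cmd->buf = (uintptr_t)p;        /* zero-extended on 32-bit, identity on 64-bit */
        cmd->len = len;
        cmd->reserved = 0;
}
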
+
+enum ib_uverbs_flow_action_esp_keymat {
+ IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM,
};
-enum {
- UVERBS_UHW_IN = UVERBS_UDATA_DRIVER_DATA_FLAG,
- UVERBS_UHW_OUT,
+enum ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo {
+ IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ,
};
-enum uverbs_create_cq_cmd_attr_ids {
- CREATE_CQ_HANDLE,
- CREATE_CQ_CQE,
- CREATE_CQ_USER_HANDLE,
- CREATE_CQ_COMP_CHANNEL,
- CREATE_CQ_COMP_VECTOR,
- CREATE_CQ_FLAGS,
- CREATE_CQ_RESP_CQE,
+struct ib_uverbs_flow_action_esp_keymat_aes_gcm {
+ __aligned_u64 iv;
+ __u32 iv_algo; /* Use enum ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo */
+
+ __u32 salt;
+ __u32 icv_len;
+
+ __u32 key_len;
+ __u32 aes_key[256 / 32];
};
-enum uverbs_destroy_cq_cmd_attr_ids {
- DESTROY_CQ_HANDLE,
- DESTROY_CQ_RESP,
+enum ib_uverbs_flow_action_esp_replay {
+ IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE,
+ IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP,
};
-enum uverbs_actions_cq_ops {
- UVERBS_CQ_CREATE,
- UVERBS_CQ_DESTROY,
+struct ib_uverbs_flow_action_esp_replay_bmp {
+ __u32 size;
};
-#endif
+enum ib_uverbs_flow_action_esp_flags {
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_INLINE_CRYPTO = 0UL << 0, /* Default */
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_FULL_OFFLOAD = 1UL << 0,
+
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TUNNEL = 0UL << 1, /* Default */
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TRANSPORT = 1UL << 1,
+
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_DECRYPT = 0UL << 2, /* Default */
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT = 1UL << 2,
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW = 1UL << 3,
+};
+
+struct ib_uverbs_flow_action_esp_encap {
+ /* This struct represents a list of pointers to flow_xxxx_filter that
+ * encapsulates the payload in ESP tunnel mode.
+ */
+ RDMA_UAPI_PTR(void *, val_ptr); /* pointer to a flow_xxxx_filter */
+ RDMA_UAPI_PTR(struct ib_uverbs_flow_action_esp_encap *, next_ptr);
+ __u16 len; /* Len of the filter struct val_ptr points to */
+ __u16 type; /* Use flow_spec_type enum */
+};
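
Illustrative only (the flow_spec type value is a placeholder): a single-entry encapsulation list pointing at an IPv4 filter would be built like this, with next_ptr left zero to terminate the chain.

#include <rdma/ib_user_ioctl_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <stdint.h>
#include <string.h>

static void build_encap_entry(struct ib_uverbs_flow_action_esp_encap *head,
                              struct ib_uverbs_flow_ipv4_filter *outer_ip)
{
        memset(head, 0, sizeof(*head));
        head->val_ptr  = (uintptr_t)outer_ip;   /* filter this entry describes */
        head->next_ptr = 0;                     /* 0 ends the list */
        head->len      = sizeof(*outer_ip);
        head->type     = 0x30;                  /* placeholder: IPv4 flow_spec type */
}
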
+
+struct ib_uverbs_flow_action_esp {
+ __u32 spi;
+ __u32 seq;
+ __u32 tfc_pad;
+ __u32 flags;
+ __aligned_u64 hard_limit_pkts;
+};
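
Usage sketch (SPI and limit values are illustrative): a transport-mode, full-offload encrypting action is described by OR-ing the flag bits above.

#include <rdma/ib_user_ioctl_verbs.h>

static const struct ib_uverbs_flow_action_esp esp_attrs = {
        .spi   = 0x1234,                /* illustrative SPI */
        .seq   = 1,
        .flags = IB_UVERBS_FLOW_ACTION_ESP_FLAGS_FULL_OFFLOAD |
                 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TRANSPORT |
                 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT,
        .hard_limit_pkts = 0,           /* assumed: 0 means no hard packet limit */
};
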
+
+#endif
diff --git a/include/uapi/rdma/ib_user_mad.h b/include/uapi/rdma/ib_user_mad.h
index 330a3c5f1aa8..ef92118dad97 100644
--- a/include/uapi/rdma/ib_user_mad.h
+++ b/include/uapi/rdma/ib_user_mad.h
@@ -143,7 +143,7 @@ struct ib_user_mad_hdr {
*/
struct ib_user_mad {
struct ib_user_mad_hdr hdr;
- __u64 data[0];
+ __aligned_u64 data[0];
};
/*
@@ -225,7 +225,7 @@ struct ib_user_mad_reg_req2 {
__u8 mgmt_class_version;
__u16 res;
__u32 flags;
- __u64 method_mask[2];
+ __aligned_u64 method_mask[2];
__u32 oui;
__u8 rmpp_version;
__u8 reserved[3];
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 04d0e67b1312..9be07394fdbe 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -117,13 +117,13 @@ enum {
*/
struct ib_uverbs_async_event_desc {
- __u64 element;
+ __aligned_u64 element;
__u32 event_type; /* enum ib_event_type */
__u32 reserved;
};
struct ib_uverbs_comp_event_desc {
- __u64 cq_handle;
+ __aligned_u64 cq_handle;
};
struct ib_uverbs_cq_moderation_caps {
@@ -141,10 +141,7 @@ struct ib_uverbs_cq_moderation_caps {
*/
#define IB_USER_VERBS_CMD_COMMAND_MASK 0xff
-#define IB_USER_VERBS_CMD_FLAGS_MASK 0xff000000u
-#define IB_USER_VERBS_CMD_FLAGS_SHIFT 24
-
-#define IB_USER_VERBS_CMD_FLAG_EXTENDED 0x80
+#define IB_USER_VERBS_CMD_FLAG_EXTENDED 0x80000000u
struct ib_uverbs_cmd_hdr {
__u32 command;
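
With the shift/mask pair dropped, decoding the command word needs only the two remaining definitions; a short sketch (function name invented):

#include <rdma/ib_user_verbs.h>

static void decode_command(__u32 command, __u32 *opcode, int *extended)
{
        *extended = !!(command & IB_USER_VERBS_CMD_FLAG_EXTENDED);
        *opcode   = command & IB_USER_VERBS_CMD_COMMAND_MASK;
}
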
@@ -153,15 +150,15 @@ struct ib_uverbs_cmd_hdr {
};
struct ib_uverbs_ex_cmd_hdr {
- __u64 response;
+ __aligned_u64 response;
__u16 provider_in_words;
__u16 provider_out_words;
__u32 cmd_hdr_reserved;
};
struct ib_uverbs_get_context {
- __u64 response;
- __u64 driver_data[0];
+ __aligned_u64 response;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_get_context_resp {
@@ -170,16 +167,16 @@ struct ib_uverbs_get_context_resp {
};
struct ib_uverbs_query_device {
- __u64 response;
- __u64 driver_data[0];
+ __aligned_u64 response;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_device_resp {
- __u64 fw_ver;
+ __aligned_u64 fw_ver;
__be64 node_guid;
__be64 sys_image_guid;
- __u64 max_mr_size;
- __u64 page_size_cap;
+ __aligned_u64 max_mr_size;
+ __aligned_u64 page_size_cap;
__u32 vendor_id;
__u32 vendor_part_id;
__u32 hw_ver;
@@ -224,7 +221,7 @@ struct ib_uverbs_ex_query_device {
};
struct ib_uverbs_odp_caps {
- __u64 general_caps;
+ __aligned_u64 general_caps;
struct {
__u32 rc_odp_caps;
__u32 uc_odp_caps;
@@ -263,21 +260,22 @@ struct ib_uverbs_ex_query_device_resp {
__u32 comp_mask;
__u32 response_length;
struct ib_uverbs_odp_caps odp_caps;
- __u64 timestamp_mask;
- __u64 hca_core_clock; /* in KHZ */
- __u64 device_cap_flags_ex;
+ __aligned_u64 timestamp_mask;
+ __aligned_u64 hca_core_clock; /* in KHZ */
+ __aligned_u64 device_cap_flags_ex;
struct ib_uverbs_rss_caps rss_caps;
__u32 max_wq_type_rq;
__u32 raw_packet_caps;
struct ib_uverbs_tm_caps tm_caps;
struct ib_uverbs_cq_moderation_caps cq_moderation_caps;
+ __aligned_u64 max_dm_size;
};
struct ib_uverbs_query_port {
- __u64 response;
+ __aligned_u64 response;
__u8 port_num;
__u8 reserved[7];
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_port_resp {
@@ -305,8 +303,8 @@ struct ib_uverbs_query_port_resp {
};
struct ib_uverbs_alloc_pd {
- __u64 response;
- __u64 driver_data[0];
+ __aligned_u64 response;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_alloc_pd_resp {
@@ -318,10 +316,10 @@ struct ib_uverbs_dealloc_pd {
};
struct ib_uverbs_open_xrcd {
- __u64 response;
+ __aligned_u64 response;
__u32 fd;
__u32 oflags;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_open_xrcd_resp {
@@ -333,13 +331,13 @@ struct ib_uverbs_close_xrcd {
};
struct ib_uverbs_reg_mr {
- __u64 response;
- __u64 start;
- __u64 length;
- __u64 hca_va;
+ __aligned_u64 response;
+ __aligned_u64 start;
+ __aligned_u64 length;
+ __aligned_u64 hca_va;
__u32 pd_handle;
__u32 access_flags;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_reg_mr_resp {
@@ -349,12 +347,12 @@ struct ib_uverbs_reg_mr_resp {
};
struct ib_uverbs_rereg_mr {
- __u64 response;
+ __aligned_u64 response;
__u32 mr_handle;
__u32 flags;
- __u64 start;
- __u64 length;
- __u64 hca_va;
+ __aligned_u64 start;
+ __aligned_u64 length;
+ __aligned_u64 hca_va;
__u32 pd_handle;
__u32 access_flags;
};
@@ -369,7 +367,7 @@ struct ib_uverbs_dereg_mr {
};
struct ib_uverbs_alloc_mw {
- __u64 response;
+ __aligned_u64 response;
__u32 pd_handle;
__u8 mw_type;
__u8 reserved[3];
@@ -385,7 +383,7 @@ struct ib_uverbs_dealloc_mw {
};
struct ib_uverbs_create_comp_channel {
- __u64 response;
+ __aligned_u64 response;
};
struct ib_uverbs_create_comp_channel_resp {
@@ -393,13 +391,13 @@ struct ib_uverbs_create_comp_channel_resp {
};
struct ib_uverbs_create_cq {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 cqe;
__u32 comp_vector;
__s32 comp_channel;
__u32 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
enum ib_uverbs_ex_create_cq_flags {
@@ -408,7 +406,7 @@ enum ib_uverbs_ex_create_cq_flags {
};
struct ib_uverbs_ex_create_cq {
- __u64 user_handle;
+ __aligned_u64 user_handle;
__u32 cqe;
__u32 comp_vector;
__s32 comp_channel;
@@ -429,26 +427,26 @@ struct ib_uverbs_ex_create_cq_resp {
};
struct ib_uverbs_resize_cq {
- __u64 response;
+ __aligned_u64 response;
__u32 cq_handle;
__u32 cqe;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_resize_cq_resp {
__u32 cqe;
__u32 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_poll_cq {
- __u64 response;
+ __aligned_u64 response;
__u32 cq_handle;
__u32 ne;
};
struct ib_uverbs_wc {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 status;
__u32 opcode;
__u32 vendor_err;
@@ -480,7 +478,7 @@ struct ib_uverbs_req_notify_cq {
};
struct ib_uverbs_destroy_cq {
- __u64 response;
+ __aligned_u64 response;
__u32 cq_handle;
__u32 reserved;
};
@@ -549,8 +547,8 @@ struct ib_uverbs_qp_attr {
};
struct ib_uverbs_create_qp {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 send_cq_handle;
__u32 recv_cq_handle;
@@ -564,7 +562,7 @@ struct ib_uverbs_create_qp {
__u8 qp_type;
__u8 is_srq;
__u8 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
enum ib_uverbs_create_qp_mask {
@@ -590,7 +588,7 @@ enum {
};
struct ib_uverbs_ex_create_qp {
- __u64 user_handle;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 send_cq_handle;
__u32 recv_cq_handle;
@@ -611,13 +609,13 @@ struct ib_uverbs_ex_create_qp {
};
struct ib_uverbs_open_qp {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 qpn;
__u8 qp_type;
__u8 reserved[7];
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
/* also used for open response */
@@ -658,10 +656,10 @@ struct ib_uverbs_qp_dest {
};
struct ib_uverbs_query_qp {
- __u64 response;
+ __aligned_u64 response;
__u32 qp_handle;
__u32 attr_mask;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_qp_resp {
@@ -695,7 +693,7 @@ struct ib_uverbs_query_qp_resp {
__u8 alt_timeout;
__u8 sq_sig_all;
__u8 reserved[5];
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_modify_qp {
@@ -725,7 +723,7 @@ struct ib_uverbs_modify_qp {
__u8 alt_port_num;
__u8 alt_timeout;
__u8 reserved[2];
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_ex_modify_qp {
@@ -743,7 +741,7 @@ struct ib_uverbs_ex_modify_qp_resp {
};
struct ib_uverbs_destroy_qp {
- __u64 response;
+ __aligned_u64 response;
__u32 qp_handle;
__u32 reserved;
};
@@ -759,13 +757,13 @@ struct ib_uverbs_destroy_qp_resp {
* document the ABI.
*/
struct ib_uverbs_sge {
- __u64 addr;
+ __aligned_u64 addr;
__u32 length;
__u32 lkey;
};
struct ib_uverbs_send_wr {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 num_sge;
__u32 opcode;
__u32 send_flags;
@@ -775,14 +773,14 @@ struct ib_uverbs_send_wr {
} ex;
union {
struct {
- __u64 remote_addr;
+ __aligned_u64 remote_addr;
__u32 rkey;
__u32 reserved;
} rdma;
struct {
- __u64 remote_addr;
- __u64 compare_add;
- __u64 swap;
+ __aligned_u64 remote_addr;
+ __aligned_u64 compare_add;
+ __aligned_u64 swap;
__u32 rkey;
__u32 reserved;
} atomic;
@@ -796,7 +794,7 @@ struct ib_uverbs_send_wr {
};
struct ib_uverbs_post_send {
- __u64 response;
+ __aligned_u64 response;
__u32 qp_handle;
__u32 wr_count;
__u32 sge_count;
@@ -809,13 +807,13 @@ struct ib_uverbs_post_send_resp {
};
struct ib_uverbs_recv_wr {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 num_sge;
__u32 reserved;
};
struct ib_uverbs_post_recv {
- __u64 response;
+ __aligned_u64 response;
__u32 qp_handle;
__u32 wr_count;
__u32 sge_count;
@@ -828,7 +826,7 @@ struct ib_uverbs_post_recv_resp {
};
struct ib_uverbs_post_srq_recv {
- __u64 response;
+ __aligned_u64 response;
__u32 srq_handle;
__u32 wr_count;
__u32 sge_count;
@@ -841,8 +839,8 @@ struct ib_uverbs_post_srq_recv_resp {
};
struct ib_uverbs_create_ah {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 reserved;
struct ib_uverbs_ah_attr attr;
@@ -861,7 +859,7 @@ struct ib_uverbs_attach_mcast {
__u32 qp_handle;
__u16 mlid;
__u16 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_detach_mcast {
@@ -869,7 +867,7 @@ struct ib_uverbs_detach_mcast {
__u32 qp_handle;
__u16 mlid;
__u16 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_flow_spec_hdr {
@@ -877,7 +875,7 @@ struct ib_uverbs_flow_spec_hdr {
__u16 size;
__u16 reserved;
/* followed by flow_spec */
- __u64 flow_spec_data[0];
+ __aligned_u64 flow_spec_data[0];
};
struct ib_uverbs_flow_eth_filter {
@@ -987,6 +985,19 @@ struct ib_uverbs_flow_spec_action_drop {
};
};
+struct ib_uverbs_flow_spec_action_handle {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ __u32 handle;
+ __u32 reserved1;
+};
+
struct ib_uverbs_flow_tunnel_filter {
__be32 tunnel_id;
};
@@ -1004,6 +1015,24 @@ struct ib_uverbs_flow_spec_tunnel {
struct ib_uverbs_flow_tunnel_filter mask;
};
+struct ib_uverbs_flow_spec_esp_filter {
+ __u32 spi;
+ __u32 seq;
+};
+
+struct ib_uverbs_flow_spec_esp {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_spec_esp_filter val;
+ struct ib_uverbs_flow_spec_esp_filter mask;
+};
+
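
For illustration (the numeric type is a placeholder standing in for the kernel's ESP flow-spec type), matching an exact SPI while ignoring the sequence number looks like:

#include <rdma/ib_user_verbs.h>

static const struct ib_uverbs_flow_spec_esp esp_spec = {
        .type     = 0x34,               /* placeholder: ESP flow_spec type */
        .size     = sizeof(struct ib_uverbs_flow_spec_esp),
        .val.spi  = 0x1234,
        .mask.spi = 0xffffffff,         /* exact SPI match; seq mask stays 0 */
};
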
struct ib_uverbs_flow_attr {
__u32 type;
__u16 size;
@@ -1036,18 +1065,18 @@ struct ib_uverbs_destroy_flow {
};
struct ib_uverbs_create_srq {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 max_wr;
__u32 max_sge;
__u32 srq_limit;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_create_xsrq {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 srq_type;
__u32 pd_handle;
__u32 max_wr;
@@ -1056,7 +1085,7 @@ struct ib_uverbs_create_xsrq {
__u32 max_num_tags;
__u32 xrcd_handle;
__u32 cq_handle;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_create_srq_resp {
@@ -1071,14 +1100,14 @@ struct ib_uverbs_modify_srq {
__u32 attr_mask;
__u32 max_wr;
__u32 srq_limit;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_srq {
- __u64 response;
+ __aligned_u64 response;
__u32 srq_handle;
__u32 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_srq_resp {
@@ -1089,7 +1118,7 @@ struct ib_uverbs_query_srq_resp {
};
struct ib_uverbs_destroy_srq {
- __u64 response;
+ __aligned_u64 response;
__u32 srq_handle;
__u32 reserved;
};
@@ -1101,7 +1130,7 @@ struct ib_uverbs_destroy_srq_resp {
struct ib_uverbs_ex_create_wq {
__u32 comp_mask;
__u32 wq_type;
- __u64 user_handle;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 cq_handle;
__u32 max_wr;
diff --git a/include/uapi/rdma/mlx4-abi.h b/include/uapi/rdma/mlx4-abi.h
index 7f9c37346613..04f64bc4045f 100644
--- a/include/uapi/rdma/mlx4-abi.h
+++ b/include/uapi/rdma/mlx4-abi.h
@@ -59,6 +59,10 @@ struct mlx4_ib_alloc_ucontext_resp_v3 {
__u16 bf_regs_per_page;
};
+enum {
+ MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0,
+};
+
struct mlx4_ib_alloc_ucontext_resp {
__u32 dev_caps;
__u32 qp_tab_size;
@@ -73,8 +77,8 @@ struct mlx4_ib_alloc_pd_resp {
};
struct mlx4_ib_create_cq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
};
struct mlx4_ib_create_cq_resp {
@@ -83,12 +87,12 @@ struct mlx4_ib_create_cq_resp {
};
struct mlx4_ib_resize_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
};
struct mlx4_ib_create_srq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
};
struct mlx4_ib_create_srq_resp {
@@ -97,7 +101,7 @@ struct mlx4_ib_create_srq_resp {
};
struct mlx4_ib_create_qp_rss {
- __u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
+ __aligned_u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
__u8 rx_hash_function; /* Use enum mlx4_ib_rx_hash_function_flags */
__u8 reserved[7];
__u8 rx_hash_key[40];
@@ -106,8 +110,8 @@ struct mlx4_ib_create_qp_rss {
};
struct mlx4_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u8 log_sq_bb_count;
__u8 log_sq_stride;
__u8 sq_no_prefetch;
@@ -116,8 +120,8 @@ struct mlx4_ib_create_qp {
};
struct mlx4_ib_create_wq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u8 log_range_size;
__u8 reserved[3];
__u32 comp_mask;
@@ -156,4 +160,32 @@ enum mlx4_ib_rx_hash_fields {
MLX4_IB_RX_HASH_INNER = 1ULL << 31,
};
+struct mlx4_ib_rss_caps {
+ __aligned_u64 rx_hash_fields_mask; /* enum mlx4_ib_rx_hash_fields */
+ __u8 rx_hash_function; /* enum mlx4_ib_rx_hash_function_flags */
+ __u8 reserved[7];
+};
+
+enum query_device_resp_mask {
+ MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
+};
+
+struct mlx4_ib_tso_caps {
+ __u32 max_tso; /* Maximum tso payload size in bytes */
+	/* The corresponding bit is set if the QP type from
+	 * 'enum ib_qp_type' is supported.
+	 */
+ __u32 supported_qpts;
+};
+
+struct mlx4_uverbs_ex_query_device_resp {
+ __u32 comp_mask;
+ __u32 response_length;
+ __aligned_u64 hca_core_clock_offset;
+ __u32 max_inl_recv_sz;
+ __u32 reserved;
+ struct mlx4_ib_rss_caps rss_caps;
+ struct mlx4_ib_tso_caps tso_caps;
+};
+
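
Consumer-side sketch (helper names invented; the RAW_PACKET bit position follows enum ib_qp_type and is an assumption here): comp_mask gates the clock offset, and supported_qpts is a per-QP-type bitmap.

#include <rdma/mlx4-abi.h>

static int core_clock_offset_valid(const struct mlx4_uverbs_ex_query_device_resp *resp)
{
        return !!(resp->comp_mask & MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET);
}

static int raw_packet_tso_supported(const struct mlx4_uverbs_ex_query_device_resp *resp)
{
        return resp->tso_caps.max_tso &&
               (resp->tso_caps.supported_qpts & (1 << 8)); /* 8: IB_QPT_RAW_PACKET (assumption) */
}
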
#endif /* MLX4_ABI_USER_H */
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index 1111aa4e7c1e..cb4a02c4a1ce 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -84,7 +84,7 @@ struct mlx5_ib_alloc_ucontext_req_v2 {
__u8 reserved0;
__u16 reserved1;
__u32 reserved2;
- __u64 lib_caps;
+ __aligned_u64 lib_caps;
};
enum mlx5_ib_alloc_ucontext_resp_mask {
@@ -107,6 +107,14 @@ enum mlx5_user_inline_mode {
MLX5_USER_INLINE_MODE_TCP_UDP,
};
+enum {
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
+};
+
struct mlx5_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 bf_reg_size;
@@ -118,14 +126,14 @@ struct mlx5_ib_alloc_ucontext_resp {
__u32 max_recv_wr;
__u32 max_srq_recv_wr;
__u16 num_ports;
- __u16 reserved1;
+ __u16 flow_action_flags;
__u32 comp_mask;
__u32 response_length;
__u8 cqe_version;
__u8 cmds_supp_uhw;
__u8 eth_min_inline;
__u8 clock_info_versions;
- __u64 hca_core_clock_offset;
+ __aligned_u64 hca_core_clock_offset;
__u32 log_uar_size;
__u32 num_uars_per_page;
__u32 num_dyn_bfregs;
@@ -147,7 +155,7 @@ struct mlx5_ib_tso_caps {
};
struct mlx5_ib_rss_caps {
- __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
+ __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
__u8 reserved[7];
};
@@ -163,6 +171,10 @@ struct mlx5_ib_cqe_comp_caps {
__u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
};
+enum mlx5_ib_packet_pacing_cap_flags {
+ MLX5_IB_PP_SUPPORT_BURST = 1 << 0,
+};
+
struct mlx5_packet_pacing_caps {
__u32 qp_rate_limit_min;
	__u32 qp_rate_limit_max; /* In kbps */
@@ -172,7 +184,8 @@ struct mlx5_packet_pacing_caps {
* supported_qpts |= 1 << IB_QPT_RAW_PACKET
*/
__u32 supported_qpts;
- __u32 reserved;
+ __u8 cap_flags; /* enum mlx5_ib_packet_pacing_cap_flags */
+ __u8 reserved[3];
};
enum mlx5_ib_mpw_caps {
@@ -243,8 +256,8 @@ enum mlx5_ib_create_cq_flags {
};
struct mlx5_ib_create_cq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u32 cqe_size;
__u8 cqe_comp_en;
__u8 cqe_comp_res_format;
@@ -257,15 +270,15 @@ struct mlx5_ib_create_cq_resp {
};
struct mlx5_ib_resize_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
__u16 cqe_size;
__u16 reserved0;
__u32 reserved1;
};
struct mlx5_ib_create_srq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u32 flags;
__u32 reserved0; /* explicit padding (optional on i386) */
__u32 uidx;
@@ -278,8 +291,8 @@ struct mlx5_ib_create_srq_resp {
};
struct mlx5_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u32 sq_wqe_count;
__u32 rq_wqe_count;
__u32 rq_wqe_shift;
@@ -287,8 +300,8 @@ struct mlx5_ib_create_qp {
__u32 uidx;
__u32 bfreg_index;
union {
- __u64 sq_buf_addr;
- __u64 access_key;
+ __aligned_u64 sq_buf_addr;
+ __aligned_u64 access_key;
};
};
@@ -314,12 +327,13 @@ enum mlx5_rx_hash_fields {
MLX5_RX_HASH_DST_PORT_TCP = 1 << 5,
MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
MLX5_RX_HASH_DST_PORT_UDP = 1 << 7,
+ MLX5_RX_HASH_IPSEC_SPI = 1 << 8,
/* Save bits for future fields */
MLX5_RX_HASH_INNER = (1UL << 31),
};
struct mlx5_ib_create_qp_rss {
- __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
+ __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
__u8 rx_key_len; /* valid only for Toeplitz */
__u8 reserved[6];
@@ -330,6 +344,7 @@ struct mlx5_ib_create_qp_rss {
struct mlx5_ib_create_qp_resp {
__u32 bfreg_index;
+ __u32 reserved;
};
struct mlx5_ib_alloc_mw {
@@ -344,8 +359,8 @@ enum mlx5_ib_create_wq_mask {
};
struct mlx5_ib_create_wq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u32 rq_wqe_count;
__u32 rq_wqe_shift;
__u32 user_index;
@@ -362,6 +377,18 @@ struct mlx5_ib_create_ah_resp {
__u8 reserved[6];
};
+struct mlx5_ib_burst_info {
+ __u32 max_burst_sz;
+ __u16 typical_pkt_sz;
+ __u16 reserved;
+};
+
+struct mlx5_ib_modify_qp {
+ __u32 comp_mask;
+ struct mlx5_ib_burst_info burst_info;
+ __u32 reserved;
+};
+
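
Sketch of how the burst parameters reach modify_qp (the comp_mask bit value is assumed, not defined in this hunk; callers should first see MLX5_IB_PP_SUPPORT_BURST in packet_pacing_caps.cap_flags):

#include <rdma/mlx5-abi.h>

static void fill_burst_info(struct mlx5_ib_modify_qp *cmd,
                            __u32 max_burst_sz, __u16 typical_pkt_sz)
{
        cmd->comp_mask = 1;                             /* assumption: bit 0 selects burst_info */
        cmd->burst_info.max_burst_sz   = max_burst_sz;  /* bytes (assumption) */
        cmd->burst_info.typical_pkt_sz = typical_pkt_sz;
        cmd->burst_info.reserved = 0;
        cmd->reserved = 0;
}
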
struct mlx5_ib_modify_qp_resp {
__u32 response_length;
__u32 dctn;
@@ -385,13 +412,13 @@ struct mlx5_ib_modify_wq {
struct mlx5_ib_clock_info {
__u32 sign;
__u32 resv;
- __u64 nsec;
- __u64 cycles;
- __u64 frac;
+ __aligned_u64 nsec;
+ __aligned_u64 cycles;
+ __aligned_u64 frac;
__u32 mult;
__u32 shift;
- __u64 mask;
- __u64 overflow_period;
+ __aligned_u64 mask;
+ __aligned_u64 overflow_period;
};
enum mlx5_ib_mmap_cmd {
@@ -403,6 +430,7 @@ enum mlx5_ib_mmap_cmd {
MLX5_IB_MMAP_CORE_CLOCK = 5,
MLX5_IB_MMAP_ALLOC_WC = 6,
MLX5_IB_MMAP_CLOCK_INFO = 7,
+ MLX5_IB_MMAP_DEVICE_MEM = 8,
};
enum {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
new file mode 100644
index 000000000000..f7d685ef2d1f
--- /dev/null
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_USER_IOCTL_CMDS_H
+#define MLX5_USER_IOCTL_CMDS_H
+
+#include <rdma/ib_user_ioctl_cmds.h>
+
+enum mlx5_ib_create_flow_action_attrs {
+	/* This attribute belongs to the driver namespace */
+ MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_alloc_dm_attrs {
+ MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+};
+
+#endif
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
new file mode 100644
index 000000000000..8a2fb33f3ed4
--- /dev/null
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_USER_IOCTL_VERBS_H
+#define MLX5_USER_IOCTL_VERBS_H
+
+#include <linux/types.h>
+
+enum mlx5_ib_uapi_flow_action_flags {
+ MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA = 1 << 0,
+};
+
+#endif
+
diff --git a/include/uapi/rdma/mthca-abi.h b/include/uapi/rdma/mthca-abi.h
index 3020d8a907a7..ac756cd9e807 100644
--- a/include/uapi/rdma/mthca-abi.h
+++ b/include/uapi/rdma/mthca-abi.h
@@ -74,8 +74,8 @@ struct mthca_reg_mr {
struct mthca_create_cq {
__u32 lkey;
__u32 pdn;
- __u64 arm_db_page;
- __u64 set_db_page;
+ __aligned_u64 arm_db_page;
+ __aligned_u64 set_db_page;
__u32 arm_db_index;
__u32 set_db_index;
};
@@ -93,7 +93,7 @@ struct mthca_resize_cq {
struct mthca_create_srq {
__u32 lkey;
__u32 db_index;
- __u64 db_page;
+ __aligned_u64 db_page;
};
struct mthca_create_srq_resp {
@@ -104,8 +104,8 @@ struct mthca_create_srq_resp {
struct mthca_create_qp {
__u32 lkey;
__u32 reserved;
- __u64 sq_db_page;
- __u64 rq_db_page;
+ __aligned_u64 sq_db_page;
+ __aligned_u64 rq_db_page;
__u32 sq_db_index;
__u32 rq_db_index;
};
diff --git a/include/uapi/rdma/nes-abi.h b/include/uapi/rdma/nes-abi.h
index f5b2437aab28..35bfd4015d07 100644
--- a/include/uapi/rdma/nes-abi.h
+++ b/include/uapi/rdma/nes-abi.h
@@ -72,14 +72,14 @@ struct nes_alloc_pd_resp {
};
struct nes_create_cq_req {
- __u64 user_cq_buffer;
+ __aligned_u64 user_cq_buffer;
__u32 mcrqf;
__u8 reserved[4];
};
struct nes_create_qp_req {
- __u64 user_wqe_buffers;
- __u64 user_qp_buffer;
+ __aligned_u64 user_wqe_buffers;
+ __aligned_u64 user_qp_buffer;
};
enum iwnes_memreg_type {
diff --git a/include/uapi/rdma/ocrdma-abi.h b/include/uapi/rdma/ocrdma-abi.h
index ad64a3cea1cd..284d47b41f6e 100644
--- a/include/uapi/rdma/ocrdma-abi.h
+++ b/include/uapi/rdma/ocrdma-abi.h
@@ -55,17 +55,17 @@ struct ocrdma_alloc_ucontext_resp {
__u32 wqe_size;
__u32 max_inline_data;
__u32 dpp_wqe_size;
- __u64 ah_tbl_page;
+ __aligned_u64 ah_tbl_page;
__u32 ah_tbl_len;
__u32 rqe_size;
__u8 fw_ver[32];
/* for future use/new features in progress */
- __u64 rsvd1;
- __u64 rsvd2;
+ __aligned_u64 rsvd1;
+ __aligned_u64 rsvd2;
};
struct ocrdma_alloc_pd_ureq {
- __u64 rsvd1;
+ __u32 rsvd[2];
};
struct ocrdma_alloc_pd_uresp {
@@ -73,7 +73,7 @@ struct ocrdma_alloc_pd_uresp {
__u32 dpp_enabled;
__u32 dpp_page_addr_hi;
__u32 dpp_page_addr_lo;
- __u64 rsvd1;
+ __u32 rsvd[2];
};
struct ocrdma_create_cq_ureq {
@@ -87,13 +87,13 @@ struct ocrdma_create_cq_uresp {
__u32 page_size;
__u32 num_pages;
__u32 max_hw_cqe;
- __u64 page_addr[MAX_CQ_PAGES];
- __u64 db_page_addr;
+ __aligned_u64 page_addr[MAX_CQ_PAGES];
+ __aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 phase_change;
/* for future use/new features in progress */
- __u64 rsvd1;
- __u64 rsvd2;
+ __aligned_u64 rsvd1;
+ __aligned_u64 rsvd2;
};
#define MAX_QP_PAGES 8
@@ -115,9 +115,9 @@ struct ocrdma_create_qp_uresp {
__u32 rq_page_size;
__u32 num_sq_pages;
__u32 num_rq_pages;
- __u64 sq_page_addr[MAX_QP_PAGES];
- __u64 rq_page_addr[MAX_QP_PAGES];
- __u64 db_page_addr;
+ __aligned_u64 sq_page_addr[MAX_QP_PAGES];
+ __aligned_u64 rq_page_addr[MAX_QP_PAGES];
+ __aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 dpp_credit;
__u32 dpp_offset;
@@ -126,8 +126,8 @@ struct ocrdma_create_qp_uresp {
__u32 db_sq_offset;
__u32 db_rq_offset;
__u32 db_shift;
- __u64 rsvd[11];
-} __packed;
+ __aligned_u64 rsvd[11];
+};
struct ocrdma_create_srq_uresp {
__u16 rq_dbid;
@@ -137,16 +137,16 @@ struct ocrdma_create_srq_uresp {
__u32 rq_page_size;
__u32 num_rq_pages;
- __u64 rq_page_addr[MAX_QP_PAGES];
- __u64 db_page_addr;
+ __aligned_u64 rq_page_addr[MAX_QP_PAGES];
+ __aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 num_rqe_allocated;
__u32 db_rq_offset;
__u32 db_shift;
- __u64 rsvd2;
- __u64 rsvd3;
+ __aligned_u64 rsvd2;
+ __aligned_u64 rsvd3;
};
#endif /* OCRDMA_ABI_USER_H */
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
index 261c6db4623e..8ba098900e9a 100644
--- a/include/uapi/rdma/qedr-abi.h
+++ b/include/uapi/rdma/qedr-abi.h
@@ -40,7 +40,7 @@
/* user kernel communication data structures. */
struct qedr_alloc_ucontext_resp {
- __u64 db_pa;
+ __aligned_u64 db_pa;
__u32 db_size;
__u32 max_send_wr;
@@ -53,24 +53,27 @@ struct qedr_alloc_ucontext_resp {
__u8 dpm_enabled;
__u8 wids_enabled;
__u16 wid_count;
+ __u32 reserved;
};
struct qedr_alloc_pd_ureq {
- __u64 rsvd1;
+ __aligned_u64 rsvd1;
};
struct qedr_alloc_pd_uresp {
__u32 pd_id;
+ __u32 reserved;
};
struct qedr_create_cq_ureq {
- __u64 addr;
- __u64 len;
+ __aligned_u64 addr;
+ __aligned_u64 len;
};
struct qedr_create_cq_uresp {
__u32 db_offset;
__u16 icid;
+ __u16 reserved;
};
struct qedr_create_qp_ureq {
@@ -79,17 +82,17 @@ struct qedr_create_qp_ureq {
/* SQ */
/* user space virtual address of SQ buffer */
- __u64 sq_addr;
+ __aligned_u64 sq_addr;
/* length of SQ buffer */
- __u64 sq_len;
+ __aligned_u64 sq_len;
/* RQ */
/* user space virtual address of RQ buffer */
- __u64 rq_addr;
+ __aligned_u64 rq_addr;
/* length of RQ buffer */
- __u64 rq_len;
+ __aligned_u64 rq_len;
};
struct qedr_create_qp_uresp {
@@ -105,6 +108,7 @@ struct qedr_create_qp_uresp {
__u16 rq_icid;
__u32 rq_db2_offset;
+ __u32 reserved;
};
#endif /* __QEDR_USER_H__ */
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 4c77e2a7b07e..0ce0943fc808 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -238,6 +238,14 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_RES_QP_GET, /* can dump */
+ RDMA_NLDEV_CMD_RES_CM_ID_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_CQ_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_MR_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_PD_GET, /* can dump */
+
RDMA_NLDEV_NUM_OPS
};
@@ -350,6 +358,49 @@ enum rdma_nldev_attr {
*/
RDMA_NLDEV_ATTR_RES_KERN_NAME, /* string */
+ RDMA_NLDEV_ATTR_RES_CM_ID, /* nested table */
+ RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY, /* nested table */
+ /*
+ * rdma_cm_id port space.
+ */
+ RDMA_NLDEV_ATTR_RES_PS, /* u32 */
+ /*
+ * Source and destination socket addresses
+ */
+ RDMA_NLDEV_ATTR_RES_SRC_ADDR, /* __kernel_sockaddr_storage */
+ RDMA_NLDEV_ATTR_RES_DST_ADDR, /* __kernel_sockaddr_storage */
+
+ RDMA_NLDEV_ATTR_RES_CQ, /* nested table */
+ RDMA_NLDEV_ATTR_RES_CQ_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_CQE, /* u32 */
+ RDMA_NLDEV_ATTR_RES_USECNT, /* u64 */
+ RDMA_NLDEV_ATTR_RES_POLL_CTX, /* u8 */
+
+ RDMA_NLDEV_ATTR_RES_MR, /* nested table */
+ RDMA_NLDEV_ATTR_RES_MR_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_RKEY, /* u32 */
+ RDMA_NLDEV_ATTR_RES_LKEY, /* u32 */
+ RDMA_NLDEV_ATTR_RES_IOVA, /* u64 */
+ RDMA_NLDEV_ATTR_RES_MRLEN, /* u64 */
+
+ RDMA_NLDEV_ATTR_RES_PD, /* nested table */
+ RDMA_NLDEV_ATTR_RES_PD_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, /* u32 */
+ RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, /* u32 */
+
+ /*
+	 * Provides the logical name and index of the netdevice that is
+	 * connected to the physical port. This information is relevant
+	 * for RoCE and iWARP.
+	 *
+	 * The netdevices associated with containers are supposed to be
+	 * exported together with the GID table once it is exposed
+	 * through netlink, because the associated netdevices are
+	 * properties of GIDs.
+ */
+ RDMA_NLDEV_ATTR_NDEV_INDEX, /* u32 */
+ RDMA_NLDEV_ATTR_NDEV_NAME, /* string */
+
RDMA_NLDEV_ATTR_MAX
};
#endif /* _UAPI_RDMA_NETLINK_H */
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
index c83ef0026079..e1269024af47 100644
--- a/include/uapi/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
@@ -70,6 +70,14 @@ enum {
RDMA_USER_CM_CMD_JOIN_MCAST
};
+/* See IBTA Annex A11, service ID bytes 4 & 5 */
+enum rdma_ucm_port_space {
+ RDMA_PS_IPOIB = 0x0002,
+ RDMA_PS_IB = 0x013F,
+ RDMA_PS_TCP = 0x0106,
+ RDMA_PS_UDP = 0x0111,
+};
+
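
Usage sketch (the qp_type value and uid cookie are illustrative) of the port-space field the new enum documents:

#include <rdma/rdma_user_cm.h>
#include <stdint.h>
#include <string.h>

static void prep_create_id(struct rdma_ucm_create_id *cmd, void *context,
                           struct rdma_ucm_create_id_resp *resp)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->uid      = (uintptr_t)context;     /* opaque cookie echoed back in events */
        cmd->response = (uintptr_t)resp;
        cmd->ps       = RDMA_PS_TCP;
        cmd->qp_type  = 2;                      /* illustrative: RC, per enum ib_qp_type */
}
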
/*
* command ABI structures.
*/
@@ -80,9 +88,9 @@ struct rdma_ucm_cmd_hdr {
};
struct rdma_ucm_create_id {
- __u64 uid;
- __u64 response;
- __u16 ps;
+ __aligned_u64 uid;
+ __aligned_u64 response;
+ __u16 ps; /* use enum rdma_ucm_port_space */
__u8 qp_type;
__u8 reserved[5];
};
@@ -92,7 +100,7 @@ struct rdma_ucm_create_id_resp {
};
struct rdma_ucm_destroy_id {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 reserved;
};
@@ -102,7 +110,7 @@ struct rdma_ucm_destroy_id_resp {
};
struct rdma_ucm_bind_ip {
- __u64 response;
+ __aligned_u64 response;
struct sockaddr_in6 addr;
__u32 id;
};
@@ -143,13 +151,13 @@ enum {
};
struct rdma_ucm_query {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 option;
};
struct rdma_ucm_query_route_resp {
- __u64 node_guid;
+ __aligned_u64 node_guid;
struct ib_user_path_rec ib_route[2];
struct sockaddr_in6 src_addr;
struct sockaddr_in6 dst_addr;
@@ -159,7 +167,7 @@ struct rdma_ucm_query_route_resp {
};
struct rdma_ucm_query_addr_resp {
- __u64 node_guid;
+ __aligned_u64 node_guid;
__u8 port_num;
__u8 reserved;
__u16 pkey;
@@ -210,7 +218,7 @@ struct rdma_ucm_listen {
};
struct rdma_ucm_accept {
- __u64 uid;
+ __aligned_u64 uid;
struct rdma_ucm_conn_param conn_param;
__u32 id;
__u32 reserved;
@@ -228,7 +236,7 @@ struct rdma_ucm_disconnect {
};
struct rdma_ucm_init_qp_attr {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 qp_state;
};
@@ -239,8 +247,8 @@ struct rdma_ucm_notify {
};
struct rdma_ucm_join_ip_mcast {
- __u64 response; /* rdma_ucm_create_id_resp */
- __u64 uid;
+ __aligned_u64 response; /* rdma_ucm_create_id_resp */
+ __aligned_u64 uid;
struct sockaddr_in6 addr;
__u32 id;
};
@@ -253,8 +261,8 @@ enum {
};
struct rdma_ucm_join_mcast {
- __u64 response; /* rdma_ucma_create_id_resp */
- __u64 uid;
+ __aligned_u64 response; /* rdma_ucma_create_id_resp */
+ __aligned_u64 uid;
__u32 id;
__u16 addr_size;
__u16 join_flags;
@@ -262,18 +270,23 @@ struct rdma_ucm_join_mcast {
};
struct rdma_ucm_get_event {
- __u64 response;
+ __aligned_u64 response;
};
struct rdma_ucm_event_resp {
- __u64 uid;
+ __aligned_u64 uid;
__u32 id;
__u32 event;
__u32 status;
+ /*
+ * NOTE: This union is not aligned to 8 bytes so none of the union
+ * members may contain a u64 or anything with higher alignment than 4.
+ */
union {
struct rdma_ucm_conn_param conn;
struct rdma_ucm_ud_param ud;
} param;
+ __u32 reserved;
};
/* Option levels */
@@ -291,7 +304,7 @@ enum {
};
struct rdma_ucm_set_option {
- __u64 optval;
+ __aligned_u64 optval;
__u32 id;
__u32 level;
__u32 optname;
@@ -299,7 +312,7 @@ struct rdma_ucm_set_option {
};
struct rdma_ucm_migrate_id {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 fd;
};
diff --git a/include/uapi/rdma/rdma_user_ioctl.h b/include/uapi/rdma/rdma_user_ioctl.h
index 46de0885e800..d223f4164a0f 100644
--- a/include/uapi/rdma/rdma_user_ioctl.h
+++ b/include/uapi/rdma/rdma_user_ioctl.h
@@ -34,49 +34,13 @@
#ifndef RDMA_USER_IOCTL_H
#define RDMA_USER_IOCTL_H
-#include <linux/types.h>
-#include <linux/ioctl.h>
#include <rdma/ib_user_mad.h>
#include <rdma/hfi/hfi1_ioctl.h>
+#include <rdma/rdma_user_ioctl_cmds.h>
-/* Documentation/ioctl/ioctl-number.txt */
-#define RDMA_IOCTL_MAGIC 0x1b
/* Legacy name, for user space applications that already use it */
#define IB_IOCTL_MAGIC RDMA_IOCTL_MAGIC
-#define RDMA_VERBS_IOCTL \
- _IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr)
-
-#define UVERBS_ID_NS_MASK 0xF000
-#define UVERBS_ID_NS_SHIFT 12
-
-enum {
- /* User input */
- UVERBS_ATTR_F_MANDATORY = 1U << 0,
- /*
- * Valid output bit should be ignored and considered set in
- * mandatory fields. This bit is kernel output.
- */
- UVERBS_ATTR_F_VALID_OUTPUT = 1U << 1,
-};
-
-struct ib_uverbs_attr {
- __u16 attr_id; /* command specific type attribute */
- __u16 len; /* only for pointers */
- __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */
- __u16 reserved;
- __aligned_u64 data; /* ptr to command, inline data or idr/fd */
-};
-
-struct ib_uverbs_ioctl_hdr {
- __u16 length;
- __u16 object_id;
- __u16 method_id;
- __u16 num_attrs;
- __aligned_u64 reserved;
- struct ib_uverbs_attr attrs[0];
-};
-
/*
* General blocks assignments
 * It is closed on purpose; do not expose it to user space
diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h
new file mode 100644
index 000000000000..1da5a1e1f3a8
--- /dev/null
+++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RDMA_USER_IOCTL_CMDS_H
+#define RDMA_USER_IOCTL_CMDS_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* Documentation/ioctl/ioctl-number.txt */
+#define RDMA_IOCTL_MAGIC 0x1b
+#define RDMA_VERBS_IOCTL \
+ _IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr)
+
+enum {
+ /* User input */
+ UVERBS_ATTR_F_MANDATORY = 1U << 0,
+ /*
+ * Valid output bit should be ignored and considered set in
+ * mandatory fields. This bit is kernel output.
+ */
+ UVERBS_ATTR_F_VALID_OUTPUT = 1U << 1,
+};
+
+struct ib_uverbs_attr {
+ __u16 attr_id; /* command specific type attribute */
+ __u16 len; /* only for pointers */
+ __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */
+ union {
+ struct {
+ __u8 elem_id;
+ __u8 reserved;
+ } enum_data;
+ __u16 reserved;
+ } attr_data;
+ __aligned_u64 data; /* ptr to command, inline data or idr/fd */
+};
+
+struct ib_uverbs_ioctl_hdr {
+ __u16 length;
+ __u16 object_id;
+ __u16 method_id;
+ __u16 num_attrs;
+ __aligned_u64 reserved1;
+ __u32 driver_id;
+ __u32 reserved2;
+ struct ib_uverbs_attr attrs[0];
+};
+
+enum rdma_driver_id {
+ RDMA_DRIVER_UNKNOWN,
+ RDMA_DRIVER_MLX5,
+ RDMA_DRIVER_MLX4,
+ RDMA_DRIVER_CXGB3,
+ RDMA_DRIVER_CXGB4,
+ RDMA_DRIVER_MTHCA,
+ RDMA_DRIVER_BNXT_RE,
+ RDMA_DRIVER_OCRDMA,
+ RDMA_DRIVER_NES,
+ RDMA_DRIVER_I40IW,
+ RDMA_DRIVER_VMW_PVRDMA,
+ RDMA_DRIVER_QEDR,
+ RDMA_DRIVER_HNS,
+ RDMA_DRIVER_USNIC,
+ RDMA_DRIVER_RXE,
+ RDMA_DRIVER_HFI1,
+ RDMA_DRIVER_QIB,
+};
+
+#endif
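
To show how the header, attributes and driver_id fit together (the CQ-destroy flow is illustrative, not taken from this patch), a request with one inline handle attribute and one pointer response attribute could be issued like this:

#include <rdma/rdma_user_ioctl_cmds.h>
#include <rdma/ib_user_ioctl_cmds.h>
#include <rdma/ib_user_verbs.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <string.h>

static int destroy_cq_example(int cmd_fd, __u32 cq_handle)
{
        struct ib_uverbs_destroy_cq_resp resp;
        struct {
                struct ib_uverbs_ioctl_hdr hdr;
                struct ib_uverbs_attr attrs[2];
        } req;

        memset(&req, 0, sizeof(req));
        req.hdr.length    = sizeof(req);
        req.hdr.object_id = UVERBS_OBJECT_CQ;
        req.hdr.method_id = UVERBS_METHOD_CQ_DESTROY;
        req.hdr.num_attrs = 2;
        req.hdr.driver_id = RDMA_DRIVER_MLX5;           /* illustrative provider */

        req.attrs[0].attr_id = UVERBS_ATTR_DESTROY_CQ_HANDLE;
        req.attrs[0].flags   = UVERBS_ATTR_F_MANDATORY;
        req.attrs[0].data    = cq_handle;               /* object handle passed inline */

        req.attrs[1].attr_id = UVERBS_ATTR_DESTROY_CQ_RESP;
        req.attrs[1].flags   = UVERBS_ATTR_F_MANDATORY;
        req.attrs[1].len     = sizeof(resp);            /* pointer attribute: len + user ptr */
        req.attrs[1].data    = (uintptr_t)&resp;

        return ioctl(cmd_fd, RDMA_VERBS_IOCTL, &req);   /* cmd_fd: open uverbs char device */
}
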
diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h
index bdeea948b2f3..1f8a9e7daea4 100644
--- a/include/uapi/rdma/rdma_user_rxe.h
+++ b/include/uapi/rdma/rdma_user_rxe.h
@@ -35,6 +35,9 @@
#define RDMA_USER_RXE_H
#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
union rxe_gid {
__u8 raw[16];
@@ -55,16 +58,17 @@ struct rxe_global_route {
struct rxe_av {
__u8 port_num;
__u8 network_type;
+ __u16 reserved1;
+ __u32 reserved2;
struct rxe_global_route grh;
union {
- struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
};
struct rxe_send_wr {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 num_sge;
__u32 opcode;
__u32 send_flags;
@@ -74,36 +78,42 @@ struct rxe_send_wr {
} ex;
union {
struct {
- __u64 remote_addr;
+ __aligned_u64 remote_addr;
__u32 rkey;
+ __u32 reserved;
} rdma;
struct {
- __u64 remote_addr;
- __u64 compare_add;
- __u64 swap;
+ __aligned_u64 remote_addr;
+ __aligned_u64 compare_add;
+ __aligned_u64 swap;
__u32 rkey;
+ __u32 reserved;
} atomic;
struct {
__u32 remote_qpn;
__u32 remote_qkey;
__u16 pkey_index;
} ud;
+ /* reg is only used by the kernel and is not part of the uapi */
struct {
- struct ib_mr *mr;
+ union {
+ struct ib_mr *mr;
+ __aligned_u64 reserved;
+ };
__u32 key;
- int access;
+ __u32 access;
} reg;
} wr;
};
struct rxe_sge {
- __u64 addr;
+ __aligned_u64 addr;
__u32 length;
__u32 lkey;
};
struct mminfo {
- __u64 offset;
+ __aligned_u64 offset;
__u32 size;
__u32 pad;
};
@@ -114,6 +124,7 @@ struct rxe_dma_info {
__u32 cur_sge;
__u32 num_sge;
__u32 sge_offset;
+ __u32 reserved;
union {
__u8 inline_data[0];
struct rxe_sge sge[0];
@@ -125,7 +136,7 @@ struct rxe_send_wqe {
struct rxe_av av;
__u32 status;
__u32 state;
- __u64 iova;
+ __aligned_u64 iova;
__u32 mask;
__u32 first_psn;
__u32 last_psn;
@@ -136,10 +147,33 @@ struct rxe_send_wqe {
};
struct rxe_recv_wqe {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 num_sge;
__u32 padding;
struct rxe_dma_info dma;
};
+struct rxe_create_cq_resp {
+ struct mminfo mi;
+};
+
+struct rxe_resize_cq_resp {
+ struct mminfo mi;
+};
+
+struct rxe_create_qp_resp {
+ struct mminfo rq_mi;
+ struct mminfo sq_mi;
+};
+
+struct rxe_create_srq_resp {
+ struct mminfo mi;
+ __u32 srq_num;
+ __u32 reserved;
+};
+
+struct rxe_modify_srq_cmd {
+ __aligned_u64 mmap_info_addr;
+};
+
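
Sketch (the surrounding command flow is illustrative) of what mmap_info_addr is for: the kernel fills the pointed-to mminfo, which the caller then maps on the verbs fd.

#include <rdma/rdma_user_rxe.h>
#include <stdint.h>
#include <sys/mman.h>

static void *map_resized_srq(int cmd_fd, struct rxe_modify_srq_cmd *cmd,
                             struct mminfo *mi)
{
        cmd->mmap_info_addr = (uintptr_t)mi;
        /* ... pass 'cmd' as MODIFY_SRQ driver data; the kernel fills *mi ... */
        return mmap(NULL, mi->size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, cmd_fd, mi->offset);
}
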
#endif /* RDMA_USER_RXE_H */
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index 02ca0d0f1eb7..d13fd490b66d 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -143,7 +143,7 @@ struct pvrdma_alloc_pd_resp {
};
struct pvrdma_create_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
@@ -154,13 +154,13 @@ struct pvrdma_create_cq_resp {
};
struct pvrdma_resize_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
struct pvrdma_create_srq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
@@ -171,25 +171,25 @@ struct pvrdma_create_srq_resp {
};
struct pvrdma_create_qp {
- __u64 rbuf_addr;
- __u64 sbuf_addr;
+ __aligned_u64 rbuf_addr;
+ __aligned_u64 sbuf_addr;
__u32 rbuf_size;
__u32 sbuf_size;
- __u64 qp_addr;
+ __aligned_u64 qp_addr;
};
/* PVRDMA masked atomic compare and swap */
struct pvrdma_ex_cmp_swap {
- __u64 swap_val;
- __u64 compare_val;
- __u64 swap_mask;
- __u64 compare_mask;
+ __aligned_u64 swap_val;
+ __aligned_u64 compare_val;
+ __aligned_u64 swap_mask;
+ __aligned_u64 compare_mask;
};
/* PVRDMA masked atomic fetch and add */
struct pvrdma_ex_fetch_add {
- __u64 add_val;
- __u64 field_boundary;
+ __aligned_u64 add_val;
+ __aligned_u64 field_boundary;
};
/* PVRDMA address vector. */
@@ -207,14 +207,14 @@ struct pvrdma_av {
/* PVRDMA scatter/gather entry */
struct pvrdma_sge {
- __u64 addr;
+ __aligned_u64 addr;
__u32 length;
__u32 lkey;
};
/* PVRDMA receive queue work request */
struct pvrdma_rq_wqe_hdr {
- __u64 wr_id; /* wr id */
+ __aligned_u64 wr_id; /* wr id */
__u32 num_sge; /* size of s/g array */
__u32 total_len; /* reserved */
};
@@ -222,7 +222,7 @@ struct pvrdma_rq_wqe_hdr {
/* PVRDMA send queue work request */
struct pvrdma_sq_wqe_hdr {
- __u64 wr_id; /* wr id */
+ __aligned_u64 wr_id; /* wr id */
__u32 num_sge; /* size of s/g array */
__u32 total_len; /* reserved */
__u32 opcode; /* operation type */
@@ -234,19 +234,19 @@ struct pvrdma_sq_wqe_hdr {
__u32 reserved;
union {
struct {
- __u64 remote_addr;
+ __aligned_u64 remote_addr;
__u32 rkey;
__u8 reserved[4];
} rdma;
struct {
- __u64 remote_addr;
- __u64 compare_add;
- __u64 swap;
+ __aligned_u64 remote_addr;
+ __aligned_u64 compare_add;
+ __aligned_u64 swap;
__u32 rkey;
__u32 reserved;
} atomic;
struct {
- __u64 remote_addr;
+ __aligned_u64 remote_addr;
__u32 log_arg_sz;
__u32 rkey;
union {
@@ -255,13 +255,14 @@ struct pvrdma_sq_wqe_hdr {
} wr_data;
} masked_atomics;
struct {
- __u64 iova_start;
- __u64 pl_pdir_dma;
+ __aligned_u64 iova_start;
+ __aligned_u64 pl_pdir_dma;
__u32 page_shift;
__u32 page_list_len;
__u32 length;
__u32 access_flags;
__u32 rkey;
+ __u32 reserved;
} fast_reg;
struct {
__u32 remote_qpn;
@@ -274,8 +275,8 @@ struct pvrdma_sq_wqe_hdr {
/* Completion queue element. */
struct pvrdma_cqe {
- __u64 wr_id;
- __u64 qp;
+ __aligned_u64 wr_id;
+ __aligned_u64 qp;
__u32 opcode;
__u32 status;
__u32 byte_len;
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 07d61583fd02..ed0a120d4f08 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -242,6 +242,7 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_DSD_U16_BE ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */
#define SNDRV_PCM_FORMAT_DSD_U32_BE ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */
#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_BE
+#define SNDRV_PCM_FORMAT_FIRST SNDRV_PCM_FORMAT_S8
#ifdef SNDRV_LITTLE_ENDIAN
#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE
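
With both ends of the range named, code can walk the whole format space; a kernel-style sketch (note the numbering contains gaps, so each value still needs validating):

static void for_each_pcm_format(void (*fn)(snd_pcm_format_t fmt))
{
        int i;

        for (i = (__force int)SNDRV_PCM_FORMAT_FIRST;
             i <= (__force int)SNDRV_PCM_FORMAT_LAST; i++)
                fn((__force snd_pcm_format_t)i);
}
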