author    Dmitry Torokhov <dmitry.torokhov@gmail.com>  2017-05-29 19:54:21 -0700
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>  2017-05-29 19:54:21 -0700
commit    d8f797c60661a90ee26ca9330cf85ede9aa2ec17 (patch)
tree      5038609885fc3e4cb7f329d974875ac4411c6af5 /include/linux
parent    Input: tsc2007 - move header file out of I2C realm (diff)
parent    Linux 4.12-rc3 (diff)
download  wireguard-linux-d8f797c60661a90ee26ca9330cf85ede9aa2ec17.tar.xz
          wireguard-linux-d8f797c60661a90ee26ca9330cf85ede9aa2ec17.zip
Merge tag 'v4.12-rc3' into next
Sync with mainline to bring in changes in platform drivers dropping calls to sparse_keymap_free() so that we can remove it for good.
Diffstat (limited to 'include/linux')
-rw-r--r-- include/linux/acpi.h | 72
-rw-r--r-- include/linux/acpi_iort.h | 6
-rw-r--r-- include/linux/amba/pl080.h | 50
-rw-r--r-- include/linux/amba/pl330.h | 35
-rw-r--r-- include/linux/amigaffs.h | 144
-rw-r--r-- include/linux/ata.h | 5
-rw-r--r-- include/linux/atmel_serial.h | 169
-rw-r--r-- include/linux/atomic.h | 46
-rw-r--r-- include/linux/audit.h | 7
-rw-r--r-- include/linux/backing-dev-defs.h | 8
-rw-r--r-- include/linux/backing-dev.h | 16
-rw-r--r-- include/linux/bcma/bcma_driver_pci.h | 2
-rw-r--r-- include/linux/bio.h | 13
-rw-r--r-- include/linux/blk-mq.h | 31
-rw-r--r-- include/linux/blk_types.h | 47
-rw-r--r-- include/linux/blkdev.h | 130
-rw-r--r-- include/linux/bpf.h | 36
-rw-r--r-- include/linux/bpf_types.h | 36
-rw-r--r-- include/linux/bpf_verifier.h | 9
-rw-r--r-- include/linux/brcmphy.h | 3
-rw-r--r-- include/linux/buffer_head.h | 2
-rw-r--r-- include/linux/bug.h | 2
-rw-r--r-- include/linux/can/core.h | 11
-rw-r--r-- include/linux/can/dev/peak_canfd.h | 308
-rw-r--r-- include/linux/can/platform/ti_hecc.h | 44
-rw-r--r-- include/linux/ccp.h | 68
-rw-r--r-- include/linux/cdev.h | 5
-rw-r--r-- include/linux/ceph/ceph_debug.h | 6
-rw-r--r-- include/linux/ceph/ceph_features.h | 4
-rw-r--r-- include/linux/ceph/ceph_fs.h | 14
-rw-r--r-- include/linux/ceph/cls_lock_client.h | 5
-rw-r--r-- include/linux/ceph/libceph.h | 8
-rw-r--r-- include/linux/ceph/mdsmap.h | 7
-rw-r--r-- include/linux/ceph/osd_client.h | 7
-rw-r--r-- include/linux/ceph/pagelist.h | 6
-rw-r--r-- include/linux/cgroup-defs.h | 12
-rw-r--r-- include/linux/cgroup.h | 29
-rw-r--r-- include/linux/clk.h | 4
-rw-r--r-- include/linux/clk/tegra.h | 3
-rw-r--r-- include/linux/clk/ti.h | 55
-rw-r--r-- include/linux/clockchips.h | 1
-rw-r--r-- include/linux/clocksource.h | 2
-rw-r--r-- include/linux/cma.h | 6
-rw-r--r-- include/linux/coda_psdev.h | 1
-rw-r--r-- include/linux/compat.h | 14
-rw-r--r-- include/linux/console.h | 2
-rw-r--r-- include/linux/coresight.h | 2
-rw-r--r-- include/linux/cpufreq.h | 7
-rw-r--r-- include/linux/cpuhotplug.h | 2
-rw-r--r-- include/linux/cpumask.h | 14
-rw-r--r-- include/linux/cpuset.h | 4
-rw-r--r-- include/linux/crash_core.h | 69
-rw-r--r-- include/linux/crypto.h | 2
-rw-r--r-- include/linux/cryptohash.h | 5
-rw-r--r-- include/linux/dax.h | 81
-rw-r--r-- include/linux/debugfs.h | 2
-rw-r--r-- include/linux/dell-led.h | 6
-rw-r--r-- include/linux/devfreq.h | 30
-rw-r--r-- include/linux/devfreq_cooling.h | 19
-rw-r--r-- include/linux/device-mapper.h | 47
-rw-r--r-- include/linux/dma-buf.h | 22
-rw-r--r-- include/linux/dma-fence-array.h | 2
-rw-r--r-- include/linux/dma-fence.h | 4
-rw-r--r-- include/linux/dma-iommu.h | 6
-rw-r--r-- include/linux/dma-mapping.h | 12
-rw-r--r-- include/linux/dma_remapping.h | 1
-rw-r--r-- include/linux/edac.h | 30
-rw-r--r-- include/linux/efi-bgrt.h | 5
-rw-r--r-- include/linux/efi.h | 5
-rw-r--r-- include/linux/elevator.h | 14
-rw-r--r-- include/linux/elf.h | 2
-rw-r--r-- include/linux/etherdevice.h | 15
-rw-r--r-- include/linux/ethtool.h | 2
-rw-r--r-- include/linux/extcon.h | 21
-rw-r--r-- include/linux/f2fs_fs.h | 17
-rw-r--r-- include/linux/fcntl.h | 6
-rw-r--r-- include/linux/filter.h | 30
-rw-r--r-- include/linux/firmware/meson/meson_sm.h | 4
-rw-r--r-- include/linux/flex_array.h | 67
-rw-r--r-- include/linux/fpga/altera-pr-ip-core.h | 29
-rw-r--r-- include/linux/fpga/fpga-mgr.h | 4
-rw-r--r-- include/linux/fs.h | 29
-rw-r--r-- include/linux/fscrypt_common.h | 11
-rw-r--r-- include/linux/fscrypt_notsupp.h | 9
-rw-r--r-- include/linux/fscrypt_supp.h | 74
-rw-r--r-- include/linux/fsnotify_backend.h | 95
-rw-r--r-- include/linux/ftrace.h | 94
-rw-r--r-- include/linux/fwnode.h | 12
-rw-r--r-- include/linux/genhd.h | 12
-rw-r--r-- include/linux/gfp.h | 22
-rw-r--r-- include/linux/gpio/consumer.h | 12
-rw-r--r-- include/linux/gpio/driver.h | 6
-rw-r--r-- include/linux/gpio/gpio-reg.h | 13
-rw-r--r-- include/linux/hdmi.h | 1
-rw-r--r-- include/linux/hid-sensor-hub.h | 2
-rw-r--r-- include/linux/hid-sensor-ids.h | 8
-rw-r--r-- include/linux/hid.h | 5
-rw-r--r-- include/linux/hiddev.h | 12
-rw-r--r-- include/linux/host1x.h | 1
-rw-r--r-- include/linux/hrtimer.h | 6
-rw-r--r-- include/linux/hwmon.h | 2
-rw-r--r-- include/linux/hyperv.h | 127
-rw-r--r-- include/linux/i2c.h | 10
-rw-r--r-- include/linux/i2c/i2c-hid.h | 6
-rw-r--r-- include/linux/ide.h | 2
-rw-r--r-- include/linux/ieee80211.h | 77
-rw-r--r-- include/linux/if_bridge.h | 1
-rw-r--r-- include/linux/if_vlan.h | 18
-rw-r--r-- include/linux/inet.h | 6
-rw-r--r-- include/linux/inetdevice.h | 4
-rw-r--r-- include/linux/init.h | 4
-rw-r--r-- include/linux/init_task.h | 17
-rw-r--r-- include/linux/intel-iommu.h | 18
-rw-r--r-- include/linux/interrupt.h | 2
-rw-r--r-- include/linux/io.h | 21
-rw-r--r-- include/linux/iomap.h | 1
-rw-r--r-- include/linux/iommu.h | 58
-rw-r--r-- include/linux/iova.h | 91
-rw-r--r-- include/linux/ipc.h | 7
-rw-r--r-- include/linux/ipv6.h | 2
-rw-r--r-- include/linux/irqchip/arm-gic-v3.h | 14
-rw-r--r-- include/linux/irqchip/arm-gic.h | 3
-rw-r--r-- include/linux/irqchip/mips-gic.h | 1
-rw-r--r-- include/linux/jbd2.h | 2
-rw-r--r-- include/linux/jiffies.h | 11
-rw-r--r-- include/linux/kbuild.h | 6
-rw-r--r-- include/linux/kernel.h | 2
-rw-r--r-- include/linux/kexec.h | 57
-rw-r--r-- include/linux/key-type.h | 8
-rw-r--r-- include/linux/key.h | 39
-rw-r--r-- include/linux/kobject.h | 2
-rw-r--r-- include/linux/kprobes.h | 6
-rw-r--r-- include/linux/kref.h | 6
-rw-r--r-- include/linux/ksm.h | 5
-rw-r--r-- include/linux/kvm_host.h | 92
-rw-r--r-- include/linux/leds-pca9532.h | 4
-rw-r--r-- include/linux/leds.h | 14
-rw-r--r-- include/linux/libnvdimm.h | 8
-rw-r--r-- include/linux/lightnvm.h | 13
-rw-r--r-- include/linux/livepatch.h | 68
-rw-r--r-- include/linux/lockd/bind.h | 24
-rw-r--r-- include/linux/lockd/lockd.h | 2
-rw-r--r-- include/linux/lockdep.h | 3
-rw-r--r-- include/linux/lsm_hooks.h | 34
-rw-r--r-- include/linux/mailbox/brcm-message.h | 14
-rw-r--r-- include/linux/memblock.h | 2
-rw-r--r-- include/linux/memcontrol.h | 201
-rw-r--r-- include/linux/mfd/arizona/pdata.h | 7
-rw-r--r-- include/linux/mfd/axp20x.h | 44
-rw-r--r-- include/linux/mfd/cros_ec.h | 21
-rw-r--r-- include/linux/mfd/da9062/core.h | 29
-rw-r--r-- include/linux/mfd/da9062/registers.h | 5
-rw-r--r-- include/linux/mfd/intel_soc_pmic_bxtwc.h (renamed from include/linux/mfd/intel_bxtwc.h) | 4
-rw-r--r-- include/linux/mfd/motorola-cpcap.h | 5
-rw-r--r-- include/linux/mfd/mxs-lradc.h | 187
-rw-r--r-- include/linux/mfd/stm32-timers.h | 2
-rw-r--r-- include/linux/mfd/sun4i-gpadc.h | 6
-rw-r--r-- include/linux/mfd/syscon/atmel-smc.h | 237
-rw-r--r-- include/linux/mfd/syscon/exynos5-pmu.h | 33
-rw-r--r-- include/linux/mfd/syscon/imx7-iomuxc-gpr.h | 4
-rw-r--r-- include/linux/mfd/ti-lmu-register.h | 280
-rw-r--r-- include/linux/mfd/ti-lmu.h | 87
-rw-r--r-- include/linux/mfd/wm831x/core.h | 9
-rw-r--r-- include/linux/mg_disk.h | 45
-rw-r--r-- include/linux/migrate.h | 5
-rw-r--r-- include/linux/mlx4/device.h | 5
-rw-r--r-- include/linux/mlx5/device.h | 10
-rw-r--r-- include/linux/mlx5/driver.h | 35
-rw-r--r-- include/linux/mlx5/fs.h | 20
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 153
-rw-r--r-- include/linux/mlx5/qp.h | 10
-rw-r--r-- include/linux/mm.h | 41
-rw-r--r-- include/linux/mm_types.h | 5
-rw-r--r-- include/linux/mmc/card.h | 10
-rw-r--r-- include/linux/mmc/host.h | 6
-rw-r--r-- include/linux/mmc/sdio_func.h | 2
-rw-r--r-- include/linux/mmu_notifier.h | 13
-rw-r--r-- include/linux/mmzone.h | 12
-rw-r--r-- include/linux/mod_devicetable.h | 10
-rw-r--r-- include/linux/module.h | 12
-rw-r--r-- include/linux/moduleparam.h | 65
-rw-r--r-- include/linux/mpls.h | 5
-rw-r--r-- include/linux/mtd/mtd.h | 7
-rw-r--r-- include/linux/mtd/nand.h | 96
-rw-r--r-- include/linux/namei.h | 1
-rw-r--r-- include/linux/nd.h | 12
-rw-r--r-- include/linux/net.h | 3
-rw-r--r-- include/linux/netdev_features.h | 8
-rw-r--r-- include/linux/netdevice.h | 66
-rw-r--r-- include/linux/netfilter/nfnetlink.h | 5
-rw-r--r-- include/linux/netfilter/x_tables.h | 2
-rw-r--r-- include/linux/netfilter_bridge/ebtables.h | 11
-rw-r--r-- include/linux/netlink.h | 38
-rw-r--r-- include/linux/nfs_fs.h | 17
-rw-r--r-- include/linux/nfs_fs_sb.h | 2
-rw-r--r-- include/linux/nfs_page.h | 5
-rw-r--r-- include/linux/nfs_xdr.h | 3
-rw-r--r-- include/linux/nvme-fc-driver.h | 114
-rw-r--r-- include/linux/nvme-fc.h | 68
-rw-r--r-- include/linux/nvme.h | 29
-rw-r--r-- include/linux/of.h | 7
-rw-r--r-- include/linux/of_device.h | 17
-rw-r--r-- include/linux/of_fdt.h | 6
-rw-r--r-- include/linux/of_gpio.h | 1
-rw-r--r-- include/linux/of_mdio.h | 4
-rw-r--r-- include/linux/of_pci.h | 11
-rw-r--r-- include/linux/of_platform.h | 12
-rw-r--r-- include/linux/page-isolation.h | 5
-rw-r--r-- include/linux/pagemap.h | 4
-rw-r--r-- include/linux/pci-ecam.h | 3
-rw-r--r-- include/linux/pci-ep-cfs.h | 41
-rw-r--r-- include/linux/pci-epc.h | 144
-rw-r--r-- include/linux/pci-epf.h | 162
-rw-r--r-- include/linux/pci.h | 122
-rw-r--r-- include/linux/pci_ids.h | 2
-rw-r--r-- include/linux/pe.h | 177
-rw-r--r-- include/linux/percpu-refcount.h | 1
-rw-r--r-- include/linux/percpu.h | 1
-rw-r--r-- include/linux/perf/arm_pmu.h | 29
-rw-r--r-- include/linux/perf_event.h | 17
-rw-r--r-- include/linux/phy.h | 109
-rw-r--r-- include/linux/pinctrl/pinconf-generic.h | 3
-rw-r--r-- include/linux/pinctrl/pinctrl.h | 3
-rw-r--r-- include/linux/platform_data/iommu-omap.h | 20
-rw-r--r-- include/linux/platform_data/isl9305.h | 2
-rw-r--r-- include/linux/platform_data/itco_wdt.h | 4
-rw-r--r-- include/linux/platform_data/pn544.h | 43
-rw-r--r-- include/linux/platform_data/st21nfca.h | 33
-rw-r--r-- include/linux/platform_data/video-imxfb.h | 1
-rw-r--r-- include/linux/pm_domain.h | 2
-rw-r--r-- include/linux/pm_wakeup.h | 25
-rw-r--r-- include/linux/pmem.h | 23
-rw-r--r-- include/linux/poll.h | 56
-rw-r--r-- include/linux/posix-clock.h | 10
-rw-r--r-- include/linux/posix-timers.h | 20
-rw-r--r-- include/linux/power/bq24190_charger.h | 16
-rw-r--r-- include/linux/power/max17042_battery.h | 9
-rw-r--r-- include/linux/printk.h | 4
-rw-r--r-- include/linux/proc_ns.h | 2
-rw-r--r-- include/linux/property.h | 26
-rw-r--r-- include/linux/pstore.h | 156
-rw-r--r-- include/linux/ptr_ring.h | 63
-rw-r--r-- include/linux/ptrace.h | 7
-rw-r--r-- include/linux/qcom_scm.h | 6
-rw-r--r-- include/linux/qed/common_hsi.h | 30
-rw-r--r-- include/linux/qed/eth_common.h | 3
-rw-r--r-- include/linux/qed/fcoe_common.h | 180
-rw-r--r-- include/linux/qed/iscsi_common.h | 241
-rw-r--r-- include/linux/qed/qed_eth_if.h | 32
-rw-r--r-- include/linux/qed/qed_if.h | 64
-rw-r--r-- include/linux/qed/qed_iscsi_if.h | 2
-rw-r--r-- include/linux/qed/qed_roce_if.h | 2
-rw-r--r-- include/linux/qed/rdma_common.h | 3
-rw-r--r-- include/linux/qed/roce_common.h | 17
-rw-r--r-- include/linux/qed/storage_common.h | 30
-rw-r--r-- include/linux/qed/tcp_common.h | 1
-rw-r--r-- include/linux/quotaops.h | 1
-rw-r--r-- include/linux/ras.h | 13
-rw-r--r-- include/linux/rcu_node_tree.h | 99
-rw-r--r-- include/linux/rcu_segcblist.h | 90
-rw-r--r-- include/linux/rculist.h | 3
-rw-r--r-- include/linux/rcupdate.h | 22
-rw-r--r-- include/linux/rcutiny.h | 24
-rw-r--r-- include/linux/rcutree.h | 5
-rw-r--r-- include/linux/refcount.h | 19
-rw-r--r-- include/linux/regulator/arizona-ldo1.h | 24
-rw-r--r-- include/linux/regulator/arizona-micsupp.h | 21
-rw-r--r-- include/linux/regulator/consumer.h | 1
-rw-r--r-- include/linux/regulator/driver.h | 18
-rw-r--r-- include/linux/regulator/machine.h | 3
-rw-r--r-- include/linux/regulator/pfuze100.h | 1
-rw-r--r-- include/linux/reservation.h | 20
-rw-r--r-- include/linux/reset.h | 22
-rw-r--r-- include/linux/rhashtable.h | 68
-rw-r--r-- include/linux/ring_buffer.h | 2
-rw-r--r-- include/linux/rmap.h | 47
-rw-r--r-- include/linux/rodata_test.h | 1
-rw-r--r-- include/linux/rpmsg/qcom_smd.h | 2
-rw-r--r-- include/linux/sbitmap.h | 55
-rw-r--r-- include/linux/sched.h | 31
-rw-r--r-- include/linux/sched/mm.h | 38
-rw-r--r-- include/linux/sched/rt.h | 23
-rw-r--r-- include/linux/sched/signal.h | 1
-rw-r--r-- include/linux/security.h | 20
-rw-r--r-- include/linux/sem.h | 3
-rw-r--r-- include/linux/serdev.h | 73
-rw-r--r-- include/linux/serial_core.h | 1
-rw-r--r-- include/linux/signal.h | 4
-rw-r--r-- include/linux/skbuff.h | 7
-rw-r--r-- include/linux/slab.h | 6
-rw-r--r-- include/linux/smp.h | 12
-rw-r--r-- include/linux/soc/qcom/smd.h | 139
-rw-r--r-- include/linux/soc/qcom/wcnss_ctrl.h | 11
-rw-r--r-- include/linux/soc/renesas/rcar-rst.h | 5
-rw-r--r-- include/linux/soc/samsung/exynos-regs-pmu.h | 16
-rw-r--r-- include/linux/sock_diag.h | 1
-rw-r--r-- include/linux/spi/spi.h | 10
-rw-r--r-- include/linux/splice.h | 4
-rw-r--r-- include/linux/srcu.h | 84
-rw-r--r-- include/linux/srcuclassic.h | 115
-rw-r--r-- include/linux/srcutiny.h | 93
-rw-r--r-- include/linux/srcutree.h | 150
-rw-r--r-- include/linux/stacktrace.h | 9
-rw-r--r-- include/linux/stat.h | 1
-rw-r--r-- include/linux/stmmac.h | 41
-rw-r--r-- include/linux/string.h | 18
-rw-r--r-- include/linux/sunrpc/rpc_rdma.h | 3
-rw-r--r-- include/linux/sunrpc/svc.h | 4
-rw-r--r-- include/linux/sunrpc/svc_rdma.h | 75
-rw-r--r-- include/linux/suspend.h | 7
-rw-r--r-- include/linux/swap.h | 5
-rw-r--r-- include/linux/sysctl.h | 1
-rw-r--r-- include/linux/t10-pi.h | 8
-rw-r--r-- include/linux/tcp.h | 14
-rw-r--r-- include/linux/tee_drv.h | 277
-rw-r--r-- include/linux/thread_info.h | 16
-rw-r--r-- include/linux/tick.h | 1
-rw-r--r-- include/linux/time.h | 3
-rw-r--r-- include/linux/timekeeping.h | 20
-rw-r--r-- include/linux/tpm.h | 3
-rw-r--r-- include/linux/trace_events.h | 11
-rw-r--r-- include/linux/tracepoint.h | 19
-rw-r--r-- include/linux/tty.h | 22
-rw-r--r-- include/linux/types.h | 2
-rw-r--r-- include/linux/uaccess.h | 198
-rw-r--r-- include/linux/udp.h | 2
-rw-r--r-- include/linux/uio.h | 6
-rw-r--r-- include/linux/uio_driver.h | 13
-rw-r--r-- include/linux/usb.h | 73
-rw-r--r-- include/linux/usb/composite.h | 6
-rw-r--r-- include/linux/usb/gadget.h | 2
-rw-r--r-- include/linux/usb/hcd.h | 8
-rw-r--r-- include/linux/usb/of.h | 5
-rw-r--r-- include/linux/usb/otg-fsm.h | 15
-rw-r--r-- include/linux/usb/serial.h | 42
-rw-r--r-- include/linux/usb/typec.h | 243
-rw-r--r-- include/linux/usb/usbnet.h | 13
-rw-r--r-- include/linux/usb/xhci-dbgp.h | 29
-rw-r--r-- include/linux/virtio.h | 14
-rw-r--r-- include/linux/virtio_config.h | 25
-rw-r--r-- include/linux/virtio_ring.h | 3
-rw-r--r-- include/linux/virtio_vsock.h | 1
-rw-r--r-- include/linux/vm_event_item.h | 2
-rw-r--r-- include/linux/vmalloc.h | 11
-rw-r--r-- include/linux/vme.h | 12
-rw-r--r-- include/linux/workqueue.h | 5
-rw-r--r-- include/linux/writeback.h | 1
347 files changed, 7166 insertions, 3074 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 9b05886f9773..137e4a3d89c5 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -233,10 +233,6 @@ int acpi_numa_init (void);
int acpi_table_init (void);
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
-int __init acpi_parse_entries(char *id, unsigned long table_size,
- acpi_tbl_entry_handler handler,
- struct acpi_table_header *table_header,
- int entry_id, unsigned int max_entries);
int __init acpi_table_parse_entries(char *id, unsigned long table_size,
int entry_id,
acpi_tbl_entry_handler handler,
@@ -595,6 +591,13 @@ enum acpi_reconfig_event {
int acpi_reconfig_notifier_register(struct notifier_block *nb);
int acpi_reconfig_notifier_unregister(struct notifier_block *nb);
+#ifdef CONFIG_ACPI_GTDT
+int acpi_gtdt_init(struct acpi_table_header *table, int *platform_timer_count);
+int acpi_gtdt_map_ppi(int type);
+bool acpi_gtdt_c3stop(int type);
+int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count);
+#endif
+
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
@@ -611,6 +614,11 @@ static inline bool acpi_dev_found(const char *hid)
return false;
}
+static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
+{
+ return false;
+}
+
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
return false;
@@ -762,8 +770,11 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
return DEV_DMA_NOT_SUPPORTED;
}
-static inline void acpi_dma_configure(struct device *dev,
- enum dev_dma_attr attr) { }
+static inline int acpi_dma_configure(struct device *dev,
+ enum dev_dma_attr attr)
+{
+ return 0;
+}
static inline void acpi_dma_deconfigure(struct device *dev) { }
@@ -949,6 +960,10 @@ static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
adev->driver_gpios = NULL;
}
+int devm_acpi_dev_add_driver_gpios(struct device *dev,
+ const struct acpi_gpio_mapping *gpios);
+void devm_acpi_dev_remove_driver_gpios(struct device *dev);
+
int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index);
#else
static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
@@ -958,6 +973,13 @@ static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
}
static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
+static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
+ const struct acpi_gpio_mapping *gpios)
+{
+ return -ENXIO;
+}
+static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {}
+
static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
return -ENXIO;
@@ -997,8 +1019,16 @@ int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname,
int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
enum dev_prop_type proptype, void *val, size_t nval);
-struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
- struct fwnode_handle *subnode);
+struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode,
+ struct fwnode_handle *child);
+struct fwnode_handle *acpi_node_get_parent(struct fwnode_handle *fwnode);
+
+struct fwnode_handle *acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
+ struct fwnode_handle *prev);
+int acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
+ struct fwnode_handle **remote,
+ struct fwnode_handle **port,
+ struct fwnode_handle **endpoint);
struct acpi_probe_entry;
typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
@@ -1115,12 +1145,34 @@ static inline int acpi_dev_prop_read(struct acpi_device *adev,
return -ENXIO;
}
-static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
- struct fwnode_handle *subnode)
+static inline struct fwnode_handle *
+acpi_get_next_subnode(struct fwnode_handle *fwnode, struct fwnode_handle *child)
{
return NULL;
}
+static inline struct fwnode_handle *
+acpi_node_get_parent(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline struct fwnode_handle *
+acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
+ struct fwnode_handle *prev)
+{
+ return ERR_PTR(-ENXIO);
+}
+
+static inline int
+acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
+ struct fwnode_handle **remote,
+ struct fwnode_handle **port,
+ struct fwnode_handle **endpoint)
+{
+ return -ENXIO;
+}
+
#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
static const void * __acpi_table_##name[] \
__attribute__((unused)) \
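
The GPIO hunks above add device-managed ("devm_") counterparts of acpi_dev_add_driver_gpios(). A minimal sketch of how a driver might use the new helper; the driver name, the "reset-gpios" property and the mapping table contents are assumptions for illustration, not part of this patch (struct acpi_gpio_mapping and struct acpi_gpio_params come from the existing GPIO mapping support, not from these hunks):

/* Hypothetical consumer of the new devm_ helper added above. */
#include <linux/acpi.h>
#include <linux/device.h>

static const struct acpi_gpio_params mydrv_reset_gpio = { 0, 0, false };

static const struct acpi_gpio_mapping mydrv_acpi_gpios[] = {
	{ "reset-gpios", &mydrv_reset_gpio, 1 },
	{ }
};

static int mydrv_probe(struct device *dev)
{
	/* The mapping is dropped automatically when the driver unbinds,
	 * so no explicit acpi_dev_remove_driver_gpios() call is needed. */
	return devm_acpi_dev_add_driver_gpios(dev, mydrv_acpi_gpios);
}
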
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 77e08099e554..3ff9acea8616 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -34,6 +34,8 @@ void acpi_iort_init(void);
bool iort_node_match(u8 type);
u32 iort_msi_map_rid(struct device *dev, u32 req_id);
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
+void acpi_configure_pmsi_domain(struct device *dev);
+int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
/* IOMMU interface */
void iort_set_dma_mask(struct device *dev);
const struct iommu_ops *iort_iommu_configure(struct device *dev);
@@ -45,6 +47,7 @@ static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
static inline struct irq_domain *iort_get_device_domain(struct device *dev,
u32 req_id)
{ return NULL; }
+static inline void acpi_configure_pmsi_domain(struct device *dev) { }
/* IOMMU interface */
static inline void iort_set_dma_mask(struct device *dev) { }
static inline
@@ -52,7 +55,4 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
{ return NULL; }
#endif
-#define IORT_ACPI_DECLARE(name, table_id, fn) \
- ACPI_DECLARE_PROBE_ENTRY(iort, name, table_id, 0, NULL, 0, fn)
-
#endif /* __ACPI_IORT_H__ */
diff --git a/include/linux/amba/pl080.h b/include/linux/amba/pl080.h
index 91b84a7f0539..580b5323a717 100644
--- a/include/linux/amba/pl080.h
+++ b/include/linux/amba/pl080.h
@@ -38,24 +38,16 @@
#define PL080_SOFT_LSREQ (0x2C)
#define PL080_CONFIG (0x30)
-#define PL080_CONFIG_M2_BE (1 << 2)
-#define PL080_CONFIG_M1_BE (1 << 1)
-#define PL080_CONFIG_ENABLE (1 << 0)
+#define PL080_CONFIG_M2_BE BIT(2)
+#define PL080_CONFIG_M1_BE BIT(1)
+#define PL080_CONFIG_ENABLE BIT(0)
#define PL080_SYNC (0x34)
/* Per channel configuration registers */
-#define PL080_Cx_STRIDE (0x20)
+/* Per channel configuration registers */
#define PL080_Cx_BASE(x) ((0x100 + (x * 0x20)))
-#define PL080_Cx_SRC_ADDR(x) ((0x100 + (x * 0x20)))
-#define PL080_Cx_DST_ADDR(x) ((0x104 + (x * 0x20)))
-#define PL080_Cx_LLI(x) ((0x108 + (x * 0x20)))
-#define PL080_Cx_CONTROL(x) ((0x10C + (x * 0x20)))
-#define PL080_Cx_CONFIG(x) ((0x110 + (x * 0x20)))
-#define PL080S_Cx_CONTROL2(x) ((0x110 + (x * 0x20)))
-#define PL080S_Cx_CONFIG(x) ((0x114 + (x * 0x20)))
-
#define PL080_CH_SRC_ADDR (0x00)
#define PL080_CH_DST_ADDR (0x04)
#define PL080_CH_LLI (0x08)
@@ -66,18 +58,18 @@
#define PL080_LLI_ADDR_MASK (0x3fffffff << 2)
#define PL080_LLI_ADDR_SHIFT (2)
-#define PL080_LLI_LM_AHB2 (1 << 0)
+#define PL080_LLI_LM_AHB2 BIT(0)
-#define PL080_CONTROL_TC_IRQ_EN (1 << 31)
+#define PL080_CONTROL_TC_IRQ_EN BIT(31)
#define PL080_CONTROL_PROT_MASK (0x7 << 28)
#define PL080_CONTROL_PROT_SHIFT (28)
-#define PL080_CONTROL_PROT_CACHE (1 << 30)
-#define PL080_CONTROL_PROT_BUFF (1 << 29)
-#define PL080_CONTROL_PROT_SYS (1 << 28)
-#define PL080_CONTROL_DST_INCR (1 << 27)
-#define PL080_CONTROL_SRC_INCR (1 << 26)
-#define PL080_CONTROL_DST_AHB2 (1 << 25)
-#define PL080_CONTROL_SRC_AHB2 (1 << 24)
+#define PL080_CONTROL_PROT_CACHE BIT(30)
+#define PL080_CONTROL_PROT_BUFF BIT(29)
+#define PL080_CONTROL_PROT_SYS BIT(28)
+#define PL080_CONTROL_DST_INCR BIT(27)
+#define PL080_CONTROL_SRC_INCR BIT(26)
+#define PL080_CONTROL_DST_AHB2 BIT(25)
+#define PL080_CONTROL_SRC_AHB2 BIT(24)
#define PL080_CONTROL_DWIDTH_MASK (0x7 << 21)
#define PL080_CONTROL_DWIDTH_SHIFT (21)
#define PL080_CONTROL_SWIDTH_MASK (0x7 << 18)
@@ -103,20 +95,20 @@
#define PL080_WIDTH_16BIT (0x1)
#define PL080_WIDTH_32BIT (0x2)
-#define PL080N_CONFIG_ITPROT (1 << 20)
-#define PL080N_CONFIG_SECPROT (1 << 19)
-#define PL080_CONFIG_HALT (1 << 18)
-#define PL080_CONFIG_ACTIVE (1 << 17) /* RO */
-#define PL080_CONFIG_LOCK (1 << 16)
-#define PL080_CONFIG_TC_IRQ_MASK (1 << 15)
-#define PL080_CONFIG_ERR_IRQ_MASK (1 << 14)
+#define PL080N_CONFIG_ITPROT BIT(20)
+#define PL080N_CONFIG_SECPROT BIT(19)
+#define PL080_CONFIG_HALT BIT(18)
+#define PL080_CONFIG_ACTIVE BIT(17) /* RO */
+#define PL080_CONFIG_LOCK BIT(16)
+#define PL080_CONFIG_TC_IRQ_MASK BIT(15)
+#define PL080_CONFIG_ERR_IRQ_MASK BIT(14)
#define PL080_CONFIG_FLOW_CONTROL_MASK (0x7 << 11)
#define PL080_CONFIG_FLOW_CONTROL_SHIFT (11)
#define PL080_CONFIG_DST_SEL_MASK (0xf << 6)
#define PL080_CONFIG_DST_SEL_SHIFT (6)
#define PL080_CONFIG_SRC_SEL_MASK (0xf << 1)
#define PL080_CONFIG_SRC_SEL_SHIFT (1)
-#define PL080_CONFIG_ENABLE (1 << 0)
+#define PL080_CONFIG_ENABLE BIT(0)
#define PL080_FLOW_MEM2MEM (0x0)
#define PL080_FLOW_MEM2PER (0x1)
diff --git a/include/linux/amba/pl330.h b/include/linux/amba/pl330.h
deleted file mode 100644
index fe93758e8403..000000000000
--- a/include/linux/amba/pl330.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* linux/include/linux/amba/pl330.h
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __AMBA_PL330_H_
-#define __AMBA_PL330_H_
-
-#include <linux/dmaengine.h>
-
-struct dma_pl330_platdata {
- /*
- * Number of valid peripherals connected to DMAC.
- * This may be different from the value read from
- * CR0, as the PL330 implementation might have 'holes'
- * in the peri list or the peri could also be reached
- * from another DMAC which the platform prefers.
- */
- u8 nr_valid_peri;
- /* Array of valid peripherals */
- u8 *peri_id;
- /* Operational capabilities */
- dma_cap_mask_t cap_mask;
- /* Bytes to allocate for MC buffer */
- unsigned mcbuf_sz;
-};
-
-extern bool pl330_filter(struct dma_chan *chan, void *param);
-#endif /* __AMBA_PL330_H_ */
diff --git a/include/linux/amigaffs.h b/include/linux/amigaffs.h
deleted file mode 100644
index 43b41c06aa37..000000000000
--- a/include/linux/amigaffs.h
+++ /dev/null
@@ -1,144 +0,0 @@
-#ifndef AMIGAFFS_H
-#define AMIGAFFS_H
-
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-#define FS_OFS 0x444F5300
-#define FS_FFS 0x444F5301
-#define FS_INTLOFS 0x444F5302
-#define FS_INTLFFS 0x444F5303
-#define FS_DCOFS 0x444F5304
-#define FS_DCFFS 0x444F5305
-#define MUFS_FS 0x6d754653 /* 'muFS' */
-#define MUFS_OFS 0x6d754600 /* 'muF\0' */
-#define MUFS_FFS 0x6d754601 /* 'muF\1' */
-#define MUFS_INTLOFS 0x6d754602 /* 'muF\2' */
-#define MUFS_INTLFFS 0x6d754603 /* 'muF\3' */
-#define MUFS_DCOFS 0x6d754604 /* 'muF\4' */
-#define MUFS_DCFFS 0x6d754605 /* 'muF\5' */
-
-#define T_SHORT 2
-#define T_LIST 16
-#define T_DATA 8
-
-#define ST_LINKFILE -4
-#define ST_FILE -3
-#define ST_ROOT 1
-#define ST_USERDIR 2
-#define ST_SOFTLINK 3
-#define ST_LINKDIR 4
-
-#define AFFS_ROOT_BMAPS 25
-
-struct affs_date {
- __be32 days;
- __be32 mins;
- __be32 ticks;
-};
-
-struct affs_short_date {
- __be16 days;
- __be16 mins;
- __be16 ticks;
-};
-
-struct affs_root_head {
- __be32 ptype;
- __be32 spare1;
- __be32 spare2;
- __be32 hash_size;
- __be32 spare3;
- __be32 checksum;
- __be32 hashtable[1];
-};
-
-struct affs_root_tail {
- __be32 bm_flag;
- __be32 bm_blk[AFFS_ROOT_BMAPS];
- __be32 bm_ext;
- struct affs_date root_change;
- u8 disk_name[32];
- __be32 spare1;
- __be32 spare2;
- struct affs_date disk_change;
- struct affs_date disk_create;
- __be32 spare3;
- __be32 spare4;
- __be32 dcache;
- __be32 stype;
-};
-
-struct affs_head {
- __be32 ptype;
- __be32 key;
- __be32 block_count;
- __be32 spare1;
- __be32 first_data;
- __be32 checksum;
- __be32 table[1];
-};
-
-struct affs_tail {
- __be32 spare1;
- __be16 uid;
- __be16 gid;
- __be32 protect;
- __be32 size;
- u8 comment[92];
- struct affs_date change;
- u8 name[32];
- __be32 spare2;
- __be32 original;
- __be32 link_chain;
- __be32 spare[5];
- __be32 hash_chain;
- __be32 parent;
- __be32 extension;
- __be32 stype;
-};
-
-struct slink_front
-{
- __be32 ptype;
- __be32 key;
- __be32 spare1[3];
- __be32 checksum;
- u8 symname[1]; /* depends on block size */
-};
-
-struct affs_data_head
-{
- __be32 ptype;
- __be32 key;
- __be32 sequence;
- __be32 size;
- __be32 next;
- __be32 checksum;
- u8 data[1]; /* depends on block size */
-};
-
-/* Permission bits */
-
-#define FIBF_OTR_READ 0x8000
-#define FIBF_OTR_WRITE 0x4000
-#define FIBF_OTR_EXECUTE 0x2000
-#define FIBF_OTR_DELETE 0x1000
-#define FIBF_GRP_READ 0x0800
-#define FIBF_GRP_WRITE 0x0400
-#define FIBF_GRP_EXECUTE 0x0200
-#define FIBF_GRP_DELETE 0x0100
-
-#define FIBF_HIDDEN 0x0080
-#define FIBF_SCRIPT 0x0040
-#define FIBF_PURE 0x0020 /* no use under linux */
-#define FIBF_ARCHIVED 0x0010 /* never set, always cleared on write */
-#define FIBF_NOREAD 0x0008 /* 0 means allowed */
-#define FIBF_NOWRITE 0x0004 /* 0 means allowed */
-#define FIBF_NOEXECUTE 0x0002 /* 0 means allowed, ignored under linux */
-#define FIBF_NODELETE 0x0001 /* 0 means allowed */
-
-#define FIBF_OWNER 0x000F /* Bits pertaining to owner */
-#define FIBF_MASK 0xEE0E /* Bits modified by Linux */
-
-#endif
diff --git a/include/linux/ata.h b/include/linux/ata.h
index af6859b3a93d..ad7d9ee89ff0 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -817,11 +817,6 @@ static inline bool ata_id_sct_error_recovery_ctrl(const u16 *id)
return id[ATA_ID_SCT_CMD_XPORT] & (1 << 3) ? true : false;
}
-static inline bool ata_id_sct_write_same(const u16 *id)
-{
- return id[ATA_ID_SCT_CMD_XPORT] & (1 << 2) ? true : false;
-}
-
static inline bool ata_id_sct_long_sector_access(const u16 *id)
{
return id[ATA_ID_SCT_CMD_XPORT] & (1 << 1) ? true : false;
diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h
deleted file mode 100644
index bd2560502f3c..000000000000
--- a/include/linux/atmel_serial.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * include/linux/atmel_serial.h
- *
- * Copyright (C) 2005 Ivan Kokshaysky
- * Copyright (C) SAN People
- *
- * USART registers.
- * Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef ATMEL_SERIAL_H
-#define ATMEL_SERIAL_H
-
-#define ATMEL_US_CR 0x00 /* Control Register */
-#define ATMEL_US_RSTRX BIT(2) /* Reset Receiver */
-#define ATMEL_US_RSTTX BIT(3) /* Reset Transmitter */
-#define ATMEL_US_RXEN BIT(4) /* Receiver Enable */
-#define ATMEL_US_RXDIS BIT(5) /* Receiver Disable */
-#define ATMEL_US_TXEN BIT(6) /* Transmitter Enable */
-#define ATMEL_US_TXDIS BIT(7) /* Transmitter Disable */
-#define ATMEL_US_RSTSTA BIT(8) /* Reset Status Bits */
-#define ATMEL_US_STTBRK BIT(9) /* Start Break */
-#define ATMEL_US_STPBRK BIT(10) /* Stop Break */
-#define ATMEL_US_STTTO BIT(11) /* Start Time-out */
-#define ATMEL_US_SENDA BIT(12) /* Send Address */
-#define ATMEL_US_RSTIT BIT(13) /* Reset Iterations */
-#define ATMEL_US_RSTNACK BIT(14) /* Reset Non Acknowledge */
-#define ATMEL_US_RETTO BIT(15) /* Rearm Time-out */
-#define ATMEL_US_DTREN BIT(16) /* Data Terminal Ready Enable */
-#define ATMEL_US_DTRDIS BIT(17) /* Data Terminal Ready Disable */
-#define ATMEL_US_RTSEN BIT(18) /* Request To Send Enable */
-#define ATMEL_US_RTSDIS BIT(19) /* Request To Send Disable */
-#define ATMEL_US_TXFCLR BIT(24) /* Transmit FIFO Clear */
-#define ATMEL_US_RXFCLR BIT(25) /* Receive FIFO Clear */
-#define ATMEL_US_TXFLCLR BIT(26) /* Transmit FIFO Lock Clear */
-#define ATMEL_US_FIFOEN BIT(30) /* FIFO enable */
-#define ATMEL_US_FIFODIS BIT(31) /* FIFO disable */
-
-#define ATMEL_US_MR 0x04 /* Mode Register */
-#define ATMEL_US_USMODE GENMASK(3, 0) /* Mode of the USART */
-#define ATMEL_US_USMODE_NORMAL 0
-#define ATMEL_US_USMODE_RS485 1
-#define ATMEL_US_USMODE_HWHS 2
-#define ATMEL_US_USMODE_MODEM 3
-#define ATMEL_US_USMODE_ISO7816_T0 4
-#define ATMEL_US_USMODE_ISO7816_T1 6
-#define ATMEL_US_USMODE_IRDA 8
-#define ATMEL_US_USCLKS GENMASK(5, 4) /* Clock Selection */
-#define ATMEL_US_USCLKS_MCK (0 << 4)
-#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4)
-#define ATMEL_US_USCLKS_SCK (3 << 4)
-#define ATMEL_US_CHRL GENMASK(7, 6) /* Character Length */
-#define ATMEL_US_CHRL_5 (0 << 6)
-#define ATMEL_US_CHRL_6 (1 << 6)
-#define ATMEL_US_CHRL_7 (2 << 6)
-#define ATMEL_US_CHRL_8 (3 << 6)
-#define ATMEL_US_SYNC BIT(8) /* Synchronous Mode Select */
-#define ATMEL_US_PAR GENMASK(11, 9) /* Parity Type */
-#define ATMEL_US_PAR_EVEN (0 << 9)
-#define ATMEL_US_PAR_ODD (1 << 9)
-#define ATMEL_US_PAR_SPACE (2 << 9)
-#define ATMEL_US_PAR_MARK (3 << 9)
-#define ATMEL_US_PAR_NONE (4 << 9)
-#define ATMEL_US_PAR_MULTI_DROP (6 << 9)
-#define ATMEL_US_NBSTOP GENMASK(13, 12) /* Number of Stop Bits */
-#define ATMEL_US_NBSTOP_1 (0 << 12)
-#define ATMEL_US_NBSTOP_1_5 (1 << 12)
-#define ATMEL_US_NBSTOP_2 (2 << 12)
-#define ATMEL_US_CHMODE GENMASK(15, 14) /* Channel Mode */
-#define ATMEL_US_CHMODE_NORMAL (0 << 14)
-#define ATMEL_US_CHMODE_ECHO (1 << 14)
-#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14)
-#define ATMEL_US_CHMODE_REM_LOOP (3 << 14)
-#define ATMEL_US_MSBF BIT(16) /* Bit Order */
-#define ATMEL_US_MODE9 BIT(17) /* 9-bit Character Length */
-#define ATMEL_US_CLKO BIT(18) /* Clock Output Select */
-#define ATMEL_US_OVER BIT(19) /* Oversampling Mode */
-#define ATMEL_US_INACK BIT(20) /* Inhibit Non Acknowledge */
-#define ATMEL_US_DSNACK BIT(21) /* Disable Successive NACK */
-#define ATMEL_US_MAX_ITER GENMASK(26, 24) /* Max Iterations */
-#define ATMEL_US_FILTER BIT(28) /* Infrared Receive Line Filter */
-
-#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */
-#define ATMEL_US_RXRDY BIT(0) /* Receiver Ready */
-#define ATMEL_US_TXRDY BIT(1) /* Transmitter Ready */
-#define ATMEL_US_RXBRK BIT(2) /* Break Received / End of Break */
-#define ATMEL_US_ENDRX BIT(3) /* End of Receiver Transfer */
-#define ATMEL_US_ENDTX BIT(4) /* End of Transmitter Transfer */
-#define ATMEL_US_OVRE BIT(5) /* Overrun Error */
-#define ATMEL_US_FRAME BIT(6) /* Framing Error */
-#define ATMEL_US_PARE BIT(7) /* Parity Error */
-#define ATMEL_US_TIMEOUT BIT(8) /* Receiver Time-out */
-#define ATMEL_US_TXEMPTY BIT(9) /* Transmitter Empty */
-#define ATMEL_US_ITERATION BIT(10) /* Max number of Repetitions Reached */
-#define ATMEL_US_TXBUFE BIT(11) /* Transmission Buffer Empty */
-#define ATMEL_US_RXBUFF BIT(12) /* Reception Buffer Full */
-#define ATMEL_US_NACK BIT(13) /* Non Acknowledge */
-#define ATMEL_US_RIIC BIT(16) /* Ring Indicator Input Change */
-#define ATMEL_US_DSRIC BIT(17) /* Data Set Ready Input Change */
-#define ATMEL_US_DCDIC BIT(18) /* Data Carrier Detect Input Change */
-#define ATMEL_US_CTSIC BIT(19) /* Clear to Send Input Change */
-#define ATMEL_US_RI BIT(20) /* RI */
-#define ATMEL_US_DSR BIT(21) /* DSR */
-#define ATMEL_US_DCD BIT(22) /* DCD */
-#define ATMEL_US_CTS BIT(23) /* CTS */
-
-#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */
-#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */
-#define ATMEL_US_CSR 0x14 /* Channel Status Register */
-#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */
-#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */
-#define ATMEL_US_SYNH BIT(15) /* Transmit/Receive Sync */
-
-#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
-#define ATMEL_US_CD GENMASK(15, 0) /* Clock Divider */
-#define ATMEL_US_FP_OFFSET 16 /* Fractional Part */
-#define ATMEL_US_FP_MASK 0x7
-
-#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register for USART */
-#define ATMEL_UA_RTOR 0x28 /* Receiver Time-out Register for UART */
-#define ATMEL_US_TO GENMASK(15, 0) /* Time-out Value */
-
-#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */
-#define ATMEL_US_TG GENMASK(7, 0) /* Timeguard Value */
-
-#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */
-#define ATMEL_US_NER 0x44 /* Number of Errors Register */
-#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
-
-#define ATMEL_US_CMPR 0x90 /* Comparaison Register */
-#define ATMEL_US_FMR 0xa0 /* FIFO Mode Register */
-#define ATMEL_US_TXRDYM(data) (((data) & 0x3) << 0) /* TX Ready Mode */
-#define ATMEL_US_RXRDYM(data) (((data) & 0x3) << 4) /* RX Ready Mode */
-#define ATMEL_US_ONE_DATA 0x0
-#define ATMEL_US_TWO_DATA 0x1
-#define ATMEL_US_FOUR_DATA 0x2
-#define ATMEL_US_FRTSC BIT(7) /* FIFO RTS pin Control */
-#define ATMEL_US_TXFTHRES(thr) (((thr) & 0x3f) << 8) /* TX FIFO Threshold */
-#define ATMEL_US_RXFTHRES(thr) (((thr) & 0x3f) << 16) /* RX FIFO Threshold */
-#define ATMEL_US_RXFTHRES2(thr) (((thr) & 0x3f) << 24) /* RX FIFO Threshold2 */
-
-#define ATMEL_US_FLR 0xa4 /* FIFO Level Register */
-#define ATMEL_US_TXFL(reg) (((reg) >> 0) & 0x3f) /* TX FIFO Level */
-#define ATMEL_US_RXFL(reg) (((reg) >> 16) & 0x3f) /* RX FIFO Level */
-
-#define ATMEL_US_FIER 0xa8 /* FIFO Interrupt Enable Register */
-#define ATMEL_US_FIDR 0xac /* FIFO Interrupt Disable Register */
-#define ATMEL_US_FIMR 0xb0 /* FIFO Interrupt Mask Register */
-#define ATMEL_US_FESR 0xb4 /* FIFO Event Status Register */
-#define ATMEL_US_TXFEF BIT(0) /* Transmit FIFO Empty Flag */
-#define ATMEL_US_TXFFF BIT(1) /* Transmit FIFO Full Flag */
-#define ATMEL_US_TXFTHF BIT(2) /* Transmit FIFO Threshold Flag */
-#define ATMEL_US_RXFEF BIT(3) /* Receive FIFO Empty Flag */
-#define ATMEL_US_RXFFF BIT(4) /* Receive FIFO Full Flag */
-#define ATMEL_US_RXFTHF BIT(5) /* Receive FIFO Threshold Flag */
-#define ATMEL_US_TXFPTEF BIT(6) /* Transmit FIFO Pointer Error Flag */
-#define ATMEL_US_RXFPTEF BIT(7) /* Receive FIFO Pointer Error Flag */
-#define ATMEL_US_TXFLOCK BIT(8) /* Transmit FIFO Lock (FESR only) */
-#define ATMEL_US_RXFTHF2 BIT(9) /* Receive FIFO Threshold Flag 2 */
-
-#define ATMEL_US_NAME 0xf0 /* Ip Name */
-#define ATMEL_US_VERSION 0xfc /* Ip Version */
-
-#endif
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index e71835bf60a9..c56be7410130 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -423,6 +423,29 @@
#endif
#endif /* atomic_cmpxchg_relaxed */
+#ifndef atomic_try_cmpxchg
+
+#define __atomic_try_cmpxchg(type, _p, _po, _n) \
+({ \
+ typeof(_po) __po = (_po); \
+ typeof(*(_po)) __r, __o = *__po; \
+ __r = atomic_cmpxchg##type((_p), __o, (_n)); \
+ if (unlikely(__r != __o)) \
+ *__po = __r; \
+ likely(__r == __o); \
+})
+
+#define atomic_try_cmpxchg(_p, _po, _n) __atomic_try_cmpxchg(, _p, _po, _n)
+#define atomic_try_cmpxchg_relaxed(_p, _po, _n) __atomic_try_cmpxchg(_relaxed, _p, _po, _n)
+#define atomic_try_cmpxchg_acquire(_p, _po, _n) __atomic_try_cmpxchg(_acquire, _p, _po, _n)
+#define atomic_try_cmpxchg_release(_p, _po, _n) __atomic_try_cmpxchg(_release, _p, _po, _n)
+
+#else /* atomic_try_cmpxchg */
+#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg
+#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
+#define atomic_try_cmpxchg_release atomic_try_cmpxchg
+#endif /* atomic_try_cmpxchg */
+
/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed cmpxchg
@@ -996,6 +1019,29 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_cmpxchg_relaxed */
+#ifndef atomic64_try_cmpxchg
+
+#define __atomic64_try_cmpxchg(type, _p, _po, _n) \
+({ \
+ typeof(_po) __po = (_po); \
+ typeof(*(_po)) __r, __o = *__po; \
+ __r = atomic64_cmpxchg##type((_p), __o, (_n)); \
+ if (unlikely(__r != __o)) \
+ *__po = __r; \
+ likely(__r == __o); \
+})
+
+#define atomic64_try_cmpxchg(_p, _po, _n) __atomic64_try_cmpxchg(, _p, _po, _n)
+#define atomic64_try_cmpxchg_relaxed(_p, _po, _n) __atomic64_try_cmpxchg(_relaxed, _p, _po, _n)
+#define atomic64_try_cmpxchg_acquire(_p, _po, _n) __atomic64_try_cmpxchg(_acquire, _p, _po, _n)
+#define atomic64_try_cmpxchg_release(_p, _po, _n) __atomic64_try_cmpxchg(_release, _p, _po, _n)
+
+#else /* atomic64_try_cmpxchg */
+#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
+#endif /* atomic64_try_cmpxchg */
+
#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
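
As a usage sketch (illustrative code, not from this patch): atomic_try_cmpxchg() evaluates to true when the exchange succeeded and, on failure, writes the value it actually found back through the old-value pointer, so compare-and-swap loops no longer need an explicit re-read. The function and variable names below are assumptions.

/* Illustrative bounded-increment loop built on the new helper. */
static bool mydrv_inc_unless_max(atomic_t *v, int max)
{
	int old = atomic_read(v);

	do {
		if (old >= max)
			return false;
		/* On failure, 'old' has been refreshed with the current value. */
	} while (!atomic_try_cmpxchg(v, &old, old + 1));

	return true;
}
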
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 504e784b7ffa..2150bdccfbab 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -163,8 +163,7 @@ extern void audit_log_task_info(struct audit_buffer *ab,
extern int audit_update_lsm_rules(void);
/* Private API (for audit.c only) */
-extern int audit_rule_change(int type, __u32 portid, int seq,
- void *data, size_t datasz);
+extern int audit_rule_change(int type, int seq, void *data, size_t datasz);
extern int audit_list_rules_send(struct sk_buff *request_skb, int seq);
extern u32 audit_enabled;
@@ -332,7 +331,7 @@ static inline void audit_ptrace(struct task_struct *t)
/* Private API (for audit.c only) */
extern unsigned int audit_serial(void);
extern int auditsc_get_stamp(struct audit_context *ctx,
- struct timespec *t, unsigned int *serial);
+ struct timespec64 *t, unsigned int *serial);
extern int audit_set_loginuid(kuid_t loginuid);
static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
@@ -511,7 +510,7 @@ static inline void __audit_seccomp(unsigned long syscall, long signr, int code)
static inline void audit_seccomp(unsigned long syscall, long signr, int code)
{ }
static inline int auditsc_get_stamp(struct audit_context *ctx,
- struct timespec *t, unsigned int *serial)
+ struct timespec64 *t, unsigned int *serial)
{
return 0;
}
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index ad955817916d..866c433e7d32 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -21,6 +21,7 @@ struct dentry;
*/
enum wb_state {
WB_registered, /* bdi_register() was done */
+ WB_shutting_down, /* wb_shutdown() in progress */
WB_writeback_running, /* Writeback is in progress */
WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
};
@@ -54,7 +55,9 @@ struct bdi_writeback_congested {
atomic_t refcnt; /* nr of attached wb's and blkg */
#ifdef CONFIG_CGROUP_WRITEBACK
- struct backing_dev_info *bdi; /* the associated bdi */
+ struct backing_dev_info *__bdi; /* the associated bdi, set to NULL
+ * on bdi unregistration. For memcg-wb
+ * internal use only! */
int blkcg_id; /* ID of the associated blkcg */
struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */
#endif
@@ -143,7 +146,7 @@ struct backing_dev_info {
congested_fn *congested_fn; /* Function pointer if device is md/dm */
void *congested_data; /* Pointer to aux data for congested func */
- char *name;
+ const char *name;
struct kref refcnt; /* Reference counter for the structure */
unsigned int capabilities; /* Device capabilities */
@@ -161,7 +164,6 @@ struct backing_dev_info {
#ifdef CONFIG_CGROUP_WRITEBACK
struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
struct rb_root cgwb_congested_tree; /* their congested states */
- atomic_t usage_cnt; /* counts both cgwbs and cgwb_contested's */
#else
struct bdi_writeback_congested *wb_congested;
#endif
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index c52a48cb9a66..557d84063934 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -17,8 +17,6 @@
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>
-int __must_check bdi_init(struct backing_dev_info *bdi);
-
static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
kref_get(&bdi->refcnt);
@@ -27,16 +25,18 @@ static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
void bdi_put(struct backing_dev_info *bdi);
-__printf(3, 4)
-int bdi_register(struct backing_dev_info *bdi, struct device *parent,
- const char *fmt, ...);
-int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
+__printf(2, 3)
+int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
+int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
+ va_list args);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);
-int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
-void bdi_destroy(struct backing_dev_info *bdi);
struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
+static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
+{
+ return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
+}
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
bool range_cyclic, enum wb_reason reason);
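
With bdi_init(), bdi_setup_and_register() and bdi_destroy() removed, a BDI is now obtained from bdi_alloc(), registered with the parent-less bdi_register(), and released through its refcount with bdi_put(). A hedged sketch of the new lifecycle, assuming a hypothetical driver (the "mydrv%d" name and error codes are illustrative):

/* Illustrative allocation/registration with the reworked API. */
#include <linux/backing-dev.h>
#include <linux/err.h>

static struct backing_dev_info *mydrv_setup_bdi(void)
{
	struct backing_dev_info *bdi;
	int err;

	bdi = bdi_alloc(GFP_KERNEL);	/* replaces bdi_init()/bdi_setup_and_register() */
	if (!bdi)
		return ERR_PTR(-ENOMEM);

	err = bdi_register(bdi, "mydrv%d", 0);	/* no 'parent' argument any more */
	if (err) {
		bdi_put(bdi);			/* dropping the last reference frees it */
		return ERR_PTR(err);
	}

	return bdi;
}

/* Teardown is bdi_unregister() followed by bdi_put(), replacing bdi_destroy(). */
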
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 9657f11d48a7..bca6a5e4ca3d 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -80,7 +80,7 @@ struct pci_dev;
#define BCMA_CORE_PCI_MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */
#define BCMA_CORE_PCI_MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */
#define BCMA_CORE_PCI_PCIEIND_ADDR 0x0130 /* indirect access to the internal register */
-#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal regsiter */
+#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal register */
#define BCMA_CORE_PCI_CLKREQENCTRL 0x0138 /* >= rev 6, Clkreq rdma control */
#define BCMA_CORE_PCI_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */
#define BCMA_CORE_PCI_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 8e521194f6fc..d1b04b0e99cf 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -183,7 +183,7 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
-static inline unsigned __bio_segments(struct bio *bio, struct bvec_iter *bvec)
+static inline unsigned bio_segments(struct bio *bio)
{
unsigned segs = 0;
struct bio_vec bv;
@@ -205,17 +205,12 @@ static inline unsigned __bio_segments(struct bio *bio, struct bvec_iter *bvec)
break;
}
- __bio_for_each_segment(bv, bio, iter, *bvec)
+ bio_for_each_segment(bv, bio, iter)
segs++;
return segs;
}
-static inline unsigned bio_segments(struct bio *bio)
-{
- return __bio_segments(bio, &bio->bi_iter);
-}
-
/*
* get a reference to a bio, so it won't disappear. the intended use is
* something like:
@@ -383,14 +378,12 @@ extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(int pool_entries);
-extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
+extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);
extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
-extern struct bio *bio_clone_bioset_partial(struct bio *, gfp_t,
- struct bio_set *, int, int);
extern struct bio_set *fs_bio_set;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b296a9006117..fcd641032f8d 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -15,7 +15,7 @@ struct blk_mq_hw_ctx {
unsigned long state; /* BLK_MQ_S_* flags */
} ____cacheline_aligned_in_smp;
- struct work_struct run_work;
+ struct delayed_work run_work;
cpumask_var_t cpumask;
int next_cpu;
int next_cpu_batch;
@@ -51,14 +51,17 @@ struct blk_mq_hw_ctx {
atomic_t nr_active;
- struct delayed_work delay_work;
-
struct hlist_node cpuhp_dead;
struct kobject kobj;
unsigned long poll_considered;
unsigned long poll_invoked;
unsigned long poll_success;
+
+#ifdef CONFIG_BLK_DEBUG_FS
+ struct dentry *debugfs_dir;
+ struct dentry *sched_debugfs_dir;
+#endif
};
struct blk_mq_tag_set {
@@ -81,7 +84,6 @@ struct blk_mq_tag_set {
struct blk_mq_queue_data {
struct request *rq;
- struct list_head *list;
bool last;
};
@@ -89,9 +91,9 @@ typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
-typedef int (init_request_fn)(void *, struct request *, unsigned int,
+typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
unsigned int, unsigned int);
-typedef void (exit_request_fn)(void *, struct request *, unsigned int,
+typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
unsigned int);
typedef int (reinit_request_fn)(void *, struct request *);
@@ -142,6 +144,14 @@ struct blk_mq_ops {
reinit_request_fn *reinit_request;
map_queues_fn *map_queues;
+
+#ifdef CONFIG_BLK_DEBUG_FS
+ /*
+ * Used by the debugfs implementation to show driver-specific
+ * information about a request.
+ */
+ void (*show_rq)(struct seq_file *m, struct request *rq);
+#endif
};
enum {
@@ -152,7 +162,6 @@ enum {
BLK_MQ_F_SHOULD_MERGE = 1 << 0,
BLK_MQ_F_TAG_SHARED = 1 << 1,
BLK_MQ_F_SG_MERGE = 1 << 2,
- BLK_MQ_F_DEFER_ISSUE = 1 << 4,
BLK_MQ_F_BLOCKING = 1 << 5,
BLK_MQ_F_NO_SCHED = 1 << 6,
BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
@@ -162,6 +171,7 @@ enum {
BLK_MQ_S_TAG_ACTIVE = 1,
BLK_MQ_S_SCHED_RESTART = 2,
BLK_MQ_S_TAG_WAITING = 3,
+ BLK_MQ_S_START_ON_RUN = 4,
BLK_MQ_MAX_DEPTH = 10240,
@@ -228,8 +238,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
-void blk_mq_abort_requeue_list(struct request_queue *q);
-void blk_mq_complete_request(struct request *rq, int error);
+void blk_mq_complete_request(struct request *rq);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -238,13 +247,15 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
-void blk_mq_freeze_queue_start(struct request_queue *q);
+void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
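
init_request_fn and exit_request_fn now receive the struct blk_mq_tag_set instead of an opaque driver-data pointer. A sketch of a driver callback adjusted to the new prototype; all mydrv_* names and the pdu layout are assumptions, not part of this patch:

/* Illustrative init_request callback using the new signature. */
#include <linux/blk-mq.h>

struct mydrv_ctrl { int instance; };
struct mydrv_cmd { struct mydrv_ctrl *ctrl; };

static int mydrv_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	/* Driver data is now reached through the tag set. */
	cmd->ctrl = set->driver_data;
	return 0;
}
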
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d703acb55d0f..61339bc44400 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -17,6 +17,10 @@ struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
+struct blk_issue_stat {
+ u64 stat;
+};
+
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
* stacking drivers)
@@ -29,7 +33,7 @@ struct bio {
* top bits REQ_OP. Use
* accessors.
*/
- unsigned short bi_flags; /* status, command, etc */
+ unsigned short bi_flags; /* status, etc and bvec pool number */
unsigned short bi_ioprio;
struct bvec_iter bi_iter;
@@ -58,6 +62,10 @@ struct bio {
*/
struct io_context *bi_ioc;
struct cgroup_subsys_state *bi_css;
+#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+ void *bi_cg_private;
+ struct blk_issue_stat bi_issue_stat;
+#endif
#endif
union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -102,12 +110,9 @@ struct bio {
#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */
#define BIO_THROTTLED 9 /* This bio has already been subjected to
* throttling rules. Don't do it again. */
-
-/*
- * Flags starting here get preserved by bio_reset() - this includes
- * BVEC_POOL_IDX()
- */
-#define BIO_RESET_BITS 10
+#define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
+ * of this bio. */
+/* See BVEC_POOL_OFFSET below before adding new flags */
/*
* We support 6 different bvec pools, the last one is magic in that it
@@ -117,13 +122,22 @@ struct bio {
#define BVEC_POOL_MAX (BVEC_POOL_NR - 1)
/*
- * Top 4 bits of bio flags indicate the pool the bvecs came from. We add
+ * Top 3 bits of bio flags indicate the pool the bvecs came from. We add
* 1 to the actual index so that 0 indicates that there are no bvecs to be
* freed.
*/
-#define BVEC_POOL_BITS (4)
+#define BVEC_POOL_BITS (3)
#define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET)
+#if (1<< BVEC_POOL_BITS) < (BVEC_POOL_NR+1)
+# error "BVEC_POOL_BITS is too small"
+#endif
+
+/*
+ * Flags starting here get preserved by bio_reset() - this includes
+ * only BVEC_POOL_IDX()
+ */
+#define BIO_RESET_BITS BVEC_POOL_OFFSET
/*
* Operations and flags common to the bio and request structures.
@@ -160,7 +174,7 @@ enum req_opf {
/* write the same sector many times */
REQ_OP_WRITE_SAME = 7,
/* write the zero filled sector many times */
- REQ_OP_WRITE_ZEROES = 8,
+ REQ_OP_WRITE_ZEROES = 9,
/* SCSI passthrough using struct scsi_request */
REQ_OP_SCSI_IN = 32,
@@ -187,6 +201,10 @@ enum req_flag_bits {
__REQ_PREFLUSH, /* request for cache flush */
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_BACKGROUND, /* background IO */
+
+ /* command specific flags for REQ_OP_WRITE_ZEROES: */
+ __REQ_NOUNMAP, /* do not free blocks when zeroing */
+
__REQ_NR_BITS, /* stops here */
};
@@ -204,6 +222,8 @@ enum req_flag_bits {
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
+#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
+
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -283,12 +303,6 @@ static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
return (cookie & BLK_QC_T_INTERNAL) != 0;
}
-struct blk_issue_stat {
- u64 time;
-};
-
-#define BLK_RQ_STAT_BATCH 64
-
struct blk_rq_stat {
s64 mean;
u64 min;
@@ -296,7 +310,6 @@ struct blk_rq_stat {
s32 nr_samples;
s32 nr_batch;
u64 batch;
- s64 time;
};
#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5a7da607ca04..ab92c4ea138b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -40,15 +40,20 @@ struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
struct rq_wb;
+struct blk_queue_stats;
+struct blk_stat_callback;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
+/* Must be consisitent with blk_mq_poll_stats_bkt() */
+#define BLK_MQ_POLL_STATS_BKTS 16
+
/*
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
*/
-#define BLKCG_MAX_POLS 2
+#define BLKCG_MAX_POLS 3
typedef void (rq_end_io_fn)(struct request *, int);
@@ -173,6 +178,7 @@ struct request {
struct rb_node rb_node; /* sort/lookup */
struct bio_vec special_vec;
void *completion_data;
+ int error_count; /* for legacy drivers, don't use */
};
/*
@@ -213,16 +219,14 @@ struct request {
unsigned short ioprio;
- void *special; /* opaque pointer available for LLD use */
+ unsigned int timeout;
- int errors;
+ void *special; /* opaque pointer available for LLD use */
unsigned int extra_len; /* length of alignment and padding */
unsigned long deadline;
struct list_head timeout_list;
- unsigned int timeout;
- int retries;
/*
* completion callback.
@@ -337,7 +341,6 @@ struct queue_limits {
unsigned char misaligned;
unsigned char discard_misaligned;
unsigned char cluster;
- unsigned char discard_zeroes_data;
unsigned char raid_partial_stripes_expensive;
enum blk_zoned_model zoned;
};
@@ -388,6 +391,7 @@ struct request_queue {
int nr_rqs[2]; /* # allocated [a]sync rqs */
int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
+ struct blk_queue_stats *stats;
struct rq_wb *rq_wb;
/*
@@ -505,8 +509,6 @@ struct request_queue {
unsigned int nr_sorted;
unsigned int in_flight[2];
- struct blk_rq_stat rq_stats[2];
-
/*
* Number of active block driver functions for which blk_drain_queue()
* must wait. Must be incremented around functions that unlock the
@@ -516,6 +518,10 @@ struct request_queue {
unsigned int rq_timeout;
int poll_nsec;
+
+ struct blk_stat_callback *poll_cb;
+ struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];
+
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head timeout_list;
@@ -573,7 +579,7 @@ struct request_queue {
#ifdef CONFIG_BLK_DEBUG_FS
struct dentry *debugfs_dir;
- struct dentry *mq_debugfs_dir;
+ struct dentry *sched_debugfs_dir;
#endif
bool mq_sysfs_init_done;
@@ -610,7 +616,8 @@ struct request_queue {
#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */
#define QUEUE_FLAG_DAX 26 /* device supports DAX */
#define QUEUE_FLAG_STATS 27 /* track rq completion times */
-#define QUEUE_FLAG_RESTART 28 /* queue needs restart at completion */
+#define QUEUE_FLAG_POLL_STATS 28 /* collecting stats for hybrid polling */
+#define QUEUE_FLAG_REGISTERED 29 /* queue has been registered to a disk */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -919,6 +926,7 @@ extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
+extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
@@ -964,7 +972,7 @@ extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, uns
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
struct rq_map_data *, const struct iov_iter *,
gfp_t);
-extern int blk_execute_rq(struct request_queue *, struct gendisk *,
+extern void blk_execute_rq(struct request_queue *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
@@ -1082,20 +1090,6 @@ static inline unsigned int blk_rq_count_bios(struct request *rq)
}
/*
- * blk_rq_set_prio - associate a request with prio from ioc
- * @rq: request of interest
- * @ioc: target iocontext
- *
- * Assocate request prio with ioc prio so request based drivers
- * can leverage priority information.
- */
-static inline void blk_rq_set_prio(struct request *rq, struct io_context *ioc)
-{
- if (ioc)
- rq->ioprio = ioc->ioprio;
-}
-
-/*
* Request issue related functions.
*/
extern struct request *blk_peek_request(struct request_queue *q);
@@ -1121,13 +1115,10 @@ extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
-extern bool blk_end_request_cur(struct request *rq, int error);
-extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
-extern bool __blk_end_request_err(struct request *rq, int error);
extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
@@ -1330,23 +1321,27 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
return bqt->tag_index[tag];
}
+extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
+extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, struct page *page);
#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
-#define BLKDEV_DISCARD_ZERO (1 << 1) /* must reliably zero data */
-extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, int flags,
struct bio **biop);
-extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, struct page *page);
+
+#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
+#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
+
extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
- bool discard);
+ unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, bool discard);
+ sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
+
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
@@ -1360,7 +1355,7 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
return blkdev_issue_zeroout(sb->s_bdev,
block << (sb->s_blocksize_bits - 9),
nr_blocks << (sb->s_blocksize_bits - 9),
- gfp_mask, true);
+ gfp_mask, 0);
}
extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
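A minimal caller sketch, not taken from the patch itself: the trailing "bool discard" argument of blkdev_issue_zeroout() is gone, so callers now pass 0 or one of the new BLKDEV_ZERO_* flags. The function and parameter names below are invented for illustration.

static int example_zero_range(struct block_device *bdev, sector_t sector,
                              sector_t nr_sects)
{
        /* flags == 0 lets the block layer pick the cheapest zeroing method;
         * BLKDEV_ZERO_NOUNMAP would additionally keep the blocks allocated */
        return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, 0);
}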
@@ -1530,19 +1525,6 @@ static inline int bdev_discard_alignment(struct block_device *bdev)
return q->limits.discard_alignment;
}
-static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
-{
- if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
- return 1;
-
- return 0;
-}
-
-static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
-{
- return queue_discard_zeroes_data(bdev_get_queue(bdev));
-}
-
static inline unsigned int bdev_write_same(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
@@ -1673,12 +1655,36 @@ static inline bool bios_segs_mergeable(struct request_queue *q,
return true;
}
-static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
- struct bio *next)
+static inline bool bio_will_gap(struct request_queue *q,
+ struct request *prev_rq,
+ struct bio *prev,
+ struct bio *next)
{
if (bio_has_data(prev) && queue_virt_boundary(q)) {
struct bio_vec pb, nb;
+ /*
+ * don't merge if the 1st bio starts with non-zero
+ * offset, otherwise it is quite difficult to respect
+ * sg gap limit. We work hard to merge a huge number of small
+ * single bios in case of mkfs.
+ */
+ if (prev_rq)
+ bio_get_first_bvec(prev_rq->bio, &pb);
+ else
+ bio_get_first_bvec(prev, &pb);
+ if (pb.bv_offset)
+ return true;
+
+ /*
+ * We don't need to worry about the situation that the
+ * merged segment ends in unaligned virt boundary:
+ *
+ * - if 'pb' ends aligned, the merged segment ends aligned
+ * - if 'pb' ends unaligned, the next bio must include
+ * one single bvec of 'nb', otherwise the 'nb' can't
+ * merge with 'pb'
+ */
bio_get_last_bvec(prev, &pb);
bio_get_first_bvec(next, &nb);
@@ -1691,18 +1697,19 @@ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
- return bio_will_gap(req->q, req->biotail, bio);
+ return bio_will_gap(req->q, req, req->biotail, bio);
}
static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
- return bio_will_gap(req->q, bio, req->bio);
+ return bio_will_gap(req->q, NULL, bio, req->bio);
}
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
+int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
#ifdef CONFIG_BLK_CGROUP
/*
@@ -1916,28 +1923,12 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
#endif /* CONFIG_BLK_DEV_INTEGRITY */
-/**
- * struct blk_dax_ctl - control and output parameters for ->direct_access
- * @sector: (input) offset relative to a block_device
- * @addr: (output) kernel virtual address for @sector populated by driver
- * @pfn: (output) page frame number for @addr populated by driver
- * @size: (input) number of bytes requested
- */
-struct blk_dax_ctl {
- sector_t sector;
- void *addr;
- long size;
- pfn_t pfn;
-};
-
struct block_device_operations {
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
- long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
- long);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
/* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1956,9 +1947,6 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
-extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
-extern int bdev_dax_supported(struct super_block *, int);
-extern bool bdev_dax_capable(struct block_device *);
#else /* CONFIG_BLOCK */
struct block_device;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 909fc033173a..6bb38d76faf4 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -35,6 +35,7 @@ struct bpf_map_ops {
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
int fd);
void (*map_fd_put_ptr)(void *ptr);
+ u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
};
struct bpf_map {
@@ -49,12 +50,7 @@ struct bpf_map {
const struct bpf_map_ops *ops;
struct work_struct work;
atomic_t usercnt;
-};
-
-struct bpf_map_type_list {
- struct list_head list_node;
- const struct bpf_map_ops *ops;
- enum bpf_map_type type;
+ struct bpf_map *inner_map_meta;
};
/* function argument constraints */
@@ -167,12 +163,8 @@ struct bpf_verifier_ops {
const struct bpf_insn *src,
struct bpf_insn *dst,
struct bpf_prog *prog);
-};
-
-struct bpf_prog_type_list {
- struct list_head list_node;
- const struct bpf_verifier_ops *ops;
- enum bpf_prog_type type;
+ int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
};
struct bpf_prog_aux {
@@ -231,11 +223,21 @@ typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
+int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
-void bpf_register_prog_type(struct bpf_prog_type_list *tl);
-void bpf_register_map_type(struct bpf_map_type_list *tl);
+#define BPF_PROG_TYPE(_id, _ops) \
+ extern const struct bpf_verifier_ops _ops;
+#define BPF_MAP_TYPE(_id, _ops) \
+ extern const struct bpf_map_ops _ops;
+#include <linux/bpf_types.h>
+#undef BPF_PROG_TYPE
+#undef BPF_MAP_TYPE
struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
@@ -275,6 +277,8 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
void *key, void *value, u64 map_flags);
void bpf_fd_array_map_clear(struct bpf_map *map);
+int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
+ void *key, void *value, u64 map_flags);
/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
* forced to use 'long' read/writes to try to atomically copy long counters.
@@ -295,10 +299,6 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
#else
-static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl)
-{
-}
-
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
return ERR_PTR(-EOPNOTSUPP);
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
new file mode 100644
index 000000000000..03bf223f18be
--- /dev/null
+++ b/include/linux/bpf_types.h
@@ -0,0 +1,36 @@
+/* internal file - do not include directly */
+
+#ifdef CONFIG_NET
+BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit_prog_ops)
+#endif
+#ifdef CONFIG_BPF_EVENTS
+BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event_prog_ops)
+#endif
+
+BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PROG_ARRAY, prog_array_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops)
+#ifdef CONFIG_CGROUPS
+BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops)
+#ifdef CONFIG_PERF_EVENTS
+BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops)
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
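For illustration only, not part of this patch: the lists above are meant to be consumed with the x-macro pattern, so a user builds a table by redefining one of the two macros and leaving the other empty. The table name below is a placeholder; the real consumers live in the bpf core code.

/* expand only the map-type entries into an ops table indexed by map type */
static const struct bpf_map_ops * const example_map_ops[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
        [_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};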
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index a13b031dc6b8..d5093b52b485 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -40,6 +40,9 @@ struct bpf_reg_state {
*/
s64 min_value;
u64 max_value;
+ u32 min_align;
+ u32 aux_off;
+ u32 aux_off_align;
};
enum bpf_stack_slot_type {
@@ -66,7 +69,10 @@ struct bpf_verifier_state_list {
};
struct bpf_insn_aux_data {
- enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
+ union {
+ enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
+ struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
+ };
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -84,6 +90,7 @@ struct bpf_verifier_env {
struct bpf_prog *prog; /* eBPF program being verified */
struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
int stack_size; /* number of states to be processed */
+ bool strict_alignment; /* perform strict pointer alignment checks */
struct bpf_verifier_state cur_state; /* current verifier state */
struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 55e517130311..abcda9b458ab 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -25,6 +25,9 @@
#define PHY_ID_BCM57780 0x03625d90
#define PHY_ID_BCM7250 0xae025280
+#define PHY_ID_BCM7260 0xae025190
+#define PHY_ID_BCM7268 0xae025090
+#define PHY_ID_BCM7271 0xae0253b0
#define PHY_ID_BCM7278 0xae0251a0
#define PHY_ID_BCM7364 0xae025260
#define PHY_ID_BCM7366 0x600d8490
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 79591c3660cc..bd029e52ef5e 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -196,8 +196,6 @@ void ll_rw_block(int, int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
void write_dirty_buffer(struct buffer_head *bh, int op_flags);
-int _submit_bh(int op, int op_flags, struct buffer_head *bh,
- unsigned long bio_flags);
int submit_bh(int, int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 5828489309bb..687b557fc5eb 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -105,7 +105,7 @@ static inline int is_warning_bug(const struct bug_entry *bug)
return bug->flags & BUGFLAG_WARNING;
}
-const struct bug_entry *find_bug(unsigned long bugaddr);
+struct bug_entry *find_bug(unsigned long bugaddr);
enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index df08a41d5be5..c9a17bb1221c 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -5,7 +5,7 @@
*
* Authors: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
* Urs Thuermann <urs.thuermann@volkswagen.de>
- * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
+ * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
* All rights reserved.
*
*/
@@ -17,7 +17,7 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#define CAN_VERSION "20120528"
+#define CAN_VERSION "20170425"
/* increment this number each time you change some user-space interface */
#define CAN_ABI_VERSION "9"
@@ -45,12 +45,13 @@ struct can_proto {
extern int can_proto_register(const struct can_proto *cp);
extern void can_proto_unregister(const struct can_proto *cp);
-int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+int can_rx_register(struct net *net, struct net_device *dev,
+ canid_t can_id, canid_t mask,
void (*func)(struct sk_buff *, void *),
void *data, char *ident, struct sock *sk);
-extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
- canid_t mask,
+extern void can_rx_unregister(struct net *net, struct net_device *dev,
+ canid_t can_id, canid_t mask,
void (*func)(struct sk_buff *, void *),
void *data);
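A usage sketch, not part of the patch: receive lists are now kept per network namespace, so both calls gain a struct net argument. Taking the namespace from the owning socket, as below, is one plausible pattern; all names here are placeholders.

static void example_rcv(struct sk_buff *skb, void *data)
{
        /* consume or clone the skb */
}

static int example_bind(struct sock *sk, struct net_device *dev, canid_t id)
{
        return can_rx_register(sock_net(sk), dev, id, CAN_SFF_MASK,
                               example_rcv, sk, "example", sk);
}

static void example_unbind(struct sock *sk, struct net_device *dev, canid_t id)
{
        can_rx_unregister(sock_net(sk), dev, id, CAN_SFF_MASK,
                          example_rcv, sk);
}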
diff --git a/include/linux/can/dev/peak_canfd.h b/include/linux/can/dev/peak_canfd.h
new file mode 100644
index 000000000000..46dceef2cfa6
--- /dev/null
+++ b/include/linux/can/dev/peak_canfd.h
@@ -0,0 +1,308 @@
+/*
+ * CAN driver for PEAK System micro-CAN based adapters
+ *
+ * Copyright (C) 2003-2011 PEAK System-Technik GmbH
+ * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef PUCAN_H
+#define PUCAN_H
+
+/* uCAN commands opcodes list (low-order 10 bits) */
+#define PUCAN_CMD_NOP 0x000
+#define PUCAN_CMD_RESET_MODE 0x001
+#define PUCAN_CMD_NORMAL_MODE 0x002
+#define PUCAN_CMD_LISTEN_ONLY_MODE 0x003
+#define PUCAN_CMD_TIMING_SLOW 0x004
+#define PUCAN_CMD_TIMING_FAST 0x005
+#define PUCAN_CMD_SET_STD_FILTER 0x006
+#define PUCAN_CMD_RESERVED2 0x007
+#define PUCAN_CMD_FILTER_STD 0x008
+#define PUCAN_CMD_TX_ABORT 0x009
+#define PUCAN_CMD_WR_ERR_CNT 0x00a
+#define PUCAN_CMD_SET_EN_OPTION 0x00b
+#define PUCAN_CMD_CLR_DIS_OPTION 0x00c
+#define PUCAN_CMD_RX_BARRIER 0x010
+#define PUCAN_CMD_END_OF_COLLECTION 0x3ff
+
+/* uCAN received messages list */
+#define PUCAN_MSG_CAN_RX 0x0001
+#define PUCAN_MSG_ERROR 0x0002
+#define PUCAN_MSG_STATUS 0x0003
+#define PUCAN_MSG_BUSLOAD 0x0004
+
+#define PUCAN_MSG_CACHE_CRITICAL 0x0102
+
+/* uCAN transmitted messages */
+#define PUCAN_MSG_CAN_TX 0x1000
+
+/* uCAN command common header */
+struct __packed pucan_command {
+ __le16 opcode_channel;
+ u16 args[3];
+};
+
+/* return the opcode from the opcode_channel field of a command */
+static inline u16 pucan_cmd_get_opcode(struct pucan_command *c)
+{
+ return le16_to_cpu(c->opcode_channel) & 0x3ff;
+}
+
+#define PUCAN_TSLOW_BRP_BITS 10
+#define PUCAN_TSLOW_TSGEG1_BITS 8
+#define PUCAN_TSLOW_TSGEG2_BITS 7
+#define PUCAN_TSLOW_SJW_BITS 7
+
+#define PUCAN_TSLOW_BRP_MASK ((1 << PUCAN_TSLOW_BRP_BITS) - 1)
+#define PUCAN_TSLOW_TSEG1_MASK ((1 << PUCAN_TSLOW_TSGEG1_BITS) - 1)
+#define PUCAN_TSLOW_TSEG2_MASK ((1 << PUCAN_TSLOW_TSGEG2_BITS) - 1)
+#define PUCAN_TSLOW_SJW_MASK ((1 << PUCAN_TSLOW_SJW_BITS) - 1)
+
+/* uCAN TIMING_SLOW command fields */
+#define PUCAN_TSLOW_SJW_T(s, t) (((s) & PUCAN_TSLOW_SJW_MASK) | \
+ ((!!(t)) << 7))
+#define PUCAN_TSLOW_TSEG2(t) ((t) & PUCAN_TSLOW_TSEG2_MASK)
+#define PUCAN_TSLOW_TSEG1(t) ((t) & PUCAN_TSLOW_TSEG1_MASK)
+#define PUCAN_TSLOW_BRP(b) ((b) & PUCAN_TSLOW_BRP_MASK)
+
+struct __packed pucan_timing_slow {
+ __le16 opcode_channel;
+
+ u8 ewl; /* Error Warning limit */
+ u8 sjw_t; /* Sync Jump Width + Triple sampling */
+ u8 tseg2; /* Timing SEGment 2 */
+ u8 tseg1; /* Timing SEGment 1 */
+
+ __le16 brp; /* BaudRate Prescaler */
+};
+
+#define PUCAN_TFAST_BRP_BITS 10
+#define PUCAN_TFAST_TSGEG1_BITS 5
+#define PUCAN_TFAST_TSGEG2_BITS 4
+#define PUCAN_TFAST_SJW_BITS 4
+
+#define PUCAN_TFAST_BRP_MASK ((1 << PUCAN_TFAST_BRP_BITS) - 1)
+#define PUCAN_TFAST_TSEG1_MASK ((1 << PUCAN_TFAST_TSGEG1_BITS) - 1)
+#define PUCAN_TFAST_TSEG2_MASK ((1 << PUCAN_TFAST_TSGEG2_BITS) - 1)
+#define PUCAN_TFAST_SJW_MASK ((1 << PUCAN_TFAST_SJW_BITS) - 1)
+
+/* uCAN TIMING_FAST command fields */
+#define PUCAN_TFAST_SJW(s) ((s) & PUCAN_TFAST_SJW_MASK)
+#define PUCAN_TFAST_TSEG2(t) ((t) & PUCAN_TFAST_TSEG2_MASK)
+#define PUCAN_TFAST_TSEG1(t) ((t) & PUCAN_TFAST_TSEG1_MASK)
+#define PUCAN_TFAST_BRP(b) ((b) & PUCAN_TFAST_BRP_MASK)
+
+struct __packed pucan_timing_fast {
+ __le16 opcode_channel;
+
+ u8 unused;
+ u8 sjw; /* Sync Jump Width */
+ u8 tseg2; /* Timing SEGment 2 */
+ u8 tseg1; /* Timing SEGment 1 */
+
+ __le16 brp; /* BaudRate Prescaler */
+};
+
+/* uCAN FILTER_STD command fields */
+#define PUCAN_FLTSTD_ROW_IDX_BITS 6
+
+struct __packed pucan_filter_std {
+ __le16 opcode_channel;
+
+ __le16 idx;
+ __le32 mask; /* CAN-ID bitmask in idx range */
+};
+
+#define PUCAN_FLTSTD_ROW_IDX_MAX ((1 << PUCAN_FLTSTD_ROW_IDX_BITS) - 1)
+
+/* uCAN SET_STD_FILTER command fields */
+struct __packed pucan_std_filter {
+ __le16 opcode_channel;
+
+ u8 unused;
+ u8 idx;
+ __le32 mask; /* CAN-ID bitmask in idx range */
+};
+
+/* uCAN TX_ABORT commands fields */
+#define PUCAN_TX_ABORT_FLUSH 0x0001
+
+struct __packed pucan_tx_abort {
+ __le16 opcode_channel;
+
+ __le16 flags;
+ u32 unused;
+};
+
+/* uCAN WR_ERR_CNT command fields */
+#define PUCAN_WRERRCNT_TE 0x4000 /* Tx error cntr write Enable */
+#define PUCAN_WRERRCNT_RE 0x8000 /* Rx error cntr write Enable */
+
+struct __packed pucan_wr_err_cnt {
+ __le16 opcode_channel;
+
+ __le16 sel_mask;
+ u8 tx_counter; /* Tx error counter new value */
+ u8 rx_counter; /* Rx error counter new value */
+
+ u16 unused;
+};
+
+/* uCAN SET_EN/CLR_DIS _OPTION command fields */
+#define PUCAN_OPTION_ERROR 0x0001
+#define PUCAN_OPTION_BUSLOAD 0x0002
+#define PUCAN_OPTION_CANDFDISO 0x0004
+
+struct __packed pucan_options {
+ __le16 opcode_channel;
+
+ __le16 options;
+ u32 unused;
+};
+
+/* uCAN received messages global format */
+struct __packed pucan_msg {
+ __le16 size;
+ __le16 type;
+ __le32 ts_low;
+ __le32 ts_high;
+};
+
+/* uCAN flags for CAN/CANFD messages */
+#define PUCAN_MSG_SELF_RECEIVE 0x80
+#define PUCAN_MSG_ERROR_STATE_IND 0x40 /* error state indicator */
+#define PUCAN_MSG_BITRATE_SWITCH 0x20 /* bitrate switch */
+#define PUCAN_MSG_EXT_DATA_LEN 0x10 /* extended data length */
+#define PUCAN_MSG_SINGLE_SHOT 0x08
+#define PUCAN_MSG_LOOPED_BACK 0x04
+#define PUCAN_MSG_EXT_ID 0x02
+#define PUCAN_MSG_RTR 0x01
+
+struct __packed pucan_rx_msg {
+ __le16 size;
+ __le16 type;
+ __le32 ts_low;
+ __le32 ts_high;
+ __le32 tag_low;
+ __le32 tag_high;
+ u8 channel_dlc;
+ u8 client;
+ __le16 flags;
+ __le32 can_id;
+ u8 d[0];
+};
+
+/* uCAN error types */
+#define PUCAN_ERMSG_BIT_ERROR 0
+#define PUCAN_ERMSG_FORM_ERROR 1
+#define PUCAN_ERMSG_STUFF_ERROR 2
+#define PUCAN_ERMSG_OTHER_ERROR 3
+#define PUCAN_ERMSG_ERR_CNT_DEC 4
+
+struct __packed pucan_error_msg {
+ __le16 size;
+ __le16 type;
+ __le32 ts_low;
+ __le32 ts_high;
+ u8 channel_type_d;
+ u8 code_g;
+ u8 tx_err_cnt;
+ u8 rx_err_cnt;
+};
+
+static inline int pucan_error_get_channel(const struct pucan_error_msg *msg)
+{
+ return msg->channel_type_d & 0x0f;
+}
+
+#define PUCAN_RX_BARRIER 0x10
+#define PUCAN_BUS_PASSIVE 0x20
+#define PUCAN_BUS_WARNING 0x40
+#define PUCAN_BUS_BUSOFF 0x80
+
+struct __packed pucan_status_msg {
+ __le16 size;
+ __le16 type;
+ __le32 ts_low;
+ __le32 ts_high;
+ u8 channel_p_w_b;
+ u8 unused[3];
+};
+
+static inline int pucan_status_get_channel(const struct pucan_status_msg *msg)
+{
+ return msg->channel_p_w_b & 0x0f;
+}
+
+static inline int pucan_status_is_rx_barrier(const struct pucan_status_msg *msg)
+{
+ return msg->channel_p_w_b & PUCAN_RX_BARRIER;
+}
+
+static inline int pucan_status_is_passive(const struct pucan_status_msg *msg)
+{
+ return msg->channel_p_w_b & PUCAN_BUS_PASSIVE;
+}
+
+static inline int pucan_status_is_warning(const struct pucan_status_msg *msg)
+{
+ return msg->channel_p_w_b & PUCAN_BUS_WARNING;
+}
+
+static inline int pucan_status_is_busoff(const struct pucan_status_msg *msg)
+{
+ return msg->channel_p_w_b & PUCAN_BUS_BUSOFF;
+}
+
+/* uCAN transmitted message format */
+#define PUCAN_MSG_CHANNEL_DLC(c, d) (((c) & 0xf) | ((d) << 4))
+
+struct __packed pucan_tx_msg {
+ __le16 size;
+ __le16 type;
+ __le32 tag_low;
+ __le32 tag_high;
+ u8 channel_dlc;
+ u8 client;
+ __le16 flags;
+ __le32 can_id;
+ u8 d[0];
+};
+
+/* build the cmd opcode_channel field with respect to the correct endianness */
+static inline __le16 pucan_cmd_opcode_channel(int index, int opcode)
+{
+ return cpu_to_le16(((index) << 12) | ((opcode) & 0x3ff));
+}
+
+/* return the channel number part from any received message channel_dlc field */
+static inline int pucan_msg_get_channel(const struct pucan_rx_msg *msg)
+{
+ return msg->channel_dlc & 0xf;
+}
+
+/* return the dlc value from any received message channel_dlc field */
+static inline int pucan_msg_get_dlc(const struct pucan_rx_msg *msg)
+{
+ return msg->channel_dlc >> 4;
+}
+
+static inline int pucan_ermsg_get_channel(const struct pucan_error_msg *msg)
+{
+ return msg->channel_type_d & 0x0f;
+}
+
+static inline int pucan_stmsg_get_channel(const struct pucan_status_msg *msg)
+{
+ return msg->channel_p_w_b & 0x0f;
+}
+
+#endif
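A short usage sketch, not from the patch: building and decoding a command with the helpers defined above. The channel index 0 and the function name are arbitrary.

static void example_build_normal_mode(struct pucan_command *cmd)
{
        memset(cmd, 0, sizeof(*cmd));

        /* channel index goes into the upper bits, opcode into the low 10 bits */
        cmd->opcode_channel = pucan_cmd_opcode_channel(0, PUCAN_CMD_NORMAL_MODE);

        /* reading it back yields the opcode again */
        WARN_ON(pucan_cmd_get_opcode(cmd) != PUCAN_CMD_NORMAL_MODE);
}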
diff --git a/include/linux/can/platform/ti_hecc.h b/include/linux/can/platform/ti_hecc.h
deleted file mode 100644
index a52f47ca6c8a..000000000000
--- a/include/linux/can/platform/ti_hecc.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _CAN_PLATFORM_TI_HECC_H
-#define _CAN_PLATFORM_TI_HECC_H
-
-/*
- * TI HECC (High End CAN Controller) driver platform header
- *
- * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed as is WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-/**
- * struct hecc_platform_data - HECC Platform Data
- *
- * @scc_hecc_offset: mostly 0 - should really never change
- * @scc_ram_offset: SCC RAM offset
- * @hecc_ram_offset: HECC RAM offset
- * @mbx_offset: Mailbox RAM offset
- * @int_line: Interrupt line to use - 0 or 1
- * @version: version for future use
- * @transceiver_switch: platform specific callback fn for transceiver control
- *
- * Platform data structure to get all platform specific settings.
- * this structure also accounts the fact that the IP may have different
- * RAM and mailbox offsets for different SOC's
- */
-struct ti_hecc_platform_data {
- u32 scc_hecc_offset;
- u32 scc_ram_offset;
- u32 hecc_ram_offset;
- u32 mbx_offset;
- u32 int_line;
- u32 version;
- void (*transceiver_switch) (int);
-};
-#endif /* !_CAN_PLATFORM_TI_HECC_H */
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index c41b8d99dd0e..3285c944194a 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -123,6 +123,10 @@ enum ccp_aes_mode {
CCP_AES_MODE_CFB,
CCP_AES_MODE_CTR,
CCP_AES_MODE_CMAC,
+ CCP_AES_MODE_GHASH,
+ CCP_AES_MODE_GCTR,
+ CCP_AES_MODE_GCM,
+ CCP_AES_MODE_GMAC,
CCP_AES_MODE__LAST,
};
@@ -137,6 +141,9 @@ enum ccp_aes_action {
CCP_AES_ACTION_ENCRYPT,
CCP_AES_ACTION__LAST,
};
+/* Overloaded field */
+#define CCP_AES_GHASHAAD CCP_AES_ACTION_DECRYPT
+#define CCP_AES_GHASHFINAL CCP_AES_ACTION_ENCRYPT
/**
* struct ccp_aes_engine - CCP AES operation
@@ -181,6 +188,8 @@ struct ccp_aes_engine {
struct scatterlist *cmac_key; /* K1/K2 cmac key required for
* final cmac cmd */
u32 cmac_key_len; /* In bytes */
+
+ u32 aad_len; /* In bytes */
};
/***** XTS-AES engine *****/
@@ -249,6 +258,8 @@ enum ccp_sha_type {
CCP_SHA_TYPE_1 = 1,
CCP_SHA_TYPE_224,
CCP_SHA_TYPE_256,
+ CCP_SHA_TYPE_384,
+ CCP_SHA_TYPE_512,
CCP_SHA_TYPE__LAST,
};
@@ -290,6 +301,60 @@ struct ccp_sha_engine {
* final sha cmd */
};
+/***** 3DES engine *****/
+enum ccp_des3_mode {
+ CCP_DES3_MODE_ECB = 0,
+ CCP_DES3_MODE_CBC,
+ CCP_DES3_MODE_CFB,
+ CCP_DES3_MODE__LAST,
+};
+
+enum ccp_des3_type {
+ CCP_DES3_TYPE_168 = 1,
+ CCP_DES3_TYPE__LAST,
+ };
+
+enum ccp_des3_action {
+ CCP_DES3_ACTION_DECRYPT = 0,
+ CCP_DES3_ACTION_ENCRYPT,
+ CCP_DES3_ACTION__LAST,
+};
+
+/**
+ * struct ccp_des3_engine - CCP 3DES operation
+ * @type: Type of 3DES operation
+ * @mode: cipher mode
+ * @action: 3DES operation (decrypt/encrypt)
+ * @key: key to be used for this 3DES operation
+ * @key_len: length of key (in bytes)
+ * @iv: IV to be used for this 3DES operation
+ * @iv_len: length in bytes of iv
+ * @src: input data to be used for this operation
+ * @src_len: length of input data used for this operation (in bytes)
+ * @dst: output data produced by this operation
+ *
+ * Variables required to be set when calling ccp_enqueue_cmd():
+ * - type, mode, action, key, key_len, src, dst, src_len
+ * - iv, iv_len for any mode other than ECB
+ *
+ * The iv variable is used as both input and output. On completion of the
+ * 3DES operation the new IV overwrites the old IV.
+ */
+struct ccp_des3_engine {
+ enum ccp_des3_type type;
+ enum ccp_des3_mode mode;
+ enum ccp_des3_action action;
+
+ struct scatterlist *key;
+ u32 key_len; /* In bytes */
+
+ struct scatterlist *iv;
+ u32 iv_len; /* In bytes */
+
+ struct scatterlist *src, *dst;
+ u64 src_len; /* In bytes */
+};
+
/***** RSA engine *****/
/**
* struct ccp_rsa_engine - CCP RSA operation
@@ -539,7 +604,7 @@ struct ccp_ecc_engine {
enum ccp_engine {
CCP_ENGINE_AES = 0,
CCP_ENGINE_XTS_AES_128,
- CCP_ENGINE_RSVD1,
+ CCP_ENGINE_DES3,
CCP_ENGINE_SHA,
CCP_ENGINE_RSA,
CCP_ENGINE_PASSTHRU,
@@ -587,6 +652,7 @@ struct ccp_cmd {
union {
struct ccp_aes_engine aes;
struct ccp_xts_aes_engine xts;
+ struct ccp_des3_engine des3;
struct ccp_sha_engine sha;
struct ccp_rsa_engine rsa;
struct ccp_passthru_engine passthru;
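An illustrative submission sketch, not part of the patch: queueing a 3DES-CBC encryption through the new engine. The scatterlists are assumed to be prepared by the caller, and ccp_enqueue_cmd() is the submission entry point referenced in the kernel-doc above; all other names are placeholders.

static int example_des3_encrypt(struct scatterlist *key_sg,
                                struct scatterlist *iv_sg,
                                struct scatterlist *src_sg,
                                struct scatterlist *dst_sg,
                                unsigned int len)
{
        struct ccp_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.engine = CCP_ENGINE_DES3;
        cmd.u.des3.type = CCP_DES3_TYPE_168;
        cmd.u.des3.mode = CCP_DES3_MODE_CBC;
        cmd.u.des3.action = CCP_DES3_ACTION_ENCRYPT;
        cmd.u.des3.key = key_sg;
        cmd.u.des3.key_len = 24;        /* three DES keys, in bytes */
        cmd.u.des3.iv = iv_sg;
        cmd.u.des3.iv_len = 8;          /* required for any mode other than ECB */
        cmd.u.des3.src = src_sg;
        cmd.u.des3.dst = dst_sg;
        cmd.u.des3.src_len = len;

        return ccp_enqueue_cmd(&cmd);
}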
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index f8763615a5f2..408bc09ce497 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -4,6 +4,7 @@
#include <linux/kobject.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
+#include <linux/device.h>
struct file_operations;
struct inode;
@@ -26,6 +27,10 @@ void cdev_put(struct cdev *p);
int cdev_add(struct cdev *, dev_t, unsigned);
+void cdev_set_parent(struct cdev *p, struct kobject *kobj);
+int cdev_device_add(struct cdev *cdev, struct device *dev);
+void cdev_device_del(struct cdev *cdev, struct device *dev);
+
void cdev_del(struct cdev *);
void cd_forget(struct inode *);
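A registration sketch, not taken from the patch: the new helpers add and remove a cdev together with the struct device that represents it. The example_dev structure and names are invented; dev.devt must be set before the add.

struct example_dev {
        struct device dev;
        struct cdev cdev;
};

static int example_register(struct example_dev *my, dev_t devt,
                            const struct file_operations *fops)
{
        device_initialize(&my->dev);
        my->dev.devt = devt;

        cdev_init(&my->cdev, fops);
        my->cdev.owner = THIS_MODULE;

        /* adds the cdev, then the device, as a single operation */
        return cdev_device_add(&my->cdev, &my->dev);
}

static void example_unregister(struct example_dev *my)
{
        cdev_device_del(&my->cdev, &my->dev);
}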
diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h
index aa2e19182d99..51c5bd64bd00 100644
--- a/include/linux/ceph/ceph_debug.h
+++ b/include/linux/ceph/ceph_debug.h
@@ -3,6 +3,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/string.h>
+
#ifdef CONFIG_CEPH_LIB_PRETTYDEBUG
/*
@@ -12,12 +14,10 @@
*/
# if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
-extern const char *ceph_file_part(const char *s, int len);
# define dout(fmt, ...) \
pr_debug("%.*s %12.12s:%-4d : " fmt, \
8 - (int)sizeof(KBUILD_MODNAME), " ", \
- ceph_file_part(__FILE__, sizeof(__FILE__)), \
- __LINE__, ##__VA_ARGS__)
+ kbasename(__FILE__), __LINE__, ##__VA_ARGS__)
# else
/* faux printk call just to see any compiler warnings. */
# define dout(fmt, ...) do { \
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index ae2f66833762..fd8b2953c78f 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -105,8 +105,10 @@ static inline u64 ceph_sanitize_features(u64 features)
*/
#define CEPH_FEATURES_SUPPORTED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \
+ CEPH_FEATURE_FLOCK | \
CEPH_FEATURE_SUBSCRIBE2 | \
CEPH_FEATURE_RECONNECT_SEQ | \
+ CEPH_FEATURE_DIRLAYOUTHASH | \
CEPH_FEATURE_PGID64 | \
CEPH_FEATURE_PGPOOL3 | \
CEPH_FEATURE_OSDENC | \
@@ -114,11 +116,13 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_MSG_AUTH | \
CEPH_FEATURE_CRUSH_TUNABLES2 | \
CEPH_FEATURE_REPLY_CREATE_INODE | \
+ CEPH_FEATURE_MDSENC | \
CEPH_FEATURE_OSDHASHPSPOOL | \
CEPH_FEATURE_OSD_CACHEPOOL | \
CEPH_FEATURE_CRUSH_V2 | \
CEPH_FEATURE_EXPORT_PEER | \
CEPH_FEATURE_OSDMAP_ENC | \
+ CEPH_FEATURE_MDS_INLINE_DATA | \
CEPH_FEATURE_CRUSH_TUNABLES3 | \
CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
CEPH_FEATURE_MSGR_KEEPALIVE2 | \
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index f4b2ee18f38c..ad078ebe25d6 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -365,6 +365,19 @@ extern const char *ceph_mds_op_name(int op);
#define CEPH_READDIR_FRAG_END (1<<0)
#define CEPH_READDIR_FRAG_COMPLETE (1<<8)
#define CEPH_READDIR_HASH_ORDER (1<<9)
+#define CEPH_READDIR_OFFSET_HASH (1<<10)
+
+/*
+ * open request flags
+ */
+#define CEPH_O_RDONLY 00000000
+#define CEPH_O_WRONLY 00000001
+#define CEPH_O_RDWR 00000002
+#define CEPH_O_CREAT 00000100
+#define CEPH_O_EXCL 00000200
+#define CEPH_O_TRUNC 00001000
+#define CEPH_O_DIRECTORY 00200000
+#define CEPH_O_NOFOLLOW 00400000
union ceph_mds_request_args {
struct {
@@ -384,6 +397,7 @@ union ceph_mds_request_args {
__le32 max_entries; /* how many dentries to grab */
__le32 max_bytes;
__le16 flags;
+ __le32 offset_hash;
} __attribute__ ((packed)) readdir;
struct {
__le32 mode;
diff --git a/include/linux/ceph/cls_lock_client.h b/include/linux/ceph/cls_lock_client.h
index 84884d8d4710..0594d3bba774 100644
--- a/include/linux/ceph/cls_lock_client.h
+++ b/include/linux/ceph/cls_lock_client.h
@@ -37,6 +37,11 @@ int ceph_cls_break_lock(struct ceph_osd_client *osdc,
struct ceph_object_locator *oloc,
char *lock_name, char *cookie,
struct ceph_entity_name *locker);
+int ceph_cls_set_cookie(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ char *lock_name, u8 type, char *old_cookie,
+ char *tag, char *new_cookie);
void ceph_free_lockers(struct ceph_locker *lockers, u32 num_lockers);
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 88cd5dc8e238..3229ae6c7846 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -14,6 +14,7 @@
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/slab.h>
+#include <linux/refcount.h>
#include <linux/ceph/types.h>
#include <linux/ceph/messenger.h>
@@ -161,7 +162,7 @@ struct ceph_client {
* dirtied.
*/
struct ceph_snap_context {
- atomic_t nref;
+ refcount_t nref;
u64 seq;
u32 num_snaps;
u64 snaps[];
@@ -262,10 +263,7 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client);
extern void ceph_destroy_options(struct ceph_options *opt);
extern int ceph_compare_options(struct ceph_options *new_opt,
struct ceph_client *client);
-extern struct ceph_client *ceph_create_client(struct ceph_options *opt,
- void *private,
- u64 supported_features,
- u64 required_features);
+struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private);
struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client);
u64 ceph_client_gid(struct ceph_client *client);
extern void ceph_destroy_client(struct ceph_client *client);
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
index 8ed5dc505fbb..d5f783f3226a 100644
--- a/include/linux/ceph/mdsmap.h
+++ b/include/linux/ceph/mdsmap.h
@@ -25,6 +25,7 @@ struct ceph_mdsmap {
u32 m_session_autoclose; /* seconds */
u64 m_max_file_size;
u32 m_max_mds; /* size of m_addr, m_state arrays */
+ int m_num_mds;
struct ceph_mds_info *m_info;
/* which object pools file data can be stored in */
@@ -40,7 +41,7 @@ struct ceph_mdsmap {
static inline struct ceph_entity_addr *
ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
{
- if (w >= m->m_max_mds)
+ if (w >= m->m_num_mds)
return NULL;
return &m->m_info[w].addr;
}
@@ -48,14 +49,14 @@ ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
{
BUG_ON(w < 0);
- if (w >= m->m_max_mds)
+ if (w >= m->m_num_mds)
return CEPH_MDS_STATE_DNE;
return m->m_info[w].state;
}
static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
{
- if (w >= 0 && w < m->m_max_mds)
+ if (w >= 0 && w < m->m_num_mds)
return m->m_info[w].laggy;
return false;
}
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index c125b5d9e13c..85650b415e73 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -5,6 +5,7 @@
#include <linux/kref.h>
#include <linux/mempool.h>
#include <linux/rbtree.h>
+#include <linux/refcount.h>
#include <linux/ceph/types.h>
#include <linux/ceph/osdmap.h>
@@ -27,7 +28,7 @@ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *);
/* a given osd we're communicating with */
struct ceph_osd {
- atomic_t o_ref;
+ refcount_t o_ref;
struct ceph_osd_client *o_osdc;
int o_osd;
int o_incarnation;
@@ -186,12 +187,12 @@ struct ceph_osd_request {
struct timespec r_mtime; /* ditto */
u64 r_data_offset; /* ditto */
bool r_linger; /* don't resend on failure */
+ bool r_abort_on_full; /* return ENOSPC when full */
/* internal */
unsigned long r_stamp; /* jiffies, send or check time */
unsigned long r_start_stamp; /* jiffies */
int r_attempts;
- struct ceph_eversion r_replay_version; /* aka reassert_version */
u32 r_last_force_resend;
u32 r_map_dne_bound;
@@ -266,6 +267,7 @@ struct ceph_osd_client {
struct rb_root osds; /* osds */
struct list_head osd_lru; /* idle osds */
spinlock_t osd_lru_lock;
+ u32 epoch_barrier;
struct ceph_osd homeless_osd;
atomic64_t last_tid; /* tid of last request */
u64 last_linger_id;
@@ -304,6 +306,7 @@ extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
+void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb);
extern void osd_req_op_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode, u32 flags);
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h
index 13d71fe18b0c..75a7db21457d 100644
--- a/include/linux/ceph/pagelist.h
+++ b/include/linux/ceph/pagelist.h
@@ -2,7 +2,7 @@
#define __FS_CEPH_PAGELIST_H
#include <asm/byteorder.h>
-#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/list.h>
#include <linux/types.h>
@@ -13,7 +13,7 @@ struct ceph_pagelist {
size_t room;
struct list_head free_list;
size_t num_pages_free;
- atomic_t refcnt;
+ refcount_t refcnt;
};
struct ceph_pagelist_cursor {
@@ -30,7 +30,7 @@ static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
pl->room = 0;
INIT_LIST_HEAD(&pl->free_list);
pl->num_pages_free = 0;
- atomic_set(&pl->refcnt, 1);
+ refcount_set(&pl->refcnt, 1);
}
extern void ceph_pagelist_release(struct ceph_pagelist *pl);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 6a3f850cabab..21745946cae1 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -13,6 +13,7 @@
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
@@ -106,9 +107,6 @@ struct cgroup_subsys_state {
/* reference count - access via css_[try]get() and css_put() */
struct percpu_ref refcnt;
- /* PI: the parent css */
- struct cgroup_subsys_state *parent;
-
/* siblings list anchored at the parent's ->children */
struct list_head sibling;
struct list_head children;
@@ -138,6 +136,12 @@ struct cgroup_subsys_state {
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
+
+ /*
+ * PI: the parent css. Placed here for cache proximity to following
+ * fields of the containing structure.
+ */
+ struct cgroup_subsys_state *parent;
};
/*
@@ -156,7 +160,7 @@ struct css_set {
struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
/* reference count */
- atomic_t refcount;
+ refcount_t refcount;
/* the default cgroup associated with this css_set */
struct cgroup *dfl_cgrp;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index f6b43fbb141c..ed2573e149fa 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -17,11 +17,11 @@
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
-#include <linux/nsproxy.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
+#include <linux/refcount.h>
#include <linux/cgroup-defs.h>
@@ -570,6 +570,25 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
pr_cont_kernfs_path(cgrp->kn);
}
+static inline void cgroup_init_kthreadd(void)
+{
+ /*
+ * kthreadd is inherited by all kthreads, keep it in the root so
+ * that the new kthreads are guaranteed to stay in the root until
+ * initialization is finished.
+ */
+ current->no_cgroup_migration = 1;
+}
+
+static inline void cgroup_kthread_ready(void)
+{
+ /*
+ * This kthread finished initialization. The creator should have
+ * set PF_NO_SETAFFINITY if this kthread should stay in the root.
+ */
+ current->no_cgroup_migration = 0;
+}
+
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
@@ -590,6 +609,8 @@ static inline void cgroup_free(struct task_struct *p) {}
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
+static inline void cgroup_init_kthreadd(void) {}
+static inline void cgroup_kthread_ready(void) {}
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
struct cgroup *ancestor)
@@ -640,7 +661,7 @@ static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
#endif /* CONFIG_CGROUP_DATA */
struct cgroup_namespace {
- atomic_t count;
+ refcount_t count;
struct ns_common ns;
struct user_namespace *user_ns;
struct ucounts *ucounts;
@@ -675,12 +696,12 @@ copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
if (ns)
- atomic_inc(&ns->count);
+ refcount_inc(&ns->count);
}
static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
- if (ns && atomic_dec_and_test(&ns->count))
+ if (ns && refcount_dec_and_test(&ns->count))
free_cgroup_ns(ns);
}
diff --git a/include/linux/clk.h b/include/linux/clk.h
index e9d36b3e49de..024cd07870d0 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -132,8 +132,8 @@ int clk_get_phase(struct clk *clk);
* @q: clk compared against p
*
* Returns true if the two struct clk pointers both point to the same hardware
- * clock node. Put differently, returns true if struct clk *p and struct clk *q
- * share the same struct clk_core object.
+ * clock node. Put differently, returns true if @p and @q
+ * share the same &struct clk_core object.
*
* Returns false otherwise. Note that two NULL clks are treated as matching.
*/
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index 7007a5f48080..d23c9cf26993 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -125,5 +125,8 @@ extern void tegra210_xusb_pll_hw_control_enable(void);
extern void tegra210_xusb_pll_hw_sequence_start(void);
extern void tegra210_sata_pll_hw_control_enable(void);
extern void tegra210_sata_pll_hw_sequence_start(void);
+extern void tegra210_set_sata_pll_seq_sw(bool state);
+extern void tegra210_put_utmipll_in_iddq(void);
+extern void tegra210_put_utmipll_out_iddq(void);
#endif /* __LINUX_CLK_TEGRA_H_ */
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 6110fe09ed18..d18da839b810 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -19,6 +19,18 @@
#include <linux/clkdev.h>
/**
+ * struct clk_omap_reg - OMAP register declaration
+ * @offset: offset from the master IP module base address
+ * @index: index of the master IP module
+ */
+struct clk_omap_reg {
+ void __iomem *ptr;
+ u16 offset;
+ u8 index;
+ u8 flags;
+};
+
+/**
* struct dpll_data - DPLL registers and integration data
* @mult_div1_reg: register containing the DPLL M and N bitfields
* @mult_mask: mask of the DPLL M bitfield in @mult_div1_reg
@@ -67,12 +79,12 @@
* can be placed into read-only space.
*/
struct dpll_data {
- void __iomem *mult_div1_reg;
+ struct clk_omap_reg mult_div1_reg;
u32 mult_mask;
u32 div1_mask;
struct clk_hw *clk_bypass;
struct clk_hw *clk_ref;
- void __iomem *control_reg;
+ struct clk_omap_reg control_reg;
u32 enable_mask;
unsigned long last_rounded_rate;
u16 last_rounded_m;
@@ -84,8 +96,8 @@ struct dpll_data {
u16 max_divider;
unsigned long max_rate;
u8 modes;
- void __iomem *autoidle_reg;
- void __iomem *idlest_reg;
+ struct clk_omap_reg autoidle_reg;
+ struct clk_omap_reg idlest_reg;
u32 autoidle_mask;
u32 freqsel_mask;
u32 idlest_mask;
@@ -113,10 +125,10 @@ struct clk_hw_omap;
*/
struct clk_hw_omap_ops {
void (*find_idlest)(struct clk_hw_omap *oclk,
- void __iomem **idlest_reg,
+ struct clk_omap_reg *idlest_reg,
u8 *idlest_bit, u8 *idlest_val);
void (*find_companion)(struct clk_hw_omap *oclk,
- void __iomem **other_reg,
+ struct clk_omap_reg *other_reg,
u8 *other_bit);
void (*allow_idle)(struct clk_hw_omap *oclk);
void (*deny_idle)(struct clk_hw_omap *oclk);
@@ -129,8 +141,6 @@ struct clk_hw_omap_ops {
* @enable_bit: bitshift to write to enable/disable the clock (see @enable_reg)
* @flags: see "struct clk.flags possibilities" above
* @clksel_reg: for clksel clks, register va containing src/divisor select
- * @clksel_mask: bitmask in @clksel_reg for the src/divisor selector
- * @clksel: for clksel clks, pointer to struct clksel for this clock
* @dpll_data: for DPLLs, pointer to struct dpll_data for this clock
* @clkdm_name: clockdomain name that this clock is contained in
* @clkdm: pointer to struct clockdomain, resolved from @clkdm_name at runtime
@@ -141,12 +151,10 @@ struct clk_hw_omap {
struct list_head node;
unsigned long fixed_rate;
u8 fixed_div;
- void __iomem *enable_reg;
+ struct clk_omap_reg enable_reg;
u8 enable_bit;
u8 flags;
- void __iomem *clksel_reg;
- u32 clksel_mask;
- const struct clksel *clksel;
+ struct clk_omap_reg clksel_reg;
struct dpll_data *dpll_data;
const char *clkdm_name;
struct clockdomain *clkdm;
@@ -172,7 +180,6 @@ struct clk_hw_omap {
* should be used. This is a temporary solution - a better approach
* would be to associate clock type-specific data with the clock,
* similar to the struct dpll_data approach.
- * MEMMAP_ADDRESSING: Use memmap addressing to access clock registers.
*/
#define ENABLE_REG_32BIT (1 << 0) /* Use 32-bit access */
#define CLOCK_IDLE_CONTROL (1 << 1)
@@ -180,7 +187,6 @@ struct clk_hw_omap {
#define ENABLE_ON_INIT (1 << 3) /* Enable upon framework init */
#define INVERT_ENABLE (1 << 4) /* 0 enables, 1 disables */
#define CLOCK_CLKOUTX2 (1 << 5)
-#define MEMMAP_ADDRESSING (1 << 6)
/* CM_CLKEN_PLL*.EN* bit values - not all are available for every DPLL */
#define DPLL_LOW_POWER_STOP 0x1
@@ -202,21 +208,12 @@ enum {
};
/**
- * struct clk_omap_reg - OMAP register declaration
- * @offset: offset from the master IP module base address
- * @index: index of the master IP module
- */
-struct clk_omap_reg {
- u16 offset;
- u16 index;
-};
-
-/**
* struct ti_clk_ll_ops - low-level ops for clocks
* @clk_readl: pointer to register read function
* @clk_writel: pointer to register write function
* @clkdm_clk_enable: pointer to clockdomain enable function
* @clkdm_clk_disable: pointer to clockdomain disable function
+ * @clkdm_lookup: pointer to clockdomain lookup function
* @cm_wait_module_ready: pointer to CM module wait ready function
* @cm_split_idlest_reg: pointer to CM module function to split idlest reg
*
@@ -227,20 +224,20 @@ struct clk_omap_reg {
* operations not provided directly by clock drivers.
*/
struct ti_clk_ll_ops {
- u32 (*clk_readl)(void __iomem *reg);
- void (*clk_writel)(u32 val, void __iomem *reg);
+ u32 (*clk_readl)(const struct clk_omap_reg *reg);
+ void (*clk_writel)(u32 val, const struct clk_omap_reg *reg);
int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk);
int (*clkdm_clk_disable)(struct clockdomain *clkdm,
struct clk *clk);
+ struct clockdomain * (*clkdm_lookup)(const char *name);
int (*cm_wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg,
u8 idlest_shift);
- int (*cm_split_idlest_reg)(void __iomem *idlest_reg, s16 *prcm_inst,
- u8 *idlest_reg_id);
+ int (*cm_split_idlest_reg)(struct clk_omap_reg *idlest_reg,
+ s16 *prcm_inst, u8 *idlest_reg_id);
};
#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw)
-void omap2_init_clk_clkdm(struct clk_hw *clk);
int omap2_clk_disable_autoidle_all(void);
int omap2_clk_enable_autoidle_all(void);
int omap2_clk_allow_idle(struct clk *clk);
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 6d7edc3082f9..acc9ce05e5f0 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -182,7 +182,6 @@ extern u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *e
extern void clockevents_register_device(struct clock_event_device *dev);
extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu);
-extern void clockevents_config(struct clock_event_device *dev, u32 freq);
extern void clockevents_config_and_register(struct clock_event_device *dev,
u32 freq, unsigned long min_delta,
unsigned long max_delta);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index cfc75848a35d..f2b10d9ebd04 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -120,7 +120,7 @@ struct clocksource {
#define CLOCK_SOURCE_RESELECT 0x100
/* simplify initialization of mask field */
-#define CLOCKSOURCE_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
{
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 03f32d0bd1d8..3e8fbf5a5c73 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -21,15 +21,19 @@ struct cma;
extern unsigned long totalcma_pages;
extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);
+extern const char *cma_get_name(const struct cma *cma);
extern int __init cma_declare_contiguous(phys_addr_t base,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
- bool fixed, struct cma **res_cma);
+ bool fixed, const char *name, struct cma **res_cma);
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
+ const char *name,
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
gfp_t gfp_mask);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
+
+extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
#endif
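A caller sketch, not part of the patch: reservations now carry a human-readable name as the new argument before the result pointer, while everything else keeps its previous meaning. The values and the "example" label below are placeholders.

static int __init example_reserve_cma(phys_addr_t base, phys_addr_t size,
                                      struct cma **out)
{
        /* no limit/alignment constraints, not fixed, area named "example" */
        return cma_declare_contiguous(base, size, 0, 0, 0, false,
                                      "example", out);
}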
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index 5b8721efa948..31e4e1f1547c 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -15,7 +15,6 @@ struct venus_comm {
struct list_head vc_processing;
int vc_inuse;
struct super_block *vc_sb;
- struct backing_dev_info bdi;
struct mutex vc_mutex;
};
diff --git a/include/linux/compat.h b/include/linux/compat.h
index aef47be2a5c1..1c5f3152cbb5 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -295,6 +295,13 @@ struct compat_old_sigaction {
};
#endif
+struct compat_keyctl_kdf_params {
+ compat_uptr_t hashname;
+ compat_uptr_t otherinfo;
+ __u32 otherinfolen;
+ __u32 __spare[8];
+};
+
struct compat_statfs;
struct compat_statfs64;
struct compat_old_linux_dirent;
@@ -528,11 +535,6 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
asmlinkage long compat_sys_getdents(unsigned int fd,
struct compat_linux_dirent __user *dirent,
unsigned int count);
-#ifdef __ARCH_WANT_COMPAT_SYS_GETDENTS64
-asmlinkage long compat_sys_getdents64(unsigned int fd,
- struct linux_dirent64 __user *dirent,
- unsigned int count);
-#endif
asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *,
unsigned int nr_segs, unsigned int flags);
asmlinkage long compat_sys_open(const char __user *filename, int flags,
@@ -723,6 +725,8 @@ asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
int, const char __user *);
+asmlinkage long compat_sys_arch_prctl(int option, unsigned long arg2);
+
/*
* For most but not all architectures, "am I in a compat syscall?" and
* "am I a compat task?" are the same question. For architectures on which
diff --git a/include/linux/console.h b/include/linux/console.h
index 5949d1855589..b8920a031a3e 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -212,4 +212,6 @@ extern bool vgacon_text_force(void);
static inline bool vgacon_text_force(void) { return false; }
#endif
+extern void console_init(void);
+
#endif /* _LINUX_CONSOLE_H */
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 2a5982c37dfb..035c16c9a505 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -201,7 +201,7 @@ struct coresight_ops_sink {
void *sink_config);
unsigned long (*reset_buffer)(struct coresight_device *csdev,
struct perf_output_handle *handle,
- void *sink_config, bool *lost);
+ void *sink_config);
void (*update_buffer)(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 87165f06a307..a5ce0bbeadb5 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -120,6 +120,13 @@ struct cpufreq_policy {
bool fast_switch_possible;
bool fast_switch_enabled;
+ /*
+ * Preferred average time interval between consecutive invocations of
+ * the driver to set the frequency for this policy. To be set by the
+ * scaling driver (0, which is the default, means no preference).
+ */
+ unsigned int transition_delay_us;
+
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
int cached_resolved_idx;
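A driver-side sketch, not from the patch: a scaling driver that prefers roughly half a millisecond between consecutive frequency updates can set the new field from its ->init() callback. The function name and the value are made up.

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
        /* ... driver specific setup ... */

        /* preferred interval between consecutive frequency updates, in us */
        policy->transition_delay_us = 500;
        return 0;
}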
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 62d240e962f0..0f2a80377520 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -94,6 +94,7 @@ enum cpuhp_state {
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
+ CPUHP_AP_PERF_ARM_ACPI_STARTING,
CPUHP_AP_PERF_ARM_STARTING,
CPUHP_AP_ARM_L2X0_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
@@ -137,6 +138,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_CCN_ONLINE,
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
+ CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_ONLINE_DYN,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 96f1e88b767c..2404ad238c0b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -40,9 +40,9 @@ extern int nr_cpu_ids;
#ifdef CONFIG_CPUMASK_OFFSTACK
/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
* not all bits may be allocated. */
-#define nr_cpumask_bits nr_cpu_ids
+#define nr_cpumask_bits ((unsigned int)nr_cpu_ids)
#else
-#define nr_cpumask_bits NR_CPUS
+#define nr_cpumask_bits ((unsigned int)NR_CPUS)
#endif
/*
@@ -667,6 +667,11 @@ void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
+static inline bool cpumask_available(cpumask_var_t mask)
+{
+ return mask != NULL;
+}
+
#else
typedef struct cpumask cpumask_var_t[1];
@@ -708,6 +713,11 @@ static inline void free_cpumask_var(cpumask_var_t mask)
static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
+
+static inline bool cpumask_available(cpumask_var_t mask)
+{
+ return true;
+}
#endif /* CONFIG_CPUMASK_OFFSTACK */
/* It's common to want to use cpu_all_mask in struct member initializers,
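A usage sketch, not part of the patch: cpumask_available() gives callers one test that works for both representations of cpumask_var_t, a pointer when CONFIG_CPUMASK_OFFSTACK=y and an embedded array otherwise. The mask and function names are placeholders.

static cpumask_var_t example_mask;

static void example_mark_cpu(int cpu)
{
        /* false only in the off-stack case before zalloc_cpumask_var()
         * has succeeded; always true for the embedded representation */
        if (!cpumask_available(example_mask))
                return;

        cpumask_set_cpu(cpu, example_mask);
}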
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 611fce58d670..119a3f9604b0 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -42,7 +42,7 @@ static inline void cpuset_dec(void)
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
-extern void cpuset_update_active_cpus(bool cpu_online);
+extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -155,7 +155,7 @@ static inline bool cpusets_enabled(void) { return false; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
-static inline void cpuset_update_active_cpus(bool cpu_online)
+static inline void cpuset_update_active_cpus(void)
{
partition_sched_domains(1, NULL, NULL);
}
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
new file mode 100644
index 000000000000..541a197ba4a2
--- /dev/null
+++ b/include/linux/crash_core.h
@@ -0,0 +1,69 @@
+#ifndef LINUX_CRASH_CORE_H
+#define LINUX_CRASH_CORE_H
+
+#include <linux/linkage.h>
+#include <linux/elfcore.h>
+#include <linux/elf.h>
+
+#define CRASH_CORE_NOTE_NAME "CORE"
+#define CRASH_CORE_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
+#define CRASH_CORE_NOTE_NAME_BYTES ALIGN(sizeof(CRASH_CORE_NOTE_NAME), 4)
+#define CRASH_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
+
+#define CRASH_CORE_NOTE_BYTES ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
+ CRASH_CORE_NOTE_NAME_BYTES + \
+ CRASH_CORE_NOTE_DESC_BYTES)
+
+#define VMCOREINFO_BYTES (4096)
+#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
+#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
+#define VMCOREINFO_NOTE_SIZE ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
+ VMCOREINFO_NOTE_NAME_BYTES + \
+ VMCOREINFO_BYTES)
+
+typedef u32 note_buf_t[CRASH_CORE_NOTE_BYTES/4];
+
+void crash_save_vmcoreinfo(void);
+void arch_crash_save_vmcoreinfo(void);
+__printf(1, 2)
+void vmcoreinfo_append_str(const char *fmt, ...);
+phys_addr_t paddr_vmcoreinfo_note(void);
+
+#define VMCOREINFO_OSRELEASE(value) \
+ vmcoreinfo_append_str("OSRELEASE=%s\n", value)
+#define VMCOREINFO_PAGESIZE(value) \
+ vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
+#define VMCOREINFO_SYMBOL(name) \
+ vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
+#define VMCOREINFO_SIZE(name) \
+ vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
+ (unsigned long)sizeof(name))
+#define VMCOREINFO_STRUCT_SIZE(name) \
+ vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
+ (unsigned long)sizeof(struct name))
+#define VMCOREINFO_OFFSET(name, field) \
+ vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
+ (unsigned long)offsetof(struct name, field))
+#define VMCOREINFO_LENGTH(name, value) \
+ vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value)
+#define VMCOREINFO_NUMBER(name) \
+ vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
+#define VMCOREINFO_CONFIG(name) \
+ vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
+
+extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
+extern size_t vmcoreinfo_size;
+extern size_t vmcoreinfo_max_size;
+
+Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
+ void *data, size_t data_len);
+void final_note(Elf_Word *buf);
+
+int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
+ unsigned long long *crash_size, unsigned long long *crash_base);
+int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
+ unsigned long long *crash_size, unsigned long long *crash_base);
+int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
+ unsigned long long *crash_size, unsigned long long *crash_base);
+
+#endif /* LINUX_CRASH_CORE_H */
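As a rough illustration of how the exported macros are meant to be used, a sketch of an arch_crash_save_vmcoreinfo() implementation; the symbols recorded here are examples only, not any architecture's actual list:

#include <linux/crash_core.h>
#include <linux/sched.h>

void arch_crash_save_vmcoreinfo(void)
{
        VMCOREINFO_NUMBER(PAGE_SHIFT);          /* emits "NUMBER(PAGE_SHIFT)=..."      */
        VMCOREINFO_SYMBOL(init_task);           /* emits "SYMBOL(init_task)=<address>" */
        VMCOREINFO_STRUCT_SIZE(task_struct);    /* emits "SIZE(task_struct)=..."       */
        VMCOREINFO_OFFSET(task_struct, pid);    /* emits "OFFSET(task_struct.pid)=..." */
}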
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index c0b0cf3d2d2f..84da9978e951 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -123,7 +123,7 @@
/*
* Miscellaneous stuff.
*/
-#define CRYPTO_MAX_ALG_NAME 64
+#define CRYPTO_MAX_ALG_NAME 128
/*
* The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
index 3252799832cf..df4d3e943d28 100644
--- a/include/linux/cryptohash.h
+++ b/include/linux/cryptohash.h
@@ -10,9 +10,4 @@
void sha_init(__u32 *buf);
void sha_transform(__u32 *digest, const char *data, __u32 *W);
-#define MD5_DIGEST_WORDS 4
-#define MD5_MESSAGE_BYTES 64
-
-void md5_transform(__u32 *hash, __u32 const *in);
-
#endif
diff --git a/include/linux/dax.h b/include/linux/dax.h
index d8a3dc042e1c..5ec1f6c47716 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -7,6 +7,74 @@
#include <asm/pgtable.h>
struct iomap_ops;
+struct dax_device;
+struct dax_operations {
+ /*
+ * direct_access: translate a device-relative
+ * logical-page-offset into an absolute physical pfn. Return the
+ * number of pages available for DAX at that pfn.
+ */
+ long (*direct_access)(struct dax_device *, pgoff_t, long,
+ void **, pfn_t *);
+};
+
+#if IS_ENABLED(CONFIG_DAX)
+struct dax_device *dax_get_by_host(const char *host);
+void put_dax(struct dax_device *dax_dev);
+#else
+static inline struct dax_device *dax_get_by_host(const char *host)
+{
+ return NULL;
+}
+
+static inline void put_dax(struct dax_device *dax_dev)
+{
+}
+#endif
+
+int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
+#if IS_ENABLED(CONFIG_FS_DAX)
+int __bdev_dax_supported(struct super_block *sb, int blocksize);
+static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
+{
+ return __bdev_dax_supported(sb, blocksize);
+}
+
+static inline struct dax_device *fs_dax_get_by_host(const char *host)
+{
+ return dax_get_by_host(host);
+}
+
+static inline void fs_put_dax(struct dax_device *dax_dev)
+{
+ put_dax(dax_dev);
+}
+
+#else
+static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct dax_device *fs_dax_get_by_host(const char *host)
+{
+ return NULL;
+}
+
+static inline void fs_put_dax(struct dax_device *dax_dev)
+{
+}
+#endif
+
+int dax_read_lock(void);
+void dax_read_unlock(int id);
+struct dax_device *alloc_dax(void *private, const char *host,
+ const struct dax_operations *ops);
+bool dax_alive(struct dax_device *dax_dev);
+void kill_dax(struct dax_device *dax_dev);
+void *dax_get_private(struct dax_device *dax_dev);
+long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
+ void **kaddr, pfn_t *pfn);
/*
* We use lowest available bit in exceptional entry for locking, one bit for
@@ -41,24 +109,19 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
const struct iomap_ops *ops);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
-int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
pgoff_t index, void *entry, bool wake_all);
#ifdef CONFIG_FS_DAX
-struct page *read_dax_sector(struct block_device *bdev, sector_t n);
-int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
+int __dax_zero_page_range(struct block_device *bdev,
+ struct dax_device *dax_dev, sector_t sector,
unsigned int offset, unsigned int length);
#else
-static inline struct page *read_dax_sector(struct block_device *bdev,
- sector_t n)
-{
- return ERR_PTR(-ENXIO);
-}
static inline int __dax_zero_page_range(struct block_device *bdev,
- sector_t sector, unsigned int offset, unsigned int length)
+ struct dax_device *dax_dev, sector_t sector,
+ unsigned int offset, unsigned int length)
{
return -ENXIO;
}
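A minimal sketch of a consumer of the new dax_device API declared above (error handling trimmed, function name hypothetical); dax_read_lock() pins the device against kill_dax() while the translation is in use:

#include <linux/dax.h>
#include <linux/pfn_t.h>

static long example_peek_dax(struct dax_device *dax_dev, pgoff_t pgoff)
{
        void *kaddr;
        pfn_t pfn;
        long nr_pages;
        int id;

        id = dax_read_lock();
        nr_pages = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
        dax_read_unlock(id);

        return nr_pages;        /* pages mappable at @pgoff, or a negative errno */
}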
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 7dff776e6d16..9174b0d28582 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -74,7 +74,7 @@ static const struct file_operations __fops = { \
.release = simple_attr_release, \
.read = debugfs_attr_read, \
.write = debugfs_attr_write, \
- .llseek = generic_file_llseek, \
+ .llseek = no_llseek, \
}
#if defined(CONFIG_DEBUG_FS)
diff --git a/include/linux/dell-led.h b/include/linux/dell-led.h
index 7009b8bec77b..3f033c48071e 100644
--- a/include/linux/dell-led.h
+++ b/include/linux/dell-led.h
@@ -1,10 +1,6 @@
#ifndef __DELL_LED_H__
#define __DELL_LED_H__
-enum {
- DELL_LED_MICMUTE,
-};
-
-int dell_app_wmi_led_set(int whichled, int on);
+int dell_micmute_led_set(int on);
#endif
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index e0acb0e5243b..6c220e4ebb6b 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -27,6 +27,7 @@
#define DEVFREQ_POSTCHANGE (1)
struct devfreq;
+struct devfreq_governor;
/**
* struct devfreq_dev_status - Data given from devfreq user device to
@@ -101,35 +102,6 @@ struct devfreq_dev_profile {
};
/**
- * struct devfreq_governor - Devfreq policy governor
- * @node: list node - contains registered devfreq governors
- * @name: Governor's name
- * @immutable: Immutable flag for governor. If the value is 1,
- * this govenror is never changeable to other governor.
- * @get_target_freq: Returns desired operating frequency for the device.
- * Basically, get_target_freq will run
- * devfreq_dev_profile.get_dev_status() to get the
- * status of the device (load = busy_time / total_time).
- * If no_central_polling is set, this callback is called
- * only with update_devfreq() notified by OPP.
- * @event_handler: Callback for devfreq core framework to notify events
- * to governors. Events include per device governor
- * init and exit, opp changes out of devfreq, suspend
- * and resume of per device devfreq during device idle.
- *
- * Note that the callbacks are called with devfreq->lock locked by devfreq.
- */
-struct devfreq_governor {
- struct list_head node;
-
- const char name[DEVFREQ_NAME_LEN];
- const unsigned int immutable;
- int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
- int (*event_handler)(struct devfreq *devfreq,
- unsigned int event, void *data);
-};
-
-/**
* struct devfreq - Device devfreq structure
* @node: list node - contains the devices with devfreq that have been
* registered.
diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h
index c35d0c0e0ada..4635f95000a4 100644
--- a/include/linux/devfreq_cooling.h
+++ b/include/linux/devfreq_cooling.h
@@ -34,6 +34,23 @@
* If get_dynamic_power() is NULL, then the
* dynamic power is calculated as
* @dyn_power_coeff * frequency * voltage^2
+ * @get_real_power: When this is set, the framework uses it to ask the
+ * device driver for the actual power.
+ * Some devices have more sophisticated methods
+ * (like power counters) to approximate the actual power
+ * that they use.
+ * This function provides more accurate data to the
+ * thermal governor. When the driver does not provide
+ * such a function, the framework just uses the
+ * pre-calculated table and scales the power by the
+ * 'utilization' (based on 'busy_time' and 'total_time'
+ * taken from devfreq 'last_status').
+ * The value returned by this function must be less
+ * than or equal to the maximum power value
+ * for the current state
+ * (which can be found in power_table[state]).
+ * When this interface is used, the power_table holds
+ * max total (static + dynamic) power value for each OPP.
*/
struct devfreq_cooling_power {
unsigned long (*get_static_power)(struct devfreq *devfreq,
@@ -41,6 +58,8 @@ struct devfreq_cooling_power {
unsigned long (*get_dynamic_power)(struct devfreq *devfreq,
unsigned long freq,
unsigned long voltage);
+ int (*get_real_power)(struct devfreq *df, u32 *power,
+ unsigned long freq, unsigned long voltage);
unsigned long dyn_power_coeff;
};
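A sketch of a driver wiring up the new hook; the constant readback stands in for a real hardware power counter and all names are illustrative:

#include <linux/devfreq_cooling.h>

static int example_get_real_power(struct devfreq *df, u32 *power,
                                  unsigned long freq, unsigned long voltage)
{
        *power = 1000;  /* mW, as read from a hypothetical power counter */
        return 0;
}

static struct devfreq_cooling_power example_cooling_power = {
        .get_real_power = example_get_real_power,
};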
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a7e6903866fd..f4c639c0c362 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -22,11 +22,13 @@ struct bio_vec;
/*
* Type of table, mapped_device's mempool and request_queue
*/
-#define DM_TYPE_NONE 0
-#define DM_TYPE_BIO_BASED 1
-#define DM_TYPE_REQUEST_BASED 2
-#define DM_TYPE_MQ_REQUEST_BASED 3
-#define DM_TYPE_DAX_BIO_BASED 4
+enum dm_queue_mode {
+ DM_TYPE_NONE = 0,
+ DM_TYPE_BIO_BASED = 1,
+ DM_TYPE_REQUEST_BASED = 2,
+ DM_TYPE_MQ_REQUEST_BASED = 3,
+ DM_TYPE_DAX_BIO_BASED = 4,
+};
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
@@ -128,13 +130,15 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
* < 0 : error
* >= 0 : the number of bytes accessible at the address
*/
-typedef long (*dm_direct_access_fn) (struct dm_target *ti, sector_t sector,
- void **kaddr, pfn_t *pfn, long size);
+typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn);
+#define PAGE_SECTORS (PAGE_SIZE / 512)
void dm_error(const char *message);
struct dm_dev {
struct block_device *bdev;
+ struct dax_device *dax_dev;
fmode_t mode;
char name[16];
};
@@ -176,7 +180,7 @@ struct target_type {
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
- dm_direct_access_fn direct_access;
+ dm_dax_direct_access_fn direct_access;
/* For internal device-mapper use. */
struct list_head list;
@@ -221,6 +225,18 @@ struct target_type {
*/
typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);
+/*
+ * A target implements own bio data integrity.
+ */
+#define DM_TARGET_INTEGRITY 0x00000010
+#define dm_target_has_integrity(type) ((type)->features & DM_TARGET_INTEGRITY)
+
+/*
+ * A target passes integrity data to the lower device.
+ */
+#define DM_TARGET_PASSES_INTEGRITY 0x00000020
+#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)
+
struct dm_target {
struct dm_table *table;
struct target_type *type;
@@ -255,6 +271,12 @@ struct dm_target {
unsigned num_write_same_bios;
/*
+ * The number of WRITE ZEROES bios that will be submitted to the target.
+ * The bio number can be accessed with dm_bio_get_target_bio_nr.
+ */
+ unsigned num_write_zeroes_bios;
+
+ /*
* The minimum number of extra bytes allocated in each io for the
* target to use.
*/
@@ -290,11 +312,6 @@ struct dm_target {
* on max_io_len boundary.
*/
bool split_discard_bios:1;
-
- /*
- * Set if this target does not return zeroes on discarded blocks.
- */
- bool discard_zeroes_data_unsupported:1;
};
/* Each target can link one of these into the table */
@@ -464,7 +481,7 @@ void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callback
* Useful for "hybrid" target (supports both bio-based
* and request-based).
*/
-void dm_table_set_type(struct dm_table *t, unsigned type);
+void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);
/*
* Finally call this to make the table ready for use.
@@ -578,6 +595,7 @@ extern struct ratelimit_state dm_ratelimit_state;
/*
* Definitions of return values from target end_io function.
*/
+#define DM_ENDIO_DONE 0
#define DM_ENDIO_INCOMPLETE 1
#define DM_ENDIO_REQUEUE 2
@@ -588,6 +606,7 @@ extern struct ratelimit_state dm_ratelimit_state;
#define DM_MAPIO_REMAPPED 1
#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE 3
+#define DM_MAPIO_KILL 4
#define dm_sector_div64(x, y)( \
{ \
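A sketch of a target adopting the reworked, page-based direct_access hook and the new feature flags; the pass-through to dax_direct_access() and the context struct are illustrative, not dm-linear's actual code:

#include <linux/device-mapper.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>

struct example_ctx {                    /* hypothetical per-target private data */
        struct dm_dev *dev;
};

static long example_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
                                      long nr_pages, void **kaddr, pfn_t *pfn)
{
        struct example_ctx *ec = ti->private;

        return dax_direct_access(ec->dev->dax_dev, pgoff, nr_pages, kaddr, pfn);
}

static struct target_type example_target = {
        .name           = "example",
        .version        = {1, 0, 0},
        .features       = DM_TARGET_PASSES_INTEGRITY,
        .direct_access  = example_dax_direct_access,
};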
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index bfb3704fc6fc..79f27d60ec66 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -39,13 +39,13 @@ struct dma_buf_attachment;
/**
* struct dma_buf_ops - operations possible on struct dma_buf
- * @kmap_atomic: maps a page from the buffer into kernel address
- * space, users may not block until the subsequent unmap call.
- * This callback must not sleep.
- * @kunmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
- * This Callback must not sleep.
- * @kmap: maps a page from the buffer into kernel address space.
- * @kunmap: [optional] unmaps a page from the buffer.
+ * @map_atomic: maps a page from the buffer into kernel address
+ * space, users may not block until the subsequent unmap call.
+ * This callback must not sleep.
+ * @unmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
+ * This callback must not sleep.
+ * @map: maps a page from the buffer into kernel address space.
+ * @unmap: [optional] unmaps a page from the buffer.
* @vmap: [optional] creates a virtual mapping for the buffer into kernel
* address space. Same restrictions as for vmap and friends apply.
* @vunmap: [optional] unmaps a vmap from the buffer
@@ -206,10 +206,10 @@ struct dma_buf_ops {
* to be restarted.
*/
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
- void *(*kmap_atomic)(struct dma_buf *, unsigned long);
- void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
- void *(*kmap)(struct dma_buf *, unsigned long);
- void (*kunmap)(struct dma_buf *, unsigned long, void *);
+ void *(*map_atomic)(struct dma_buf *, unsigned long);
+ void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
+ void *(*map)(struct dma_buf *, unsigned long);
+ void (*unmap)(struct dma_buf *, unsigned long, void *);
/**
* @mmap:
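For illustration, a partial ops table as an exporter would write it after the rename above; the stub implementations are placeholders only:

#include <linux/dma-buf.h>

static void *example_map(struct dma_buf *buf, unsigned long page_num)
{
        return NULL;    /* a real exporter would return a kernel mapping here */
}

static void example_unmap(struct dma_buf *buf, unsigned long page_num, void *addr)
{
}

static const struct dma_buf_ops example_dmabuf_ops = {
        .map    = example_map,          /* was .kmap */
        .unmap  = example_unmap,        /* was .kunmap */
};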
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index 5900945f962d..332a5420243c 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -83,4 +83,6 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
u64 context, unsigned seqno,
bool signal_on_any);
+bool dma_fence_match_context(struct dma_fence *fence, u64 context);
+
#endif /* __LINUX_DMA_FENCE_ARRAY_H */
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 6048fa404e57..a5195a7d6f77 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -229,7 +229,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
*
* Function returns NULL if no refcount could be obtained, or the fence.
* This function handles acquiring a reference to a fence that may be
- * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU),
+ * reallocated within the RCU grace period (such as with SLAB_TYPESAFE_BY_RCU),
* so long as the caller is using RCU on the pointer to the fence.
*
* An alternative mechanism is to employ a seqlock to protect a bunch of
@@ -257,7 +257,7 @@ dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
* have successfully acquire a reference to it. If it no
* longer matches, we are holding a reference to some other
* reallocated pointer. This is possible if the allocator
- * is using a freelist like SLAB_DESTROY_BY_RCU where the
+ * is using a freelist like SLAB_TYPESAFE_BY_RCU where the
* fence remains valid for the RCU grace period, but it
* may be reallocated. When using such allocators, we are
* responsible for ensuring the reference we get is to
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 5725c94b1f12..4eac2670bfa1 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -20,6 +20,7 @@
#include <asm/errno.h>
#ifdef CONFIG_IOMMU_DMA
+#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/msi.h>
@@ -71,6 +72,7 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
/* The DMA API isn't _quite_ the whole story, though... */
void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
+void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
#else
@@ -100,6 +102,10 @@ static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
}
+static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
+{
+}
+
#endif /* CONFIG_IOMMU_DMA */
#endif /* __KERNEL__ */
#endif /* __DMA_IOMMU_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 0977317c6835..4f3eecedca2d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -728,6 +728,18 @@ dma_mark_declared_memory_occupied(struct device *dev,
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
+#ifdef CONFIG_HAS_DMA
+int dma_configure(struct device *dev);
+void dma_deconfigure(struct device *dev);
+#else
+static inline int dma_configure(struct device *dev)
+{
+ return 0;
+}
+
+static inline void dma_deconfigure(struct device *dev) {}
+#endif
+
/*
* Managed DMA API
*/
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index 187c10299722..90884072fa73 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -39,6 +39,7 @@ extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
extern int intel_iommu_enabled;
+extern int intel_iommu_tboot_noforce;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 5b6adf964248..8ae0f45fafd6 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -28,12 +28,10 @@ struct device;
#define EDAC_OPSTATE_INT 2
extern int edac_op_state;
-extern int edac_err_assert;
-extern atomic_t edac_handlers;
-extern int edac_handler_set(void);
-extern void edac_atomic_assert_error(void);
-extern struct bus_type *edac_get_sysfs_subsys(void);
+struct bus_type *edac_get_sysfs_subsys(void);
+int edac_get_report_status(void);
+void edac_set_report_status(int new);
enum {
EDAC_REPORTING_ENABLED,
@@ -41,28 +39,6 @@ enum {
EDAC_REPORTING_FORCE
};
-extern int edac_report_status;
-#ifdef CONFIG_EDAC
-static inline int get_edac_report_status(void)
-{
- return edac_report_status;
-}
-
-static inline void set_edac_report_status(int new)
-{
- edac_report_status = new;
-}
-#else
-static inline int get_edac_report_status(void)
-{
- return EDAC_REPORTING_DISABLED;
-}
-
-static inline void set_edac_report_status(int new)
-{
-}
-#endif
-
static inline void opstate_init(void)
{
switch (edac_op_state) {
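A small sketch of a RAS consumer moving to the accessor declared above, now that the get_edac_report_status()/set_edac_report_status() inlines are gone (function name hypothetical):

#include <linux/edac.h>

static bool example_edac_reporting_enabled(void)
{
        return edac_get_report_status() != EDAC_REPORTING_DISABLED;
}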
diff --git a/include/linux/efi-bgrt.h b/include/linux/efi-bgrt.h
index 2fd3993c370b..e6f624b53c3d 100644
--- a/include/linux/efi-bgrt.h
+++ b/include/linux/efi-bgrt.h
@@ -6,6 +6,7 @@
#ifdef CONFIG_ACPI_BGRT
void efi_bgrt_init(struct acpi_table_header *table);
+int __init acpi_parse_bgrt(struct acpi_table_header *table);
/* The BGRT data itself; only valid if bgrt_image != NULL. */
extern size_t bgrt_image_size;
@@ -14,6 +15,10 @@ extern struct acpi_table_bgrt bgrt_tab;
#else /* !CONFIG_ACPI_BGRT */
static inline void efi_bgrt_init(struct acpi_table_header *table) {}
+static inline int __init acpi_parse_bgrt(struct acpi_table_header *table)
+{
+ return 0;
+}
#endif /* !CONFIG_ACPI_BGRT */
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 94d34e0be24f..ec36f42a2add 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1435,9 +1435,6 @@ static inline int efi_runtime_map_copy(void *buf, size_t bufsz)
/* prototypes shared between arch specific and generic stub code */
-#define pr_efi(sys_table, msg) efi_printk(sys_table, "EFI stub: "msg)
-#define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg)
-
void efi_printk(efi_system_table_t *sys_table_arg, char *str);
void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
@@ -1471,7 +1468,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
unsigned long *load_addr,
unsigned long *load_size);
-efi_status_t efi_parse_options(char *cmdline);
+efi_status_t efi_parse_options(char const *cmdline);
efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
struct screen_info *si, efi_guid_t *proto,
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index aebecc4ed088..9ec5e22846e0 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -8,6 +8,9 @@
struct io_cq;
struct elevator_type;
+#ifdef CONFIG_BLK_DEBUG_FS
+struct blk_mq_debugfs_attr;
+#endif
/*
* Return values from elevator merger
@@ -93,6 +96,8 @@ struct blk_mq_hw_ctx;
struct elevator_mq_ops {
int (*init_sched)(struct request_queue *, struct elevator_type *);
void (*exit_sched)(struct elevator_queue *);
+ int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+ void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
@@ -104,7 +109,7 @@ struct elevator_mq_ops {
void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
bool (*has_work)(struct blk_mq_hw_ctx *);
- void (*completed_request)(struct blk_mq_hw_ctx *, struct request *);
+ void (*completed_request)(struct request *);
void (*started_request)(struct request *);
void (*requeue_request)(struct request *);
struct request *(*former_request)(struct request_queue *, struct request *);
@@ -142,6 +147,10 @@ struct elevator_type
char elevator_name[ELV_NAME_MAX];
struct module *elevator_owner;
bool uses_mq;
+#ifdef CONFIG_BLK_DEBUG_FS
+ const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
+ const struct blk_mq_debugfs_attr *hctx_debugfs_attrs;
+#endif
/* managed by elevator core */
char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */
@@ -211,8 +220,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(struct elevator_queue *);
-extern int elevator_change(struct request_queue *, const char *);
+extern void elevator_exit(struct request_queue *, struct elevator_queue *);
extern bool elv_bio_merge_ok(struct request *, struct bio *);
extern struct elevator_queue *elevator_alloc(struct request_queue *,
struct elevator_type *);
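A sketch of how an mq I/O scheduler hooks into the extended elevator_mq_ops: the new per-hctx init/exit callbacks, plus completed_request() now taking only the request. Everything here is illustrative stub code.

#include <linux/elevator.h>

static int example_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        return 0;       /* allocate per-hctx scheduler state here */
}

static void example_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        /* free per-hctx scheduler state here */
}

static void example_completed_request(struct request *rq)
{
        /* note: the hctx argument was dropped by the hunk above */
}

static struct elevator_type example_sched = {
        .uses_mq        = true,
        .elevator_name  = "example-sched",
        .ops.mq = {
                .init_hctx              = example_init_hctx,
                .exit_hctx              = example_exit_hctx,
                .completed_request      = example_completed_request,
        },
};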
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 20fa8d8ae313..ba069e8f4f78 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
#define elf_note elf32_note
#define elf_addr_t Elf32_Off
#define Elf_Half Elf32_Half
+#define Elf_Word Elf32_Word
#else
@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
#define elf_note elf64_note
#define elf_addr_t Elf64_Off
#define Elf_Half Elf64_Half
+#define Elf_Word Elf64_Word
#endif
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index c62b709b1ce0..2d9f80848d4b 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -447,21 +447,6 @@ static inline void eth_addr_dec(u8 *addr)
}
/**
- * ether_addr_greater - Compare two Ethernet addresses
- * @addr1: Pointer to a six-byte array containing the Ethernet address
- * @addr2: Pointer other six-byte array containing the Ethernet address
- *
- * Compare two Ethernet addresses, returns true addr1 is greater than addr2
- */
-static inline bool ether_addr_greater(const u8 *addr1, const u8 *addr2)
-{
- u64 u1 = ether_addr_to_u64(addr1);
- u64 u2 = ether_addr_to_u64(addr2);
-
- return u1 > u2;
-}
-
-/**
* is_etherdev_addr - Tell if given Ethernet address belongs to the device.
* @dev: Pointer to a device structure
* @addr: Pointer to a six-byte array containing the Ethernet address
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 9ded8c6d8176..83cc9863444b 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -60,6 +60,7 @@ enum ethtool_phys_id_state {
enum {
ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */
ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */
+ ETH_RSS_HASH_CRC32_BIT, /* Configurable RSS hash function - Crc32 */
/*
* Add your fresh new hash function bits above and remember to update
@@ -73,6 +74,7 @@ enum {
#define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP)
#define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR)
+#define ETH_RSS_HASH_CRC32 __ETH_RSS_HASH(CRC32)
#define ETH_RSS_HASH_UNKNOWN 0
#define ETH_RSS_HASH_NO_CHANGE 0
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 7010fb01a81a..7e206a9f88db 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -236,11 +236,11 @@ extern int extcon_set_property_capability(struct extcon_dev *edev,
unsigned int id, unsigned int prop);
/*
- * Following APIs are to monitor every action of a notifier.
- * Registrar gets notified for every external port of a connection device.
- * Probably this could be used to debug an action of notifier; however,
- * we do not recommend to use this for normal 'notifiee' device drivers who
- * want to be notified by a specific external port of the notifier.
+ * Following APIs are to monitor the status change of the external connectors.
+ * extcon_register_notifier(*edev, id, *nb) : Register a notifier block
+ * for specific external connector of the extcon.
+ * extcon_register_notifier_all(*edev, *nb) : Register a notifier block
+ * for all supported external connectors of the extcon.
*/
extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
@@ -253,6 +253,17 @@ extern void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
+extern int extcon_register_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb);
+extern int extcon_unregister_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb);
+extern int devm_extcon_register_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb);
+extern void devm_extcon_unregister_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb);
+
/*
* Following API get the extcon device from devicetree.
* This function use phandle of devicetree to get extcon device directly.
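A sketch of a consumer that wants events for every cable of an extcon device via the new *_all() variants declared above; the callback and names are hypothetical:

#include <linux/extcon.h>
#include <linux/notifier.h>

static int example_cable_evt(struct notifier_block *nb, unsigned long event, void *ptr)
{
        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_cable_evt,
};

static int example_attach(struct device *dev, struct extcon_dev *edev)
{
        /* one registration covers all supported connectors of @edev */
        return devm_extcon_register_notifier_all(dev, edev, &example_nb);
}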
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index e2d239ed4c60..b6feed6547ce 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -32,9 +32,9 @@
/* 0, 1(node nid), 2(meta nid) are reserved node id */
#define F2FS_RESERVED_NODE_NUM 3
-#define F2FS_ROOT_INO(sbi) (sbi->root_ino_num)
-#define F2FS_NODE_INO(sbi) (sbi->node_ino_num)
-#define F2FS_META_INO(sbi) (sbi->meta_ino_num)
+#define F2FS_ROOT_INO(sbi) ((sbi)->root_ino_num)
+#define F2FS_NODE_INO(sbi) ((sbi)->node_ino_num)
+#define F2FS_META_INO(sbi) ((sbi)->meta_ino_num)
#define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */
#define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */
@@ -114,6 +114,7 @@ struct f2fs_super_block {
/*
* For checkpoint
*/
+#define CP_TRIMMED_FLAG 0x00000100
#define CP_NAT_BITS_FLAG 0x00000080
#define CP_CRC_RECOVERY_FLAG 0x00000040
#define CP_FASTBOOT_FLAG 0x00000020
@@ -161,7 +162,7 @@ struct f2fs_checkpoint {
*/
#define F2FS_ORPHANS_PER_BLOCK 1020
-#define GET_ORPHAN_BLOCKS(n) ((n + F2FS_ORPHANS_PER_BLOCK - 1) / \
+#define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \
F2FS_ORPHANS_PER_BLOCK)
struct f2fs_orphan_block {
@@ -302,6 +303,12 @@ struct f2fs_nat_block {
#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
/*
+ * F2FS uses 4 bytes to represent block address. As a result, supported size of
+ * disk is 16 TB, which equals 16 * 1024 * 1024 / 2 segments.
+ */
+#define F2FS_MAX_SEGMENT ((16 * 1024 * 1024) / 2)
+
+/*
* Note that f2fs_sit_entry->vblocks has the following bit-field information.
* [15:10] : allocation type such as CURSEG_XXXX_TYPE
* [9:0] : valid block count
@@ -449,7 +456,7 @@ typedef __le32 f2fs_hash_t;
#define F2FS_SLOT_LEN 8
#define F2FS_SLOT_LEN_BITS 3
-#define GET_DENTRY_SLOTS(x) ((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
+#define GET_DENTRY_SLOTS(x) (((x) + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
/* MAX level for dir lookup */
#define MAX_DIR_HASH_DEPTH 63
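A worked check of the arithmetic behind F2FS_MAX_SEGMENT (standalone user-space C, purely illustrative): a 4-byte block address covers 2^32 blocks of 4 KiB, i.e. 16 TiB, and a 2 MiB segment holds 512 such blocks.

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t blocks   = UINT64_C(1) << 32;  /* 4-byte block address space */
        uint64_t segments = blocks / 512;       /* 512 x 4 KiB blocks per 2 MiB segment */

        assert(segments == (16 * 1024 * 1024) / 2);     /* matches F2FS_MAX_SEGMENT */
        return 0;
}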
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index 76ce329e656d..1b48d9c9a561 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -3,6 +3,12 @@
#include <uapi/linux/fcntl.h>
+/* list of all valid flags for the open/openat flags argument: */
+#define VALID_OPEN_FLAGS \
+ (O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
+ O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \
+ FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
+ O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
#ifndef force_o_largefile
#define force_o_largefile() (BITS_PER_LONG != 32)
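A sketch of the kind of validation the new mask enables; the helper name is hypothetical (the real consumer lives in fs/fcntl.c):

#include <linux/fcntl.h>
#include <linux/errno.h>

static int example_check_open_flags(int flags)
{
        if (flags & ~VALID_OPEN_FLAGS)
                return -EINVAL;         /* unknown bits set by userspace */
        return 0;
}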
diff --git a/include/linux/filter.h b/include/linux/filter.h
index fbf7b39e8103..62d948f80730 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -7,6 +7,7 @@
#include <stdarg.h>
#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
@@ -18,7 +19,9 @@
#include <net/sch_generic.h>
-#include <asm/cacheflush.h>
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+#include <asm/set_memory.h>
+#endif
#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>
@@ -269,6 +272,16 @@ struct bpf_prog_aux;
.off = OFF, \
.imm = IMM })
+/* Unconditional jumps, goto pc + off16 */
+
+#define BPF_JMP_A(OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP | BPF_JA, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Function call */
#define BPF_EMIT_CALL(FUNC) \
@@ -412,8 +425,7 @@ struct bpf_prog {
locked:1, /* Program image locked? */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
- dst_needed:1, /* Do we need dst entry? */
- xdp_adjust_head:1; /* Adjusting pkt head? */
+ dst_needed:1; /* Do we need dst entry? */
kmemcheck_bitfield_end(meta);
enum bpf_prog_type type; /* Type of BPF program */
u32 len; /* Number of filter blocks */
@@ -430,7 +442,7 @@ struct bpf_prog {
};
struct sk_filter {
- atomic_t refcnt;
+ refcount_t refcnt;
struct rcu_head rcu;
struct bpf_prog *prog;
};
@@ -693,6 +705,11 @@ static inline bool bpf_jit_is_ebpf(void)
# endif
}
+static inline bool ebpf_jit_enabled(void)
+{
+ return bpf_jit_enable && bpf_jit_is_ebpf();
+}
+
static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
return fp->jited && bpf_jit_is_ebpf();
@@ -753,6 +770,11 @@ void bpf_prog_kallsyms_del(struct bpf_prog *fp);
#else /* CONFIG_BPF_JIT */
+static inline bool ebpf_jit_enabled(void)
+{
+ return false;
+}
+
static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
return false;
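Purely illustrative use of the new BPF_JMP_A() helper next to existing insn macros: an unconditional hop over one instruction.

#include <linux/filter.h>

static const struct bpf_insn example_prog[] = {
        BPF_MOV64_IMM(BPF_REG_0, 0),    /* r0 = 0                         */
        BPF_JMP_A(1),                   /* goto pc + 1: skip the next insn */
        BPF_MOV64_IMM(BPF_REG_0, 1),    /* never executed                 */
        BPF_EXIT_INSN(),                /* return r0 (= 0)                */
};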
diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h
index 8e953c6f394a..37a5eaea69dd 100644
--- a/include/linux/firmware/meson/meson_sm.h
+++ b/include/linux/firmware/meson/meson_sm.h
@@ -25,7 +25,7 @@ int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0, u32 arg1,
u32 arg2, u32 arg3, u32 arg4);
int meson_sm_call_write(void *buffer, unsigned int b_size, unsigned int cmd_index,
u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
-int meson_sm_call_read(void *buffer, unsigned int cmd_index, u32 arg0, u32 arg1,
- u32 arg2, u32 arg3, u32 arg4);
+int meson_sm_call_read(void *buffer, unsigned int bsize, unsigned int cmd_index,
+ u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
#endif /* _MESON_SM_FW_H_ */
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h
index b6efb0c64408..11366b3ff0b4 100644
--- a/include/linux/flex_array.h
+++ b/include/linux/flex_array.h
@@ -61,16 +61,83 @@ struct flex_array {
FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \
}
+/**
+ * flex_array_alloc() - Creates a flexible array.
+ * @element_size: individual object size.
+ * @total: maximum number of objects which can be stored.
+ * @flags: GFP flags
+ *
+ * Return: Returns an object of structure flex_array.
+ */
struct flex_array *flex_array_alloc(int element_size, unsigned int total,
gfp_t flags);
+
+/**
+ * flex_array_prealloc() - Ensures that memory for the elements indexed in the
+ * range defined by start and nr_elements has been allocated.
+ * @fa: array to allocate memory to.
+ * @start: start address
+ * @nr_elements: number of elements to be allocated.
+ * @flags: GFP flags
+ *
+ */
int flex_array_prealloc(struct flex_array *fa, unsigned int start,
unsigned int nr_elements, gfp_t flags);
+
+/**
+ * flex_array_free() - Removes all elements of a flexible array.
+ * @fa: array to be freed.
+ */
void flex_array_free(struct flex_array *fa);
+
+/**
+ * flex_array_free_parts() - Removes all elements of a flexible array, but
+ * leaves the array itself in place.
+ * @fa: array to be emptied.
+ */
void flex_array_free_parts(struct flex_array *fa);
+
+/**
+ * flex_array_put() - Stores data into a flexible array.
+ * @fa: array where element is to be stored.
+ * @element_nr: position to copy, must be less than the maximum specified when
+ * the array was created.
+ * @src: data source to be copied into the array.
+ * @flags: GFP flags
+ *
+ * Return: Returns zero on success, a negative error code otherwise.
+ */
int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
gfp_t flags);
+
+/**
+ * flex_array_clear() - Clears an individual element in the array, sets the
+ * given element to FLEX_ARRAY_FREE.
+ * @element_nr: element position to clear.
+ * @fa: array to which element to be cleared belongs.
+ *
+ * Return: Returns zero on success, -EINVAL otherwise.
+ */
int flex_array_clear(struct flex_array *fa, unsigned int element_nr);
+
+/**
+ * flex_array_get() - Retrieves data from a flexible array.
+ *
+ * @element_nr: Element position to retrieve data from.
+ * @fa: array from which data is to be retrieved.
+ *
+ * Return: Returns a pointer to the data element, or NULL if that
+ * particular element has never been allocated.
+ */
void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
+
+/**
+ * flex_array_shrink() - Reduces the allocated size of an array.
+ * @fa: array to shrink.
+ *
+ * Return: Returns number of pages of memory actually freed.
+ *
+ */
int flex_array_shrink(struct flex_array *fa);
#define flex_array_put_ptr(fa, nr, src, gfp) \
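A minimal usage sketch tying the kernel-doc above together (illustrative; error handling kept simple):

#include <linux/flex_array.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int example_flex_array(void)
{
        struct flex_array *fa;
        int value = 42, ret;

        fa = flex_array_alloc(sizeof(int), 128, GFP_KERNEL);
        if (!fa)
                return -ENOMEM;

        ret = flex_array_prealloc(fa, 0, 1, GFP_KERNEL);
        if (!ret)
                ret = flex_array_put(fa, 0, &value, GFP_KERNEL);
        if (!ret && *(int *)flex_array_get(fa, 0) != 42)
                ret = -EINVAL;

        flex_array_free(fa);
        return ret;
}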
diff --git a/include/linux/fpga/altera-pr-ip-core.h b/include/linux/fpga/altera-pr-ip-core.h
new file mode 100644
index 000000000000..3810a9033f49
--- /dev/null
+++ b/include/linux/fpga/altera-pr-ip-core.h
@@ -0,0 +1,29 @@
+/*
+ * Driver for Altera Partial Reconfiguration IP Core
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Based on socfpga-a10.c Copyright (C) 2015-2016 Altera Corporation
+ * by Alan Tull <atull@opensource.altera.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ALT_PR_IP_CORE_H
+#define _ALT_PR_IP_CORE_H
+#include <linux/io.h>
+
+int alt_pr_register(struct device *dev, void __iomem *reg_base);
+int alt_pr_unregister(struct device *dev);
+
+#endif /* _ALT_PR_IP_CORE_H */
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 57beb5d09bfc..b4ac24c4411d 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -70,17 +70,21 @@ enum fpga_mgr_states {
*/
#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
+#define FPGA_MGR_ENCRYPTED_BITSTREAM BIT(2)
/**
* struct fpga_image_info - information specific to a FPGA image
* @flags: boolean flags as defined above
* @enable_timeout_us: maximum time to enable traffic through bridge (uSec)
* @disable_timeout_us: maximum time to disable traffic through bridge (uSec)
+ * @config_complete_timeout_us: maximum time for FPGA to switch to operating
+ * status in the write_complete op.
*/
struct fpga_image_info {
u32 flags;
u32 enable_timeout_us;
u32 disable_timeout_us;
+ u32 config_complete_timeout_us;
};
/**
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7251f7bb45e8..803e5a9b2654 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -29,7 +29,6 @@
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
-#include <linux/percpu-rwsem.h>
#include <linux/delayed_call.h>
#include <asm/byteorder.h>
@@ -250,9 +249,8 @@ enum positive_aop_returns {
AOP_TRUNCATED_PAGE = 0x80001,
};
-#define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */
-#define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */
-#define AOP_FLAG_NOFS 0x0004 /* used by filesystem to direct
+#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */
+#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct
* helper code (eg buffer layer)
* to clear GFP_FS from alloc */
@@ -546,6 +544,8 @@ is_uncached_acl(struct posix_acl *acl)
#define IOP_XATTR 0x0008
#define IOP_DEFAULT_READLINK 0x0010
+struct fsnotify_mark_connector;
+
/*
* Keep mostly read-only and often accessed (especially for
* the RCU path lookup and 'stat' data) fields at the beginning
@@ -645,7 +645,7 @@ struct inode {
#ifdef CONFIG_FSNOTIFY
__u32 i_fsnotify_mask; /* all events this inode cares about */
- struct hlist_head i_fsnotify_marks;
+ struct fsnotify_mark_connector __rcu *i_fsnotify_marks;
#endif
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
@@ -909,6 +909,8 @@ static inline struct file *get_file(struct file *f)
#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
#define FL_LAYOUT 2048 /* outstanding pNFS layout */
+#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)
+
/*
* Special return value from posix_lock_file() and vfs_lock_file() for
* asynchronous locking.
@@ -1429,7 +1431,6 @@ static inline void i_gid_write(struct inode *inode, gid_t gid)
inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid);
}
-extern struct timespec current_fs_time(struct super_block *sb);
extern struct timespec current_time(struct inode *inode);
/*
@@ -2121,6 +2122,9 @@ extern int vfs_ustat(dev_t, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
extern bool our_mnt(struct vfsmount *mnt);
+extern __printf(2, 3)
+int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
+extern int super_setup_bdi(struct super_block *sb);
extern int current_umask(void);
@@ -2921,17 +2925,19 @@ extern int vfs_statx_fd(unsigned int, struct kstat *, u32, unsigned int);
static inline int vfs_stat(const char __user *filename, struct kstat *stat)
{
- return vfs_statx(AT_FDCWD, filename, 0, stat, STATX_BASIC_STATS);
+ return vfs_statx(AT_FDCWD, filename, AT_NO_AUTOMOUNT,
+ stat, STATX_BASIC_STATS);
}
static inline int vfs_lstat(const char __user *name, struct kstat *stat)
{
- return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW,
+ return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT,
stat, STATX_BASIC_STATS);
}
static inline int vfs_fstatat(int dfd, const char __user *filename,
struct kstat *stat, int flags)
{
- return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS);
+ return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
+ stat, STATX_BASIC_STATS);
}
static inline int vfs_fstat(int fd, struct kstat *stat)
{
@@ -2996,9 +3002,10 @@ extern const struct file_operations simple_dir_operations;
extern const struct inode_operations simple_dir_inode_operations;
extern void make_empty_dir_inode(struct inode *inode);
extern bool is_empty_dir_inode(struct inode *inode);
-struct tree_descr { char *name; const struct file_operations *ops; int mode; };
+struct tree_descr { const char *name; const struct file_operations *ops; int mode; };
struct dentry *d_alloc_name(struct dentry *, const char *);
-extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *);
+extern int simple_fill_super(struct super_block *, unsigned long,
+ const struct tree_descr *);
extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
extern void simple_release_fs(struct vfsmount **mount, int *count);
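A sketch of the fill_super() step that the new helpers above cover, replacing open-coded bdi setup; the name format and everything else here is illustrative:

#include <linux/fs.h>

static int example_fill_super(struct super_block *sb, void *data, int silent)
{
        int err;

        /* allocate and attach a per-superblock backing_dev_info */
        err = super_setup_bdi_name(sb, "examplefs-%u", 0u);
        if (err)
                return err;

        /* ... the rest of fill_super() ... */
        return 0;
}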
diff --git a/include/linux/fscrypt_common.h b/include/linux/fscrypt_common.h
index 10c1abfbac6c..0a30c106c1e5 100644
--- a/include/linux/fscrypt_common.h
+++ b/include/linux/fscrypt_common.h
@@ -46,17 +46,6 @@ struct fscrypt_symlink_data {
char encrypted_path[1];
} __packed;
-/**
- * This function is used to calculate the disk space required to
- * store a filename of length l in encrypted symlink format.
- */
-static inline u32 fscrypt_symlink_data_len(u32 l)
-{
- if (l < FS_CRYPTO_BLOCK_SIZE)
- l = FS_CRYPTO_BLOCK_SIZE;
- return (l + sizeof(struct fscrypt_symlink_data) - 1);
-}
-
struct fscrypt_str {
unsigned char *name;
u32 len;
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index 3511ca798804..ec406aed2f2f 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -147,6 +147,15 @@ static inline int fscrypt_fname_usr_to_disk(struct inode *inode,
return -EOPNOTSUPP;
}
+static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
+ const u8 *de_name, u32 de_name_len)
+{
+ /* Encryption support disabled; use standard comparison */
+ if (de_name_len != fname->disk_name.len)
+ return false;
+ return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
+}
+
/* bio.c */
static inline void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx,
struct bio *bio)
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index a140f47e9b27..cd4e82c17304 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -57,6 +57,80 @@ extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
struct fscrypt_str *);
+#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32
+
+/* Extracts the second-to-last ciphertext block; see explanation below */
+#define FSCRYPT_FNAME_DIGEST(name, len) \
+ ((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \
+ FS_CRYPTO_BLOCK_SIZE))
+
+#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE
+
+/**
+ * fscrypt_digested_name - alternate identifier for an on-disk filename
+ *
+ * When userspace lists an encrypted directory without access to the key,
+ * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE
+ * bytes are shown in this abbreviated form (base64-encoded) rather than as the
+ * full ciphertext (base64-encoded). This is necessary to allow supporting
+ * filenames up to NAME_MAX bytes, since base64 encoding expands the length.
+ *
+ * To make it possible for filesystems to still find the correct directory entry
+ * despite not knowing the full on-disk name, we encode any filesystem-specific
+ * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups,
+ * followed by the second-to-last ciphertext block of the filename. Due to the
+ * use of the CBC-CTS encryption mode, the second-to-last ciphertext block
+ * depends on the full plaintext. (Note that ciphertext stealing causes the
+ * last two blocks to appear "flipped".) This makes accidental collisions very
+ * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they
+ * share the same filesystem-specific hashes.
+ *
+ * However, this scheme isn't immune to intentional collisions, which can be
+ * created by anyone able to create arbitrary plaintext filenames and view them
+ * without the key. Making the "digest" be a real cryptographic hash like
+ * SHA-256 over the full ciphertext would prevent this, although it would be
+ * less efficient and harder to implement, especially since the filesystem would
+ * need to calculate it for each directory entry examined during a search.
+ */
+struct fscrypt_digested_name {
+ u32 hash;
+ u32 minor_hash;
+ u8 digest[FSCRYPT_FNAME_DIGEST_SIZE];
+};
+
+/**
+ * fscrypt_match_name() - test whether the given name matches a directory entry
+ * @fname: the name being searched for
+ * @de_name: the name from the directory entry
+ * @de_name_len: the length of @de_name in bytes
+ *
+ * Normally @fname->disk_name will be set, and in that case we simply compare
+ * that to the name stored in the directory entry. The only exception is that
+ * if we don't have the key for an encrypted directory and a filename in it is
+ * very long, then we won't have the full disk_name and we'll instead need to
+ * match against the fscrypt_digested_name.
+ *
+ * Return: %true if the name matches, otherwise %false.
+ */
+static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
+ const u8 *de_name, u32 de_name_len)
+{
+ if (unlikely(!fname->disk_name.name)) {
+ const struct fscrypt_digested_name *n =
+ (const void *)fname->crypto_buf.name;
+ if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_'))
+ return false;
+ if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)
+ return false;
+ return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len),
+ n->digest, FSCRYPT_FNAME_DIGEST_SIZE);
+ }
+
+ if (de_name_len != fname->disk_name.len)
+ return false;
+ return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
+}
+
/* bio.c */
extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
extern void fscrypt_pullback_bio_page(struct page **, bool);
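A sketch of how a filesystem's lookup path might call the helper while scanning raw directory entries; the on-disk entry layout below is hypothetical:

#include <linux/fscrypt_supp.h>

struct example_dirent {         /* hypothetical on-disk directory entry */
        u8 name_len;
        u8 name[255];
};

static bool example_entry_matches(const struct fscrypt_name *fname,
                                  const struct example_dirent *de)
{
        /* handles both the normal disk_name case and the digested-name
         * fallback for long names in keyless directories */
        return fscrypt_match_name(fname, de->name, de->name_len);
}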
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index e6e689b5569e..c6c69318752b 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -80,6 +80,7 @@ struct fsnotify_event;
struct fsnotify_mark;
struct fsnotify_event_private_data;
struct fsnotify_fname;
+struct fsnotify_iter_info;
/*
* Each group much define these ops. The fsnotify infrastructure will call
@@ -98,10 +99,13 @@ struct fsnotify_ops {
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
u32 mask, const void *data, int data_type,
- const unsigned char *file_name, u32 cookie);
+ const unsigned char *file_name, u32 cookie,
+ struct fsnotify_iter_info *iter_info);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
void (*free_event)(struct fsnotify_event *event);
+ /* called on final put+free to free memory */
+ void (*free_mark)(struct fsnotify_mark *mark);
};
/*
@@ -163,6 +167,8 @@ struct fsnotify_group {
struct fsnotify_event *overflow_event; /* Event we queue when the
* notification list is too
* full */
+ atomic_t user_waits; /* Number of tasks waiting for user
+ * response */
/* groups can define private fields here or use the void *private */
union {
@@ -195,6 +201,30 @@ struct fsnotify_group {
#define FSNOTIFY_EVENT_INODE 2
/*
+ * Inode / vfsmount point to this structure which tracks all marks attached to
+ * the inode / vfsmount. The reference to inode / vfsmount is held by this
+ * structure. We destroy this structure when there are no more marks attached
+ * to it. The structure is protected by fsnotify_mark_srcu.
+ */
+struct fsnotify_mark_connector {
+ spinlock_t lock;
+#define FSNOTIFY_OBJ_TYPE_INODE 0x01
+#define FSNOTIFY_OBJ_TYPE_VFSMOUNT 0x02
+#define FSNOTIFY_OBJ_ALL_TYPES (FSNOTIFY_OBJ_TYPE_INODE | \
+ FSNOTIFY_OBJ_TYPE_VFSMOUNT)
+ unsigned int flags; /* Type of object [lock] */
+ union { /* Object pointer [lock] */
+ struct inode *inode;
+ struct vfsmount *mnt;
+ };
+ union {
+ struct hlist_head list;
+ /* Used listing heads to free after srcu period expires */
+ struct fsnotify_mark_connector *destroy_next;
+ };
+};
+
+/*
* A mark is simply an object attached to an in core inode which allows an
* fsnotify listener to indicate they are either no longer interested in events
* of a type matching mask or only interested in those events.
@@ -223,22 +253,16 @@ struct fsnotify_mark {
struct list_head g_list;
/* Protects inode / mnt pointers, flags, masks */
spinlock_t lock;
- /* List of marks for inode / vfsmount [obj_lock] */
+ /* List of marks for inode / vfsmount [connector->lock, mark ref] */
struct hlist_node obj_list;
- union { /* Object pointer [mark->lock, group->mark_mutex] */
- struct inode *inode; /* inode this mark is associated with */
- struct vfsmount *mnt; /* vfsmount this mark is associated with */
- };
+ /* Head of list of marks for an object [mark ref] */
+ struct fsnotify_mark_connector *connector;
/* Events types to ignore [mark->lock, group->mark_mutex] */
__u32 ignored_mask;
-#define FSNOTIFY_MARK_FLAG_INODE 0x01
-#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02
-#define FSNOTIFY_MARK_FLAG_OBJECT_PINNED 0x04
-#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08
-#define FSNOTIFY_MARK_FLAG_ALIVE 0x10
-#define FSNOTIFY_MARK_FLAG_ATTACHED 0x20
+#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x01
+#define FSNOTIFY_MARK_FLAG_ALIVE 0x02
+#define FSNOTIFY_MARK_FLAG_ATTACHED 0x04
unsigned int flags; /* flags [mark->lock] */
- void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
};
#ifdef CONFIG_FSNOTIFY
@@ -315,23 +339,18 @@ extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group
/* functions used to manipulate the marks attached to inodes */
-/* run all marks associated with a vfsmount and update mnt->mnt_fsnotify_mask */
-extern void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt);
-/* run all marks associated with an inode and update inode->i_fsnotify_mask */
-extern void fsnotify_recalc_inode_mask(struct inode *inode);
-extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(struct fsnotify_mark *mark));
-/* find (and take a reference) to a mark associated with group and inode */
-extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode);
-/* find (and take a reference) to a mark associated with group and vfsmount */
-extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt);
-/* set the ignored_mask of a mark */
-extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask);
-/* set the mask of a mark (might pin the object into memory */
-extern void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask);
-/* attach the mark to both the group and the inode */
-extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
- struct inode *inode, struct vfsmount *mnt, int allow_dups);
-extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct fsnotify_group *group,
+/* Calculate mask of events for a list of marks */
+extern void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn);
+extern void fsnotify_init_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group);
+/* Find mark belonging to given group in the list of marks */
+extern struct fsnotify_mark *fsnotify_find_mark(
+ struct fsnotify_mark_connector __rcu **connp,
+ struct fsnotify_group *group);
+/* attach the mark to the inode or vfsmount */
+extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct inode *inode,
+ struct vfsmount *mnt, int allow_dups);
+extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
struct inode *inode, struct vfsmount *mnt, int allow_dups);
/* given a group and a mark, flag mark to be freed when all references are dropped */
extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
@@ -340,15 +359,23 @@ extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
/* free mark */
extern void fsnotify_free_mark(struct fsnotify_mark *mark);
+/* run all the marks in a group, and clear all of the marks attached to given object type */
+extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int type);
/* run all the marks in a group, and clear all of the vfsmount marks */
-extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
+static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
+{
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT);
+}
/* run all the marks in a group, and clear all of the inode marks */
-extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group);
-/* run all the marks in a group, and clear all of the marks where mark->flags & flags is true*/
-extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags);
+static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
+{
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE);
+}
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_unmount_inodes(struct super_block *sb);
+extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
+extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
/* put here because inotify does some weird stuff when destroying watches */
extern void fsnotify_init_event(struct fsnotify_event *event,
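A sketch of a backend attaching an inode mark with the reworked API above: the group is now bound at init time and freeing is handled by the group's ->free_mark() op (names hypothetical):

#include <linux/fsnotify_backend.h>

static int example_watch_inode(struct fsnotify_group *group,
                               struct inode *inode,
                               struct fsnotify_mark *mark)
{
        fsnotify_init_mark(mark, group);
        mark->mask = FS_MODIFY;                         /* events we care about */
        return fsnotify_add_mark(mark, inode, NULL, 0); /* inode mark, no dups  */
}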
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 3633e8beff39..473f088aabea 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -42,8 +42,10 @@
/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
+void early_trace_init(void);
#else
static inline void trace_init(void) { }
+static inline void early_trace_init(void) { }
#endif
struct module;
@@ -70,7 +72,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
* IPMODIFY are a kind of attribute flags which can be set only before
* registering the ftrace_ops, and can not be modified while registered.
- * Changing those attribute flags after regsitering ftrace_ops will
+ * Changing those attribute flags after registering ftrace_ops will
* cause unexpected results.
*
* ENABLED - set/unset when ftrace_ops is registered/unregistered
@@ -144,6 +146,10 @@ struct ftrace_ops_hash {
struct ftrace_hash *filter_hash;
struct mutex regex_lock;
};
+
+void ftrace_free_init_mem(void);
+#else
+static inline void ftrace_free_init_mem(void) { }
#endif
/*
@@ -260,6 +266,7 @@ static inline int ftrace_nr_registered_ops(void)
}
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
+static inline void ftrace_free_init_mem(void) { }
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_STACK_TRACER
@@ -279,15 +286,45 @@ int
stack_trace_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
-#endif
-struct ftrace_func_command {
- struct list_head list;
- char *name;
- int (*func)(struct ftrace_hash *hash,
- char *func, char *cmd,
- char *params, int enable);
-};
+/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
+DECLARE_PER_CPU(int, disable_stack_tracer);
+
+/**
+ * stack_tracer_disable - temporarily disable the stack tracer
+ *
+ * There are a few locations (namely in RCU) where stack tracing
+ * cannot be executed. This function is used to disable stack
+ * tracing during those critical sections.
+ *
+ * This function must be called with preemption or interrupts
+ * disabled and stack_tracer_enable() must be called shortly after
+ * while preemption or interrupts are still disabled.
+ */
+static inline void stack_tracer_disable(void)
+{
+ /* Preemption or interrupts must be disabled */
+ if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+ WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+ this_cpu_inc(disable_stack_tracer);
+}
+
+/**
+ * stack_tracer_enable - re-enable the stack tracer
+ *
+ * After stack_tracer_disable() is called, stack_tracer_enable()
+ * must be called shortly afterward.
+ */
+static inline void stack_tracer_enable(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+ WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+ this_cpu_dec(disable_stack_tracer);
+}
+#else
+static inline void stack_tracer_disable(void) { }
+static inline void stack_tracer_enable(void) { }
+#endif
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -315,30 +352,6 @@ void ftrace_bug(int err, struct dyn_ftrace *rec);
struct seq_file;
-struct ftrace_probe_ops {
- void (*func)(unsigned long ip,
- unsigned long parent_ip,
- void **data);
- int (*init)(struct ftrace_probe_ops *ops,
- unsigned long ip, void **data);
- void (*free)(struct ftrace_probe_ops *ops,
- unsigned long ip, void **data);
- int (*print)(struct seq_file *m,
- unsigned long ip,
- struct ftrace_probe_ops *ops,
- void *data);
-};
-
-extern int
-register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
- void *data);
-extern void
-unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
- void *data);
-extern void
-unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
-extern void unregister_ftrace_function_probe_all(char *glob);
-
extern int ftrace_text_reserved(const void *start, const void *end);
extern int ftrace_nr_registered_ops(void);
@@ -400,9 +413,6 @@ void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
-int register_ftrace_command(struct ftrace_func_command *cmd);
-int unregister_ftrace_command(struct ftrace_func_command *cmd);
-
enum {
FTRACE_UPDATE_CALLS = (1 << 0),
FTRACE_DISABLE_CALLS = (1 << 1),
@@ -433,8 +443,8 @@ enum {
FTRACE_ITER_FILTER = (1 << 0),
FTRACE_ITER_NOTRACE = (1 << 1),
FTRACE_ITER_PRINTALL = (1 << 2),
- FTRACE_ITER_DO_HASH = (1 << 3),
- FTRACE_ITER_HASH = (1 << 4),
+ FTRACE_ITER_DO_PROBES = (1 << 3),
+ FTRACE_ITER_PROBE = (1 << 4),
FTRACE_ITER_ENABLED = (1 << 5),
};
@@ -618,14 +628,6 @@ static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
-static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
-{
- return -EINVAL;
-}
-static inline __init int unregister_ftrace_command(char *cmd_name)
-{
- return -EINVAL;
-}
static inline int ftrace_text_reserved(const void *start, const void *end)
{
return 0;
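
The stack_tracer_disable()/stack_tracer_enable() helpers added above must be bracketed by a region where preemption or interrupts are already off, and the enable call must follow shortly after the disable. A minimal sketch of the intended call pattern (the surrounding function is hypothetical, not part of this patch):

	#include <linux/ftrace.h>
	#include <linux/preempt.h>

	static void example_no_stack_trace_section(void)
	{
		preempt_disable();
		stack_tracer_disable();	/* keep the stack tracer out of this window */
		/* ... short critical section that must not be stack-traced ... */
		stack_tracer_enable();
		preempt_enable();
	}
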
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 8bd28ce6d76e..3dff2398a5f0 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -27,4 +27,16 @@ struct fwnode_handle {
struct fwnode_handle *secondary;
};
+/**
+ * struct fwnode_endpoint - Fwnode graph endpoint
+ * @port: Port number
+ * @id: Endpoint id
+ * @local_fwnode: reference to the related fwnode
+ */
+struct fwnode_endpoint {
+ unsigned int port;
+ unsigned int id;
+ const struct fwnode_handle *local_fwnode;
+};
+
#endif
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 76f39754e7b0..acff9437e5c3 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -159,11 +159,11 @@ struct badblocks;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct blk_integrity {
- struct blk_integrity_profile *profile;
- unsigned char flags;
- unsigned char tuple_size;
- unsigned char interval_exp;
- unsigned char tag_size;
+ const struct blk_integrity_profile *profile;
+ unsigned char flags;
+ unsigned char tuple_size;
+ unsigned char interval_exp;
+ unsigned char tag_size;
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */
@@ -722,11 +722,9 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
#if defined(CONFIG_BLK_DEV_INTEGRITY)
extern void blk_integrity_add(struct gendisk *);
extern void blk_integrity_del(struct gendisk *);
-extern void blk_integrity_revalidate(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline void blk_integrity_add(struct gendisk *disk) { }
static inline void blk_integrity_del(struct gendisk *disk) { }
-static inline void blk_integrity_revalidate(struct gendisk *disk) { }
#endif /* CONFIG_BLK_DEV_INTEGRITY */
#else /* CONFIG_BLOCK */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index db373b9d3223..2b1a44f5bdb6 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -40,6 +40,11 @@ struct vm_area_struct;
#define ___GFP_DIRECT_RECLAIM 0x400000u
#define ___GFP_WRITE 0x800000u
#define ___GFP_KSWAPD_RECLAIM 0x1000000u
+#ifdef CONFIG_LOCKDEP
+#define ___GFP_NOLOCKDEP 0x4000000u
+#else
+#define ___GFP_NOLOCKDEP 0
+#endif
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
@@ -179,8 +184,11 @@ struct vm_area_struct;
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
+/* Disable lockdep for GFP context tracking */
+#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
+
/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT 25
+#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/*
@@ -202,8 +210,16 @@ struct vm_area_struct;
*
* GFP_NOIO will use direct reclaim to discard clean pages or slab pages
* that do not require the starting of any physical IO.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_noio_{save,restore} to mark the whole scope which cannot
+ * perform any IO with a short explanation why. All allocation requests
+ * will inherit GFP_NOIO implicitly.
*
* GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
+ * recurse into the FS layer with a short explanation why. All allocation
+ * requests will inherit GFP_NOFS implicitly.
*
* GFP_USER is for userspace allocations that also need to be directly
 * accessible by the kernel or hardware. It is typically used by hardware
@@ -297,8 +313,8 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
/*
* GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
- * zone to use given the lowest 4 bits of gfp_t. Entries are ZONE_SHIFT long
- * and there are 16 of them to cover all possible combinations of
+ * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT
+ * bits long and there are 16 of them to cover all possible combinations of
* __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
*
* The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
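
The updated GFP_NOIO/GFP_NOFS documentation above steers callers toward the scope APIs rather than passing the flags directly. A minimal sketch of the recommended memalloc_nofs_save()/memalloc_nofs_restore() pattern (the surrounding function and the reason for the restriction are hypothetical):

	#include <linux/sched/mm.h>
	#include <linux/slab.h>

	static void *example_alloc_in_fs_context(size_t size)
	{
		unsigned int nofs_flags;
		void *p;

		/* every allocation in this scope implicitly inherits GFP_NOFS */
		nofs_flags = memalloc_nofs_save();
		p = kmalloc(size, GFP_KERNEL);
		memalloc_nofs_restore(nofs_flags);
		return p;
	}
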
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 933d93656605..8f702fcbe485 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -170,14 +170,14 @@ static inline struct gpio_desc *__must_check
gpiod_get_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline struct gpio_desc *__must_check
gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline struct gpio_descs *__must_check
@@ -191,7 +191,7 @@ static inline struct gpio_descs *__must_check
gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline void gpiod_put(struct gpio_desc *desc)
@@ -231,14 +231,14 @@ static inline struct gpio_desc *__must_check
devm_gpiod_get_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline struct gpio_desc *__must_check
devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline struct gpio_descs *__must_check
@@ -252,7 +252,7 @@ static inline struct gpio_descs *__must_check
devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 846f3b989480..393582867afd 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -168,7 +168,7 @@ struct gpio_chip {
unsigned int irq_base;
irq_flow_handler_t irq_handler;
unsigned int irq_default_type;
- int irq_chained_parent;
+ unsigned int irq_chained_parent;
bool irq_nested;
bool irq_need_valid_mask;
unsigned long *irq_valid_mask;
@@ -244,12 +244,12 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
- int parent_irq,
+ unsigned int parent_irq,
irq_flow_handler_t parent_handler);
void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
- int parent_irq);
+ unsigned int parent_irq);
int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
diff --git a/include/linux/gpio/gpio-reg.h b/include/linux/gpio/gpio-reg.h
new file mode 100644
index 000000000000..90e0b9060e6d
--- /dev/null
+++ b/include/linux/gpio/gpio-reg.h
@@ -0,0 +1,13 @@
+#ifndef GPIO_REG_H
+#define GPIO_REG_H
+
+struct device;
+struct irq_domain;
+
+struct gpio_chip *gpio_reg_init(struct device *dev, void __iomem *reg,
+ int base, int num, const char *label, u32 direction, u32 def_out,
+ const char *const *names, struct irq_domain *irqdom, const int *irqs);
+
+int gpio_reg_resume(struct gpio_chip *gc);
+
+#endif
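
gpio_reg_init() in the new header takes the MMIO register, a GPIO base (-1 for dynamic), the line count, a label, a direction bitmask, a default output value, optional line names, and optional IRQ mapping data. A rough usage sketch (the register offset, label and masks are illustrative, and the exact meaning of the direction bits is an assumption here):

	static struct gpio_chip *example_board_gpio(struct device *dev,
						    void __iomem *regs)
	{
		/* 8 lines at regs + 0x40, dynamic base, no names or irqs wired up */
		return gpio_reg_init(dev, regs + 0x40, -1, 8, "board-ctrl",
				     0x00, 0x00, NULL, NULL, NULL);
	}
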
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index edbb4fc674ed..d271ff23984f 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -35,6 +35,7 @@ enum hdmi_infoframe_type {
};
#define HDMI_IEEE_OUI 0x000c03
+#define HDMI_FORUM_IEEE_OUI 0xc45dd8
#define HDMI_INFOFRAME_HEADER_SIZE 4
#define HDMI_AVI_INFOFRAME_SIZE 13
#define HDMI_SPD_INFOFRAME_SIZE 25
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 7ef111d3ecc5..f32d7c392c1e 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -231,6 +231,8 @@ struct hid_sensor_common {
unsigned usage_id;
atomic_t data_ready;
atomic_t user_requested_state;
+ int poll_interval;
+ int raw_hystersis;
struct iio_trigger *trigger;
int timestamp_ns_scale;
struct hid_sensor_hub_attribute_info poll;
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 30c7dc45e45f..761f86242473 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -45,6 +45,14 @@
#define HID_USAGE_SENSOR_DATA_ATMOSPHERIC_PRESSURE 0x200430
#define HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE 0x200431
+/* Temperature (200033) */
+#define HID_USAGE_SENSOR_TEMPERATURE 0x200033
+#define HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE 0x200434
+
+/* humidity */
+#define HID_USAGE_SENSOR_HUMIDITY 0x200032
+#define HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY 0x200433
+
/* Gyro 3D: (200076) */
#define HID_USAGE_SENSOR_GYRO_3D 0x200076
#define HID_USAGE_SENSOR_DATA_ANGL_VELOCITY 0x200456
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 28f38e2b8f30..5be325d890d9 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -268,6 +268,8 @@ struct hid_item {
#define HID_CP_APPLICATIONLAUNCHBUTTONS 0x000c0180
#define HID_CP_GENERICGUIAPPLICATIONCONTROLS 0x000c0200
+#define HID_DG_DEVICECONFIG 0x000d000e
+#define HID_DG_DEVICESETTINGS 0x000d0023
#define HID_DG_CONFIDENCE 0x000d0047
#define HID_DG_WIDTH 0x000d0048
#define HID_DG_HEIGHT 0x000d0049
@@ -322,7 +324,7 @@ struct hid_item {
#define HID_QUIRK_MULTI_INPUT 0x00000040
#define HID_QUIRK_HIDINPUT_FORCE 0x00000080
#define HID_QUIRK_NO_EMPTY_INPUT 0x00000100
-#define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200
+/* 0x00000200 reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */
#define HID_QUIRK_ALWAYS_POLL 0x00000400
#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000
@@ -541,7 +543,6 @@ struct hid_device { /* device report descriptor */
struct list_head inputs; /* The list of inputs */
void *hiddev; /* The hiddev structure */
void *hidraw;
- int minor; /* Hiddev minor number */
int open; /* is the device open by anyone? */
char name[128]; /* Device name */
diff --git a/include/linux/hiddev.h b/include/linux/hiddev.h
index a5dd8148660b..921622222957 100644
--- a/include/linux/hiddev.h
+++ b/include/linux/hiddev.h
@@ -32,6 +32,18 @@
* In-kernel definitions.
*/
+struct hiddev {
+ int minor;
+ int exist;
+ int open;
+ struct mutex existancelock;
+ wait_queue_head_t wait;
+ struct hid_device *hid;
+ struct list_head list;
+ spinlock_t list_lock;
+ bool initialized;
+};
+
struct hid_device;
struct hid_usage;
struct hid_field;
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 1ffbf2a8cb99..3d04aa1dc83e 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -26,6 +26,7 @@ enum host1x_class {
HOST1X_CLASS_HOST1X = 0x1,
HOST1X_CLASS_GR2D = 0x51,
HOST1X_CLASS_GR2D_SB = 0x52,
+ HOST1X_CLASS_VIC = 0x5D,
HOST1X_CLASS_GR3D = 0x60,
};
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 249e579ecd4c..8c5b10eb7265 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -276,8 +276,6 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
return timer->base->cpu_base->hres_active;
}
-extern void hrtimer_peek_ahead_timers(void);
-
/*
* The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an
@@ -300,8 +298,6 @@ extern unsigned int hrtimer_resolution;
#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
-static inline void hrtimer_peek_ahead_timers(void) { }
-
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
return 0;
@@ -456,7 +452,7 @@ static inline u64 hrtimer_forward_now(struct hrtimer *timer,
}
/* Precise sleep: */
-extern long hrtimer_nanosleep(struct timespec *rqtp,
+extern long hrtimer_nanosleep(struct timespec64 *rqtp,
struct timespec __user *rmtp,
const enum hrtimer_mode mode,
const clockid_t clockid);
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 88b673749121..ceb751987c40 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -337,7 +337,7 @@ struct hwmon_ops {
int (*read)(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val);
int (*read_string)(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, char **str);
+ u32 attr, int channel, const char **str);
int (*write)(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val);
};
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 970771a5f739..e09fc8290c2f 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -491,6 +491,12 @@ struct vmbus_channel_rescind_offer {
u32 child_relid;
} __packed;
+static inline u32
+hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
+{
+ return rbi->ring_buffer->pending_send_sz;
+}
+
/*
* Request Offer -- no parameters, SynIC message contains the partition ID
* Set Snoop -- no parameters, SynIC message contains the partition ID
@@ -524,10 +530,10 @@ struct vmbus_channel_open_channel {
u32 target_vp;
/*
- * The upstream ring buffer begins at offset zero in the memory
- * described by RingBufferGpadlHandle. The downstream ring buffer
- * follows it at this offset (in pages).
- */
+ * The upstream ring buffer begins at offset zero in the memory
+ * described by RingBufferGpadlHandle. The downstream ring buffer
+ * follows it at this offset (in pages).
+ */
u32 downstream_ringbuffer_pageoffset;
/* User-specific data to be passed along to the server endpoint. */
@@ -1013,7 +1019,7 @@ extern int vmbus_open(struct vmbus_channel *channel,
u32 recv_ringbuffersize,
void *userdata,
u32 userdatalen,
- void(*onchannel_callback)(void *context),
+ void (*onchannel_callback)(void *context),
void *context);
extern void vmbus_close(struct vmbus_channel *channel);
@@ -1155,6 +1161,17 @@ static inline void *hv_get_drvdata(struct hv_device *dev)
return dev_get_drvdata(&dev->device);
}
+struct hv_ring_buffer_debug_info {
+ u32 current_interrupt_mask;
+ u32 current_read_index;
+ u32 current_write_index;
+ u32 bytes_avail_toread;
+ u32 bytes_avail_towrite;
+};
+
+void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+ struct hv_ring_buffer_debug_info *debug_info);
+
/* Vmbus interface */
#define vmbus_driver_register(driver) \
__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
@@ -1428,7 +1445,7 @@ struct hyperv_service_callback {
char *log_msg;
uuid_le data;
struct vmbus_channel *channel;
- void (*callback) (void *context);
+ void (*callback)(void *context);
};
#define MAX_SRV_VER 0x7ffffff
@@ -1504,16 +1521,6 @@ static inline void hv_signal_on_read(struct vmbus_channel *channel)
cached_write_sz = hv_get_cached_bytes_to_write(rbi);
if (cached_write_sz < pending_sz)
vmbus_setevent(channel);
-
- return;
-}
-
-static inline void
-init_cached_read_index(struct vmbus_channel *channel)
-{
- struct hv_ring_buffer_info *rbi = &channel->inbound;
-
- rbi->cached_read_index = rbi->ring_buffer->read_index;
}
/*
@@ -1549,76 +1556,48 @@ static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
/*
* An API to support in-place processing of incoming VMBUS packets.
*/
-#define VMBUS_PKT_TRAILER 8
-static inline struct vmpacket_descriptor *
-get_next_pkt_raw(struct vmbus_channel *channel)
+/* Get data payload associated with descriptor */
+static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
{
- struct hv_ring_buffer_info *ring_info = &channel->inbound;
- u32 priv_read_loc = ring_info->priv_read_index;
- void *ring_buffer = hv_get_ring_buffer(ring_info);
- u32 dsize = ring_info->ring_datasize;
- /*
- * delta is the difference between what is available to read and
- * what was already consumed in place. We commit read index after
- * the whole batch is processed.
- */
- u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ?
- priv_read_loc - ring_info->ring_buffer->read_index :
- (dsize - ring_info->ring_buffer->read_index) + priv_read_loc;
- u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);
-
- if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
- return NULL;
-
- return ring_buffer + priv_read_loc;
+ return (void *)((unsigned long)desc + (desc->offset8 << 3));
}
-/*
- * A helper function to step through packets "in-place"
- * This API is to be called after each successful call
- * get_next_pkt_raw().
- */
-static inline void put_pkt_raw(struct vmbus_channel *channel,
- struct vmpacket_descriptor *desc)
+/* Get data size associated with descriptor */
+static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
{
- struct hv_ring_buffer_info *ring_info = &channel->inbound;
- u32 packetlen = desc->len8 << 3;
- u32 dsize = ring_info->ring_datasize;
-
- /*
- * Include the packet trailer.
- */
- ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
- ring_info->priv_read_index %= dsize;
+ return (desc->len8 << 3) - (desc->offset8 << 3);
}
+
+struct vmpacket_descriptor *
+hv_pkt_iter_first(struct vmbus_channel *channel);
+
+struct vmpacket_descriptor *
+__hv_pkt_iter_next(struct vmbus_channel *channel,
+ const struct vmpacket_descriptor *pkt);
+
+void hv_pkt_iter_close(struct vmbus_channel *channel);
+
/*
- * This call commits the read index and potentially signals the host.
- * Here is the pattern for using the "in-place" consumption APIs:
- *
- * init_cached_read_index();
- *
- * while (get_next_pkt_raw() {
- * process the packet "in-place";
- * put_pkt_raw();
- * }
- * if (packets processed in place)
- * commit_rd_index();
+ * Get next packet descriptor from iterator
+ * If at end of list, return NULL and update host.
*/
-static inline void commit_rd_index(struct vmbus_channel *channel)
+static inline struct vmpacket_descriptor *
+hv_pkt_iter_next(struct vmbus_channel *channel,
+ const struct vmpacket_descriptor *pkt)
{
- struct hv_ring_buffer_info *ring_info = &channel->inbound;
- /*
- * Make sure all reads are done before we update the read index since
- * the writer may start writing to the read area once the read index
- * is updated.
- */
- virt_rmb();
- ring_info->ring_buffer->read_index = ring_info->priv_read_index;
+ struct vmpacket_descriptor *nxt;
+
+ nxt = __hv_pkt_iter_next(channel, pkt);
+ if (!nxt)
+ hv_pkt_iter_close(channel);
- hv_signal_on_read(channel);
+ return nxt;
}
+#define foreach_vmbus_pkt(pkt, channel) \
+ for (pkt = hv_pkt_iter_first(channel); pkt; \
+ pkt = hv_pkt_iter_next(channel, pkt))
#endif /* _HYPERV_H */
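
The new in-place iterator API above replaces the open-coded get_next_pkt_raw()/put_pkt_raw()/commit_rd_index() sequence; the foreach_vmbus_pkt() macro walks the ring, and hv_pkt_iter_next() commits the read index and signals the host once the batch is drained. A hedged sketch of a channel callback using it (the payload handling is made up):

	static void example_onchannel_callback(void *context)
	{
		struct vmbus_channel *channel = context;
		const struct vmpacket_descriptor *pkt;

		foreach_vmbus_pkt(pkt, channel) {
			void *data = hv_pkt_data(pkt);
			u32 len = hv_pkt_datalen(pkt);

			/* process data/len in place; no explicit commit is needed */
			(void)data;
			(void)len;
		}
	}
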
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index c5bd8b8daf97..72d0ece70ed3 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -150,6 +150,7 @@ enum i2c_alert_protocol {
* @detect: Callback for device detection
* @address_list: The I2C addresses to probe (for detect)
* @clients: List of detected clients we created (for i2c-core use only)
+ * @disable_i2c_core_irq_mapping: Tell the i2c-core to not do irq-mapping
*
* The driver.owner field should be set to the module owner of this driver.
* The driver.name field should be set to the name of this driver.
@@ -213,6 +214,8 @@ struct i2c_driver {
int (*detect)(struct i2c_client *, struct i2c_board_info *);
const unsigned short *address_list;
struct list_head clients;
+
+ bool disable_i2c_core_irq_mapping;
};
#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
@@ -829,11 +832,18 @@ static inline const struct of_device_id
#if IS_ENABLED(CONFIG_ACPI)
u32 i2c_acpi_find_bus_speed(struct device *dev);
+struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
+ struct i2c_board_info *info);
#else
static inline u32 i2c_acpi_find_bus_speed(struct device *dev)
{
return 0;
}
+static inline struct i2c_client *i2c_acpi_new_device(struct device *dev,
+ int index, struct i2c_board_info *info)
+{
+ return NULL;
+}
#endif /* CONFIG_ACPI */
#endif /* _LINUX_I2C_H */
diff --git a/include/linux/i2c/i2c-hid.h b/include/linux/i2c/i2c-hid.h
index 7aa901d92058..1fb088239d12 100644
--- a/include/linux/i2c/i2c-hid.h
+++ b/include/linux/i2c/i2c-hid.h
@@ -14,9 +14,13 @@
#include <linux/types.h>
+struct regulator;
+
/**
* struct i2chid_platform_data - used by hid over i2c implementation.
* @hid_descriptor_address: i2c register where the HID descriptor is stored.
+ * @supply: regulator for powering on the device.
+ * @post_power_delay_ms: delay after powering on before device is usable.
*
* Note that it is the responsibility of the platform driver (or the acpi 5.0
* driver, or the flattened device tree) to setup the irq related to the gpio in
@@ -31,6 +35,8 @@
*/
struct i2c_hid_platform_data {
u16 hid_descriptor_address;
+ struct regulator *supply;
+ int post_power_delay_ms;
};
#endif /* __LINUX_I2C_HID_H */
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 2f51c1724b5a..6980ca322074 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -88,7 +88,7 @@ static inline bool ata_pm_request(struct request *rq)
ide_req(rq)->type == ATA_PRIV_PM_RESUME);
}
-/* Error codes returned in rq->errors to the higher part of the driver. */
+/* Error codes returned in result to the higher part of the driver. */
enum {
IDE_DRV_ERROR_GENERAL = 101,
IDE_DRV_ERROR_FILEMARK = 102,
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 0dd9498c694f..69033353d0d1 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -7,7 +7,7 @@
* Copyright (c) 2005, Devicescape Software, Inc.
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright (c) 2016 Intel Deutschland GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1411,6 +1411,8 @@ struct ieee80211_ht_operation {
#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3
#define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004
#define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010
+#define IEEE80211_HT_OP_MODE_CCFS2_SHIFT 5
+#define IEEE80211_HT_OP_MODE_CCFS2_MASK 0x1fe0
/* for stbc_param */
#define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040
@@ -1525,14 +1527,14 @@ enum ieee80211_vht_chanwidth {
* This structure is the "VHT operation element" as
* described in 802.11ac D3.0 8.4.2.161
* @chan_width: Operating channel width
+ * @center_freq_seg0_idx: center freq segment 0 index
* @center_freq_seg1_idx: center freq segment 1 index
- * @center_freq_seg2_idx: center freq segment 2 index
* @basic_mcs_set: VHT Basic MCS rate set
*/
struct ieee80211_vht_operation {
u8 chan_width;
+ u8 center_freq_seg0_idx;
u8 center_freq_seg1_idx;
- u8 center_freq_seg2_idx;
__le16 basic_mcs_set;
} __packed;
@@ -1721,6 +1723,9 @@ enum ieee80211_statuscode {
WLAN_STATUS_REJECT_DSE_BAND = 96,
WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99,
WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103,
+ /* 802.11ai */
+ WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108,
+ WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109,
};
@@ -2102,6 +2107,12 @@ enum ieee80211_key_len {
#define FILS_NONCE_LEN 16
#define FILS_MAX_KEK_LEN 64
+#define FILS_ERP_MAX_USERNAME_LEN 16
+#define FILS_ERP_MAX_REALM_LEN 253
+#define FILS_ERP_MAX_RRK_LEN 64
+
+#define PMK_MAX_LEN 48
+
/* Public action codes */
enum ieee80211_pub_actioncode {
WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4,
@@ -2166,37 +2177,37 @@ enum ieee80211_tdls_actioncode {
#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0)
/**
- * enum - mesh synchronization method identifier
+ * enum ieee80211_mesh_sync_method - mesh synchronization method identifier
*
* @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method
* @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method
* that will be specified in a vendor specific information element
*/
-enum {
+enum ieee80211_mesh_sync_method {
IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1,
IEEE80211_SYNC_METHOD_VENDOR = 255,
};
/**
- * enum - mesh path selection protocol identifier
+ * enum ieee80211_mesh_path_protocol - mesh path selection protocol identifier
*
* @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol
* @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will
* be specified in a vendor specific information element
*/
-enum {
+enum ieee80211_mesh_path_protocol {
IEEE80211_PATH_PROTOCOL_HWMP = 1,
IEEE80211_PATH_PROTOCOL_VENDOR = 255,
};
/**
- * enum - mesh path selection metric identifier
+ * enum ieee80211_mesh_path_metric - mesh path selection metric identifier
*
* @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric
* @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be
* specified in a vendor specific information element
*/
-enum {
+enum ieee80211_mesh_path_metric {
IEEE80211_PATH_METRIC_AIRTIME = 1,
IEEE80211_PATH_METRIC_VENDOR = 255,
};
@@ -2305,6 +2316,32 @@ struct ieee80211_timeout_interval_ie {
__le32 value;
} __packed;
+/**
+ * enum ieee80211_idle_options - BSS idle options
+ * @WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE: the station should send an RSN
+ * protected frame to the AP to reset the idle timer at the AP for
+ * the station.
+ */
+enum ieee80211_idle_options {
+ WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE = BIT(0),
+};
+
+/**
+ * struct ieee80211_bss_max_idle_period_ie
+ *
+ * This structure refers to "BSS Max idle period element"
+ *
+ * @max_idle_period: indicates the time period during which a station can
+ * refrain from transmitting frames to its associated AP without being
+ * disassociated. In units of 1000 TUs.
+ * @idle_options: indicates the options associated with the BSS idle capability
+ * as specified in &enum ieee80211_idle_options.
+ */
+struct ieee80211_bss_max_idle_period_ie {
+ __le16 max_idle_period;
+ u8 idle_options;
+} __packed;
+
/* BACK action code */
enum ieee80211_back_actioncode {
WLAN_ACTION_ADDBA_REQ = 0,
@@ -2345,13 +2382,21 @@ enum ieee80211_sa_query_action {
#define WLAN_CIPHER_SUITE_SMS4 SUITE(0x001472, 1)
/* AKM suite selectors */
-#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1)
-#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2)
-#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5)
-#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6)
-#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7)
-#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8)
-#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9)
+#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1)
+#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2)
+#define WLAN_AKM_SUITE_FT_8021X SUITE(0x000FAC, 3)
+#define WLAN_AKM_SUITE_FT_PSK SUITE(0x000FAC, 4)
+#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5)
+#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6)
+#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7)
+#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8)
+#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9)
+#define WLAN_AKM_SUITE_8021X_SUITE_B SUITE(0x000FAC, 11)
+#define WLAN_AKM_SUITE_8021X_SUITE_B_192 SUITE(0x000FAC, 12)
+#define WLAN_AKM_SUITE_FILS_SHA256 SUITE(0x000FAC, 14)
+#define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15)
+#define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16)
+#define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17)
#define WLAN_MAX_KEY_LEN 32
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index c5847dc75a93..0c16866a7aac 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -48,6 +48,7 @@ struct br_ip_list {
#define BR_MCAST_FLOOD BIT(11)
#define BR_MULTICAST_TO_UNICAST BIT(12)
#define BR_VLAN_TUNNEL BIT(13)
+#define BR_BCAST_FLOOD BIT(14)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 8d5fcd6284ce..283dc2f5364d 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -614,14 +614,16 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
netdev_features_t features)
{
- if (skb_vlan_tagged_multi(skb))
- features = netdev_intersect_features(features,
- NETIF_F_SG |
- NETIF_F_HIGHDMA |
- NETIF_F_FRAGLIST |
- NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX);
+ if (skb_vlan_tagged_multi(skb)) {
+ /* In the case of multi-tagged packets, use a direct mask
+ * instead of using netdev_intersect_features(), to make
+ * sure that only devices supporting NETIF_F_HW_CSUM will
+ * have checksum offloading support.
+ */
+ features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
+ NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ }
return features;
}
diff --git a/include/linux/inet.h b/include/linux/inet.h
index 4cca05c9678e..636ebe87e6f8 100644
--- a/include/linux/inet.h
+++ b/include/linux/inet.h
@@ -43,6 +43,8 @@
#define _LINUX_INET_H
#include <linux/types.h>
+#include <net/net_namespace.h>
+#include <linux/socket.h>
/*
* These mimic similar macros defined in user-space for inet_ntop(3).
@@ -54,4 +56,8 @@
extern __be32 in_aton(const char *str);
extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
+
+extern int inet_pton_with_scope(struct net *net, unsigned short af,
+ const char *src, const char *port, struct sockaddr_storage *addr);
+
#endif /* _LINUX_INET_H */
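
inet_pton_with_scope() added above parses a textual IPv4 or IPv6 address (optionally with a port string and, for link-local IPv6, a scope id) into a struct sockaddr_storage, returning 0 on success. A small sketch of a caller (the address and port strings are placeholders):

	static int example_parse_endpoint(struct net *net,
					  struct sockaddr_storage *addr)
	{
		/* AF_UNSPEC lets the helper choose IPv4 or IPv6 from the string */
		return inet_pton_with_scope(net, AF_UNSPEC, "192.0.2.10", "4420", addr);
	}
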
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index ee971f335a8b..a2e9d6ea1349 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -153,8 +153,8 @@ struct in_ifaddr {
int register_inetaddr_notifier(struct notifier_block *nb);
int unregister_inetaddr_notifier(struct notifier_block *nb);
-void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
- struct ipv4_devconf *devconf);
+void inet_netconf_notify_devconf(struct net *net, int event, int type,
+ int ifindex, struct ipv4_devconf *devconf);
struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
diff --git a/include/linux/init.h b/include/linux/init.h
index 79af0962fd52..94769d687cf0 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -39,7 +39,7 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
-#define __init __section(.init.text) __cold notrace __latent_entropy
+#define __init __section(.init.text) __cold __inittrace __latent_entropy
#define __initdata __section(.init.data)
#define __initconst __section(.init.rodata)
#define __exitdata __section(.exit.data)
@@ -68,8 +68,10 @@
#ifdef MODULE
#define __exitused
+#define __inittrace notrace
#else
#define __exitused __used
+#define __inittrace
#endif
#define __exit __section(.exit.text) __exitused __cold notrace
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 91d9049f0039..e049526bc188 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -15,6 +15,7 @@
#include <linux/sched/autogroup.h>
#include <net/net_namespace.h>
#include <linux/sched/rt.h>
+#include <linux/livepatch.h>
#include <linux/mm_types.h>
#include <asm/thread_info.h>
@@ -181,6 +182,7 @@ extern struct cred init_cred;
#ifdef CONFIG_RT_MUTEXES
# define INIT_RT_MUTEXES(tsk) \
.pi_waiters = RB_ROOT, \
+ .pi_top_task = NULL, \
.pi_waiters_leftmost = NULL,
#else
# define INIT_RT_MUTEXES(tsk)
@@ -202,6 +204,13 @@ extern struct cred init_cred;
# define INIT_KASAN(tsk)
#endif
+#ifdef CONFIG_LIVEPATCH
+# define INIT_LIVEPATCH(tsk) \
+ .patch_state = KLP_UNDEFINED,
+#else
+# define INIT_LIVEPATCH(tsk)
+#endif
+
#ifdef CONFIG_THREAD_INFO_IN_TASK
# define INIT_TASK_TI(tsk) \
.thread_info = INIT_THREAD_INFO(tsk), \
@@ -210,6 +219,12 @@ extern struct cred init_cred;
# define INIT_TASK_TI(tsk)
#endif
+#ifdef CONFIG_SECURITY
+#define INIT_TASK_SECURITY .security = NULL,
+#else
+#define INIT_TASK_SECURITY
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -288,6 +303,8 @@ extern struct cred init_cred;
INIT_VTIME(tsk) \
INIT_NUMA_BALANCING(tsk) \
INIT_KASAN(tsk) \
+ INIT_LIVEPATCH(tsk) \
+ INIT_TASK_SECURITY \
}
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index c573a52ae440..485a5b48f038 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -30,6 +30,8 @@
#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <linux/iommu.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -72,24 +74,8 @@
#define OFFSET_STRIDE (9)
-#ifdef CONFIG_64BIT
#define dmar_readq(a) readq(a)
#define dmar_writeq(a,v) writeq(v,a)
-#else
-static inline u64 dmar_readq(void __iomem *addr)
-{
- u32 lo, hi;
- lo = readl(addr);
- hi = readl(addr + 4);
- return (((u64) hi) << 32) + lo;
-}
-
-static inline void dmar_writeq(void __iomem *addr, u64 val)
-{
- writel((u32)val, addr);
- writel((u32)(val >> 32), addr + 4);
-}
-#endif
#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v) ((v) & 0x0f)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 53144e78a369..a6fba4804672 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -155,7 +155,7 @@ extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id);
-extern void free_irq(unsigned int, void *);
+extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
struct device;
diff --git a/include/linux/io.h b/include/linux/io.h
index 82ef36eac8a1..2195d9ea4aaa 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -90,6 +90,27 @@ void devm_memunmap(struct device *dev, void *addr);
void *__devm_memremap_pages(struct device *dev, struct resource *res);
+#ifdef CONFIG_PCI
+/*
+ * The PCI specifications (Rev 3.0, 3.2.5 "Transaction Ordering and
+ * Posting") mandate non-posted configuration transactions. There is
+ * no ioremap API in the kernel that can guarantee non-posted write
+ * semantics across arches so provide a default implementation for
+ * mapping PCI config space that defaults to ioremap_nocache(); arches
+ * should override it if they have memory mapping implementations that
+ * guarantee non-posted writes semantics to make the memory mapping
+ * compliant with the PCI specification.
+ */
+#ifndef pci_remap_cfgspace
+#define pci_remap_cfgspace pci_remap_cfgspace
+static inline void __iomem *pci_remap_cfgspace(phys_addr_t offset,
+ size_t size)
+{
+ return ioremap_nocache(offset, size);
+}
+#endif
+#endif
+
/*
* Some systems do not have legacy ISA devices.
* /dev/port is not a valid interface on these systems.
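
A brief sketch of how a PCI host-controller driver might use the new pci_remap_cfgspace() fallback to obtain a non-posted (or at least uncached) mapping of configuration space (the base address and size come from the caller and are placeholders):

	static void __iomem *example_map_pci_cfg(struct device *dev,
						 phys_addr_t cfg_base,
						 size_t cfg_size)
	{
		void __iomem *base;

		base = pci_remap_cfgspace(cfg_base, cfg_size);
		if (!base)
			dev_err(dev, "failed to map PCI config space\n");
		return base;
	}
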
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 7291810067eb..f753e788da31 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -41,6 +41,7 @@ struct iomap {
u16 type; /* type of mapping */
u16 flags; /* flags for mapping */
struct block_device *bdev; /* block device for I/O */
+ struct dax_device *dax_dev; /* dax_dev for dax operations */
};
/*
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 2e4de0deee53..2cb54adc4a33 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -19,12 +19,12 @@
#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/types.h>
-#include <linux/scatterlist.h>
-#include <trace/events/iommu.h>
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
@@ -32,10 +32,13 @@
#define IOMMU_NOEXEC (1 << 3)
#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
/*
- * This is to make the IOMMU API setup privileged
- * mapppings accessible by the master only at higher
- * privileged execution level and inaccessible at
- * less privileged levels.
+ * Where the bus hardware includes a privilege level as part of its access type
+ * markings, and certain devices are capable of issuing transactions marked as
+ * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
+ * given permission flags only apply to accesses at the higher privilege level,
+ * and that unprivileged transactions should have as little access as possible.
+ * This would usually imply the same permissions as kernel mappings on the CPU,
+ * if the IOMMU page table format is equivalent.
*/
#define IOMMU_PRIV (1 << 5)
@@ -336,46 +339,9 @@ extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t offset, u64 size,
int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
-/**
- * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
- * @domain: the iommu domain where the fault has happened
- * @dev: the device where the fault has happened
- * @iova: the faulting address
- * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
- *
- * This function should be called by the low-level IOMMU implementations
- * whenever IOMMU faults happen, to allow high-level users, that are
- * interested in such events, to know about them.
- *
- * This event may be useful for several possible use cases:
- * - mere logging of the event
- * - dynamic TLB/PTE loading
- * - if restarting of the faulting device is required
- *
- * Returns 0 on success and an appropriate error code otherwise (if dynamic
- * PTE/TLB loading will one day be supported, implementations will be able
- * to tell whether it succeeded or not according to this return value).
- *
- * Specifically, -ENOSYS is returned if a fault handler isn't installed
- * (though fault handlers can also return -ENOSYS, in case they want to
- * elicit the default behavior of the IOMMU drivers).
- */
-static inline int report_iommu_fault(struct iommu_domain *domain,
- struct device *dev, unsigned long iova, int flags)
-{
- int ret = -ENOSYS;
-
- /*
- * if upper layers showed interest and installed a fault handler,
- * invoke it.
- */
- if (domain->handler)
- ret = domain->handler(domain, dev, iova, flags,
- domain->handler_token);
- trace_io_page_fault(dev, iova, flags);
- return ret;
-}
+extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags);
static inline size_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
diff --git a/include/linux/iova.h b/include/linux/iova.h
index f27bb2c62fca..e0a892ae45c0 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -82,6 +82,7 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
return iova >> iova_shift(iovad);
}
+#if IS_ENABLED(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);
@@ -106,5 +107,95 @@ void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
+#else
+static inline int iova_cache_get(void)
+{
+ return -ENOTSUPP;
+}
+
+static inline void iova_cache_put(void)
+{
+}
+
+static inline struct iova *alloc_iova_mem(void)
+{
+ return NULL;
+}
+
+static inline void free_iova_mem(struct iova *iova)
+{
+}
+
+static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
+{
+}
+
+static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
+{
+}
+
+static inline struct iova *alloc_iova(struct iova_domain *iovad,
+ unsigned long size,
+ unsigned long limit_pfn,
+ bool size_aligned)
+{
+ return NULL;
+}
+
+static inline void free_iova_fast(struct iova_domain *iovad,
+ unsigned long pfn,
+ unsigned long size)
+{
+}
+
+static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
+ unsigned long size,
+ unsigned long limit_pfn)
+{
+ return 0;
+}
+
+static inline struct iova *reserve_iova(struct iova_domain *iovad,
+ unsigned long pfn_lo,
+ unsigned long pfn_hi)
+{
+ return NULL;
+}
+
+static inline void copy_reserved_iova(struct iova_domain *from,
+ struct iova_domain *to)
+{
+}
+
+static inline void init_iova_domain(struct iova_domain *iovad,
+ unsigned long granule,
+ unsigned long start_pfn,
+ unsigned long pfn_32bit)
+{
+}
+
+static inline struct iova *find_iova(struct iova_domain *iovad,
+ unsigned long pfn)
+{
+ return NULL;
+}
+
+static inline void put_iova_domain(struct iova_domain *iovad)
+{
+}
+
+static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
+ struct iova *iova,
+ unsigned long pfn_lo,
+ unsigned long pfn_hi)
+{
+ return NULL;
+}
+
+static inline void free_cpu_cached_iovas(unsigned int cpu,
+ struct iova_domain *iovad)
+{
+}
+#endif
#endif
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 9d84942ae2e5..71fd92d81b26 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -8,8 +8,7 @@
#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */
/* used by in-kernel data structures */
-struct kern_ipc_perm
-{
+struct kern_ipc_perm {
spinlock_t lock;
bool deleted;
int id;
@@ -18,9 +17,9 @@ struct kern_ipc_perm
kgid_t gid;
kuid_t cuid;
kgid_t cgid;
- umode_t mode;
+ umode_t mode;
unsigned long seq;
void *security;
-};
+} ____cacheline_aligned_in_smp;
#endif /* _LINUX_IPC_H */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 71be5b330d21..e1b442996f81 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -37,6 +37,7 @@ struct ipv6_devconf {
__s32 accept_ra_rtr_pref;
__s32 rtr_probe_interval;
#ifdef CONFIG_IPV6_ROUTE_INFO
+ __s32 accept_ra_rt_info_min_plen;
__s32 accept_ra_rt_info_max_plen;
#endif
#endif
@@ -70,6 +71,7 @@ struct ipv6_devconf {
#endif
__u32 enhanced_dad;
__u32 addr_gen_mode;
+ __s32 disable_policy;
struct ctl_table_header *sysctl_header;
};
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 97cbca19430d..fffb91202bc9 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -132,6 +132,9 @@
#define GIC_BASER_SHAREABILITY(reg, type) \
(GIC_BASER_##type << reg##_SHAREABILITY_SHIFT)
+/* encode a size field of width @w containing @n - 1 units */
+#define GIC_ENCODE_SZ(n, w) (((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0))
+
#define GICR_PROPBASER_SHAREABILITY_SHIFT (10)
#define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7)
#define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56)
@@ -156,6 +159,8 @@
#define GICR_PROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWb)
#define GICR_PROPBASER_IDBITS_MASK (0x1f)
+#define GICR_PROPBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 12))
+#define GICR_PENDBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 16))
#define GICR_PENDBASER_SHAREABILITY_SHIFT (10)
#define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT (7)
@@ -232,12 +237,18 @@
#define GITS_CTLR_QUIESCENT (1U << 31)
#define GITS_TYPER_PLPIS (1UL << 0)
+#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
#define GITS_TYPER_IDBITS_SHIFT 8
#define GITS_TYPER_DEVBITS_SHIFT 13
#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_PTA (1UL << 19)
#define GITS_TYPER_HWCOLLCNT_SHIFT 24
+#define GITS_IIDR_REV_SHIFT 12
+#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT)
+#define GITS_IIDR_REV(r) (((r) >> GITS_IIDR_REV_SHIFT) & 0xf)
+#define GITS_IIDR_PRODUCTID_SHIFT 24
+
#define GITS_CBASER_VALID (1ULL << 63)
#define GITS_CBASER_SHAREABILITY_SHIFT (10)
#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59)
@@ -290,6 +301,7 @@
#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
+#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48)
#define GITS_BASER_SHAREABILITY_SHIFT (10)
#define GITS_BASER_InnerShareable \
GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
@@ -337,9 +349,11 @@
#define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307
#define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507
#define E_ITS_MAPD_DEVICE_OOR 0x010801
+#define E_ITS_MAPD_ITTSIZE_OOR 0x010802
#define E_ITS_MAPC_PROCNUM_OOR 0x010902
#define E_ITS_MAPC_COLLECTION_OOR 0x010903
#define E_ITS_MAPTI_UNMAPPED_DEVICE 0x010a04
+#define E_ITS_MAPTI_ID_OOR 0x010a05
#define E_ITS_MAPTI_PHYSICALID_OOR 0x010a06
#define E_ITS_INV_UNMAPPED_INTERRUPT 0x010c07
#define E_ITS_INVALL_UNMAPPED_COLLECTION 0x010d09
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index eafc965b3eb8..dc30f3d057eb 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -96,6 +96,9 @@
#define GICH_MISR_EOI (1 << 0)
#define GICH_MISR_U (1 << 1)
+#define GICV_PMR_PRIORITY_SHIFT 3
+#define GICV_PMR_PRIORITY_MASK (0x1f << GICV_PMR_PRIORITY_SHIFT)
+
#ifndef __ASSEMBLY__
#include <linux/irqdomain.h>
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 7b49c71c968b..2b0e56619e53 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -258,7 +258,6 @@ extern unsigned int gic_present;
extern void gic_init(unsigned long gic_base_addr,
unsigned long gic_addrspace_size, unsigned int cpu_vec,
unsigned int irqbase);
-extern void gic_clocksource_init(unsigned int);
extern u64 gic_read_count(void);
extern unsigned int gic_get_count_width(void);
extern u64 gic_read_compare(void);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index dfaa1f4dcb0c..606b6bce3a5b 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -491,6 +491,8 @@ struct jbd2_journal_handle
unsigned long h_start_jiffies;
unsigned int h_requested_credits;
+
+ unsigned int saved_alloc_context;
};
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 624215cebee5..36872fbb815d 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_JIFFIES_H
#define _LINUX_JIFFIES_H
+#include <linux/cache.h>
#include <linux/math64.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -63,19 +64,13 @@ extern int register_refined_jiffies(long clock_tick_rate);
/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
-/* some arch's have a small-data section that can be accessed register-relative
- * but that can only take up to, say, 4-byte variables. jiffies being part of
- * an 8-byte variable may not be correctly accessed unless we force the issue
- */
-#define __jiffy_data __attribute__((section(".data")))
-
/*
* The 64-bit value is not atomic - you MUST NOT read it
* without sampling the sequence number in jiffies_lock.
* get_jiffies_64() will do this for you as appropriate.
*/
-extern u64 __jiffy_data jiffies_64;
-extern unsigned long volatile __jiffy_data jiffies;
+extern u64 __cacheline_aligned_in_smp jiffies_64;
+extern unsigned long volatile __cacheline_aligned_in_smp jiffies;
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
diff --git a/include/linux/kbuild.h b/include/linux/kbuild.h
index 22a72198c14b..4e80f3a9ad58 100644
--- a/include/linux/kbuild.h
+++ b/include/linux/kbuild.h
@@ -2,14 +2,14 @@
#define __LINUX_KBUILD_H
#define DEFINE(sym, val) \
- asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+ asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
-#define BLANK() asm volatile("\n->" : : )
+#define BLANK() asm volatile("\n.ascii \"->\"" : : )
#define OFFSET(sym, str, mem) \
DEFINE(sym, offsetof(struct str, mem))
#define COMMENT(x) \
- asm volatile("\n->#" x)
+ asm volatile("\n.ascii \"->#" x "\"")
#endif
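
The DEFINE()/OFFSET()/BLANK()/COMMENT() helpers above are consumed by per-arch asm-offsets.c files; this change only alters how the markers are emitted (as .ascii strings) rather than how the macros are used. An illustrative, hypothetical use looks like:

	/* arch/<arch>/kernel/asm-offsets.c style usage (names are illustrative) */
	#include <linux/kbuild.h>
	#include <linux/sched.h>

	int main(void)
	{
		COMMENT("offsets into struct task_struct");
		OFFSET(EXAMPLE_TASK_STACK, task_struct, stack);
		BLANK();
		DEFINE(EXAMPLE_TASK_SIZE, sizeof(struct task_struct));
		return 0;
	}
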
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 4c26dc3a8295..13bc08aba704 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -47,6 +47,7 @@
/* @a is a power of 2 value */
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
@@ -438,6 +439,7 @@ extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
extern bool parse_option_str(const char *str, const char *option);
+extern char *next_arg(char *args, char **param, char **val);
extern int core_kernel_text(unsigned long addr);
extern int core_kernel_data(unsigned long addr);
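
ALIGN_DOWN() complements ALIGN(): for a power-of-two a, ALIGN() rounds x up to the next multiple of a while ALIGN_DOWN() rounds it down, and already-aligned values pass through both unchanged. A quick compile-time sanity check of the rounding direction (a hypothetical helper, not part of the patch):

	#include <linux/bug.h>
	#include <linux/kernel.h>

	static inline void align_rounding_example(void)
	{
		BUILD_BUG_ON(ALIGN(5000, 4096) != 8192);
		BUILD_BUG_ON(ALIGN_DOWN(5000, 4096) != 4096);
		BUILD_BUG_ON(ALIGN_DOWN(8192, 4096) != 8192);
	}
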
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index d419d0e51fe5..c9481ebcbc0c 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -14,17 +14,15 @@
#if !defined(__ASSEMBLY__)
+#include <linux/crash_core.h>
#include <asm/io.h>
#include <uapi/linux/kexec.h>
#ifdef CONFIG_KEXEC_CORE
#include <linux/list.h>
-#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/ioport.h>
-#include <linux/elfcore.h>
-#include <linux/elf.h>
#include <linux/module.h>
#include <asm/kexec.h>
@@ -62,19 +60,15 @@
#define KEXEC_CRASH_MEM_ALIGN PAGE_SIZE
#endif
-#define KEXEC_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
-#define KEXEC_CORE_NOTE_NAME "CORE"
-#define KEXEC_CORE_NOTE_NAME_BYTES ALIGN(sizeof(KEXEC_CORE_NOTE_NAME), 4)
-#define KEXEC_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
+#define KEXEC_CORE_NOTE_NAME CRASH_CORE_NOTE_NAME
+
/*
* The per-cpu notes area is a list of notes terminated by a "NULL"
* note header. For kdump, the code in vmcore.c runs in the context
* of the second kernel to combine them into one note.
*/
#ifndef KEXEC_NOTE_BYTES
-#define KEXEC_NOTE_BYTES ( (KEXEC_NOTE_HEAD_BYTES * 2) + \
- KEXEC_CORE_NOTE_NAME_BYTES + \
- KEXEC_CORE_NOTE_DESC_BYTES )
+#define KEXEC_NOTE_BYTES CRASH_CORE_NOTE_BYTES
#endif
/*
@@ -256,33 +250,6 @@ extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);
int kexec_crash_loaded(void);
void crash_save_cpu(struct pt_regs *regs, int cpu);
-void crash_save_vmcoreinfo(void);
-void arch_crash_save_vmcoreinfo(void);
-__printf(1, 2)
-void vmcoreinfo_append_str(const char *fmt, ...);
-phys_addr_t paddr_vmcoreinfo_note(void);
-
-#define VMCOREINFO_OSRELEASE(value) \
- vmcoreinfo_append_str("OSRELEASE=%s\n", value)
-#define VMCOREINFO_PAGESIZE(value) \
- vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
-#define VMCOREINFO_SYMBOL(name) \
- vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
-#define VMCOREINFO_SIZE(name) \
- vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
- (unsigned long)sizeof(name))
-#define VMCOREINFO_STRUCT_SIZE(name) \
- vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
- (unsigned long)sizeof(struct name))
-#define VMCOREINFO_OFFSET(name, field) \
- vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
- (unsigned long)offsetof(struct name, field))
-#define VMCOREINFO_LENGTH(name, value) \
- vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value)
-#define VMCOREINFO_NUMBER(name) \
- vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
-#define VMCOREINFO_CONFIG(name) \
- vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
extern struct kimage *kexec_image;
extern struct kimage *kexec_crash_image;
@@ -303,31 +270,15 @@ extern int kexec_load_disabled;
#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
KEXEC_FILE_NO_INITRAMFS)
-#define VMCOREINFO_BYTES (4096)
-#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
-#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
-#define VMCOREINFO_NOTE_SIZE (KEXEC_NOTE_HEAD_BYTES*2 + VMCOREINFO_BYTES \
- + VMCOREINFO_NOTE_NAME_BYTES)
-
/* Location of a reserved region to hold the crash kernel.
*/
extern struct resource crashk_res;
extern struct resource crashk_low_res;
-typedef u32 note_buf_t[KEXEC_NOTE_BYTES/4];
extern note_buf_t __percpu *crash_notes;
-extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
-extern size_t vmcoreinfo_size;
-extern size_t vmcoreinfo_max_size;
/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;
-int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
-int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
-int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
int crash_shrink_memory(unsigned long new_size);
size_t crash_get_memory_size(void);
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index eaee981c5558..8496cf64575c 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -147,6 +147,14 @@ struct key_type {
*/
request_key_actor_t request_key;
+ /* Look up a keyring access restriction (optional)
+ *
+ * - NULL is a valid return value (meaning the requested restriction
+ * is known but will never block addition of a key)
+ * - should return -EINVAL if the restriction is unknown
+ */
+ struct key_restriction *(*lookup_restriction)(const char *params);
+
/* internal fields */
struct list_head link; /* link in types list */
struct lock_class_key lock_class; /* key->sem lock class */
diff --git a/include/linux/key.h b/include/linux/key.h
index e45212f2777e..0c9b93b0d1f7 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -23,6 +23,7 @@
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/assoc_array.h>
+#include <linux/refcount.h>
#ifdef __KERNEL__
#include <linux/uidgid.h>
@@ -126,6 +127,17 @@ static inline bool is_key_possessed(const key_ref_t key_ref)
return (unsigned long) key_ref & 1UL;
}
+typedef int (*key_restrict_link_func_t)(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *restriction_key);
+
+struct key_restriction {
+ key_restrict_link_func_t check;
+ struct key *key;
+ struct key_type *keytype;
+};
+
/*****************************************************************************/
/*
* authentication token / access credential / keyring
@@ -135,7 +147,7 @@ static inline bool is_key_possessed(const key_ref_t key_ref)
* - Kerberos TGTs and tickets
*/
struct key {
- atomic_t usage; /* number of references */
+ refcount_t usage; /* number of references */
key_serial_t serial; /* key serial number */
union {
struct list_head graveyard_link;
@@ -205,18 +217,17 @@ struct key {
};
/* This is set on a keyring to restrict the addition of a link to a key
- * to it. If this method isn't provided then it is assumed that the
+ * to it. If this structure isn't provided then it is assumed that the
* keyring is open to any addition. It is ignored for non-keyring
- * keys.
+ * keys. Only set this value using keyring_restrict(), keyring_alloc(),
+ * or key_alloc().
*
* This is intended for use with rings of trusted keys whereby addition
* to the keyring needs to be controlled. KEY_ALLOC_BYPASS_RESTRICTION
* overrides this, allowing the kernel to add extra keys without
* restriction.
*/
- int (*restrict_link)(struct key *keyring,
- const struct key_type *type,
- const union key_payload *payload);
+ struct key_restriction *restrict_link;
};
extern struct key *key_alloc(struct key_type *type,
@@ -225,9 +236,7 @@ extern struct key *key_alloc(struct key_type *type,
const struct cred *cred,
key_perm_t perm,
unsigned long flags,
- int (*restrict_link)(struct key *,
- const struct key_type *,
- const union key_payload *));
+ struct key_restriction *restrict_link);
#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */
@@ -242,7 +251,7 @@ extern void key_put(struct key *key);
static inline struct key *__key_get(struct key *key)
{
- atomic_inc(&key->usage);
+ refcount_inc(&key->usage);
return key;
}
@@ -303,14 +312,13 @@ extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid
const struct cred *cred,
key_perm_t perm,
unsigned long flags,
- int (*restrict_link)(struct key *,
- const struct key_type *,
- const union key_payload *),
+ struct key_restriction *restrict_link,
struct key *dest);
extern int restrict_link_reject(struct key *keyring,
const struct key_type *type,
- const union key_payload *payload);
+ const union key_payload *payload,
+ struct key *restriction_key);
extern int keyring_clear(struct key *keyring);
@@ -321,6 +329,9 @@ extern key_ref_t keyring_search(key_ref_t keyring,
extern int keyring_add_key(struct key *keyring,
struct key *key);
+extern int keyring_restrict(key_ref_t keyring, const char *type,
+ const char *restriction);
+
extern struct key *key_lookup(key_serial_t id);
static inline key_serial_t key_serial(const struct key *key)
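
The key.h hunks above replace the bare restrict_link function pointer with a struct key_restriction that bundles the check callback together with an optional key and key type. The sketch below is a self-contained userspace model of that shape, not kernel code: the struct key, key_type and key_payload definitions are stubs, and reject_all only mirrors the behaviour of restrict_link_reject(); every other name is illustrative.

/* Toy userspace model of the new keyring restriction structure. */
#include <errno.h>
#include <stdio.h>

struct key { const char *description; };    /* stand-in for the kernel's struct key */
struct key_type { const char *name; };      /* stand-in for struct key_type */
union key_payload { const void *data; };    /* stand-in for union key_payload */

typedef int (*key_restrict_link_func_t)(struct key *dest_keyring,
					const struct key_type *type,
					const union key_payload *payload,
					struct key *restriction_key);

struct key_restriction {
	key_restrict_link_func_t check;
	struct key *key;                    /* key consulted by the check, if any */
	struct key_type *keytype;
};

/* Mirrors restrict_link_reject(): refuse every link attempt. */
static int reject_all(struct key *keyring, const struct key_type *type,
		      const union key_payload *payload, struct key *restriction_key)
{
	(void)keyring; (void)type; (void)payload; (void)restriction_key;
	return -EPERM;
}

int main(void)
{
	struct key ring = { "toy-keyring" };
	struct key_restriction r = { .check = reject_all };
	int err = r.check(&ring, NULL, NULL, r.key);

	printf("link into %s -> %d (expect -EPERM)\n", ring.description, err);
	return 0;
}
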
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index e6284591599e..ca85cb80e99a 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -108,6 +108,8 @@ extern int __must_check kobject_rename(struct kobject *, const char *new_name);
extern int __must_check kobject_move(struct kobject *, struct kobject *);
extern struct kobject *kobject_get(struct kobject *kobj);
+extern struct kobject * __must_check kobject_get_unless_zero(
+ struct kobject *kobj);
extern void kobject_put(struct kobject *kobj);
extern const void *kobject_namespace(struct kobject *kobj);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index c328e4f7dcad..541df0b5b815 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -267,6 +267,8 @@ extern int arch_init_kprobes(void);
extern void show_registers(struct pt_regs *regs);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
+extern bool arch_function_offset_within_entry(unsigned long offset);
+extern bool function_offset_within_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
extern bool within_kprobe_blacklist(unsigned long addr);
@@ -347,6 +349,9 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
int write, void __user *buffer,
size_t *length, loff_t *ppos);
#endif
+extern void wait_for_kprobe_optimizer(void);
+#else
+static inline void wait_for_kprobe_optimizer(void) { }
#endif /* CONFIG_OPTPROBES */
#ifdef CONFIG_KPROBES_ON_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
@@ -379,6 +384,7 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
return this_cpu_ptr(&kprobe_ctlblk);
}
+kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
diff --git a/include/linux/kref.h b/include/linux/kref.h
index f4156f88f557..29220724bf1c 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -66,8 +66,6 @@ static inline void kref_get(struct kref *kref)
*/
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
- WARN_ON(release == NULL);
-
if (refcount_dec_and_test(&kref->refcount)) {
release(kref);
return 1;
@@ -79,8 +77,6 @@ static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *lock)
{
- WARN_ON(release == NULL);
-
if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) {
release(kref);
return 1;
@@ -92,8 +88,6 @@ static inline int kref_put_lock(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
{
- WARN_ON(release == NULL);
-
if (refcount_dec_and_lock(&kref->refcount, lock)) {
release(kref);
return 1;
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index e1cfda4bee58..78b44a024eaa 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -61,7 +61,7 @@ static inline void set_page_stable_node(struct page *page,
struct page *ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address);
-int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
#else /* !CONFIG_KSM */
@@ -94,10 +94,9 @@ static inline int page_referenced_ksm(struct page *page,
return 0;
}
-static inline int rmap_walk_ksm(struct page *page,
+static inline void rmap_walk_ksm(struct page *page,
struct rmap_walk_control *rwc)
{
- return 0;
}
static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d0250744507a..8c0664309815 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -115,14 +115,17 @@ static inline bool is_error_page(struct page *page)
return IS_ERR(page);
}
+#define KVM_REQUEST_MASK GENMASK(7,0)
+#define KVM_REQUEST_NO_WAKEUP BIT(8)
+#define KVM_REQUEST_WAIT BIT(9)
/*
* Architecture-independent vcpu->requests bit members
* Bits 4-7 are reserved for more arch-independent bits.
*/
-#define KVM_REQ_TLB_FLUSH 0
-#define KVM_REQ_MMU_RELOAD 1
-#define KVM_REQ_PENDING_TIMER 2
-#define KVM_REQ_UNHALT 3
+#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_PENDING_TIMER 2
+#define KVM_REQ_UNHALT 3
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -268,6 +271,12 @@ struct kvm_vcpu {
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
+ /*
+ * The memory barrier ensures a previous write to vcpu->requests cannot
+ * be reordered with the read of vcpu->mode. It pairs with the general
+ * memory barrier following the write of vcpu->mode in VCPU RUN.
+ */
+ smp_mb__before_atomic();
return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}
@@ -375,8 +384,6 @@ struct kvm {
struct mutex slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
- struct srcu_struct srcu;
- struct srcu_struct irq_srcu;
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
/*
@@ -403,7 +410,7 @@ struct kvm {
struct kvm_vm_stat stat;
struct kvm_arch arch;
refcount_t users_count;
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+#ifdef CONFIG_KVM_MMIO
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
spinlock_t ring_lock;
struct list_head coalesced_zones;
@@ -429,6 +436,8 @@ struct kvm {
struct list_head devices;
struct dentry *debugfs_dentry;
struct kvm_stat_data **debugfs_stat_data;
+ struct srcu_struct srcu;
+ struct srcu_struct irq_srcu;
};
#define kvm_err(fmt, ...) \
@@ -490,6 +499,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
return NULL;
}
+static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu *tmp;
+ int idx;
+
+ kvm_for_each_vcpu(idx, tmp, vcpu->kvm)
+ if (tmp == vcpu)
+ return idx;
+ BUG();
+}
+
#define kvm_for_each_memslot(memslot, slots) \
for (memslot = &slots->memslots[0]; \
memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
@@ -502,10 +522,10 @@ int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
#ifdef __KVM_HAVE_IOAPIC
-void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
+void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
-static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
+static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
@@ -641,18 +661,18 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
-int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
- void *data, unsigned long len);
+int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
unsigned long len);
-int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
- void *data, unsigned long len);
-int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
- void *data, int offset, unsigned long len);
-int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
- gpa_t gpa, unsigned long len);
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned long len);
+int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, int offset, unsigned long len);
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
@@ -682,7 +702,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
-void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
@@ -767,8 +787,6 @@ void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
-void *kvm_kvzalloc(unsigned long size);
-
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
@@ -877,22 +895,6 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
-#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
-int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
-void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
-#else
-static inline int kvm_iommu_map_pages(struct kvm *kvm,
- struct kvm_memory_slot *slot)
-{
- return 0;
-}
-
-static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
- struct kvm_memory_slot *slot)
-{
-}
-#endif
-
/*
* search_memslots() and __gfn_to_memslot() are here because they are
* used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
@@ -1025,6 +1027,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
#define KVM_MAX_IRQ_ROUTES 1024
#endif
+bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
const struct kvm_irq_routing_entry *entries,
unsigned nr,
@@ -1092,13 +1095,23 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
* caller. Paired with the smp_mb__after_atomic in kvm_check_request.
*/
smp_wmb();
- set_bit(req, &vcpu->requests);
+ set_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+}
+
+static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
+{
+ return test_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+}
+
+static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
+{
+ clear_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
}
static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
- if (test_bit(req, &vcpu->requests)) {
- clear_bit(req, &vcpu->requests);
+ if (kvm_test_request(req, vcpu)) {
+ kvm_clear_request(req, vcpu);
/*
* Ensure the rest of the request is visible to kvm_check_request's
@@ -1165,7 +1178,6 @@ int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);
extern struct kvm_device_ops kvm_mpic_ops;
-extern struct kvm_device_ops kvm_xics_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
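
The kvm_host.h change above starts encoding KVM_REQUEST_WAIT and KVM_REQUEST_NO_WAKEUP in the high bits of each request number, so kvm_make_request(), kvm_test_request() and kvm_clear_request() must mask with KVM_REQUEST_MASK before touching the per-vcpu bitmap. The snippet below models only that arithmetic in plain userspace C: it uses ordinary bit operations on a local variable rather than the kernel's atomic set_bit()/test_bit() on vcpu->requests, and BIT()/GENMASK() are redefined locally just so the example builds on its own.

/* Standalone model of the request encoding: the low byte selects the bit,
 * the flag bits above it never reach the bitmap. */
#include <stdio.h>

#define BIT(n)        (1UL << (n))
#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (sizeof(unsigned long) * 8 - 1 - (h))))

#define KVM_REQUEST_MASK      GENMASK(7, 0)
#define KVM_REQUEST_NO_WAKEUP BIT(8)
#define KVM_REQUEST_WAIT      BIT(9)

#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)

static unsigned long requests;  /* stand-in for vcpu->requests */

static void make_request(int req)  { requests |=  BIT(req & KVM_REQUEST_MASK); }
static int  test_request(int req)  { return !!(requests & BIT(req & KVM_REQUEST_MASK)); }
static void clear_request(int req) { requests &= ~BIT(req & KVM_REQUEST_MASK); }

int main(void)
{
	make_request(KVM_REQ_TLB_FLUSH);
	printf("bitmap=%#lx, flush pending=%d\n", requests, test_request(KVM_REQ_TLB_FLUSH));
	clear_request(KVM_REQ_TLB_FLUSH);
	printf("after clear: flush pending=%d\n", test_request(KVM_REQ_TLB_FLUSH));
	return 0;
}
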
diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h
index d215b4561180..5e240b2b4d58 100644
--- a/include/linux/leds-pca9532.h
+++ b/include/linux/leds-pca9532.h
@@ -22,7 +22,8 @@ enum pca9532_state {
PCA9532_OFF = 0x0,
PCA9532_ON = 0x1,
PCA9532_PWM0 = 0x2,
- PCA9532_PWM1 = 0x3
+ PCA9532_PWM1 = 0x3,
+ PCA9532_KEEP = 0xff,
};
struct pca9532_led {
@@ -44,4 +45,3 @@ struct pca9532_platform_data {
};
#endif /* __LINUX_PCA9532_H */
-
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 38c0bd7ca107..64c56d454f7d 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -122,10 +122,16 @@ struct led_classdev {
struct mutex led_access;
};
-extern int led_classdev_register(struct device *parent,
- struct led_classdev *led_cdev);
-extern int devm_led_classdev_register(struct device *parent,
- struct led_classdev *led_cdev);
+extern int of_led_classdev_register(struct device *parent,
+ struct device_node *np,
+ struct led_classdev *led_cdev);
+#define led_classdev_register(parent, led_cdev) \
+ of_led_classdev_register(parent, NULL, led_cdev)
+extern int devm_of_led_classdev_register(struct device *parent,
+ struct device_node *np,
+ struct led_classdev *led_cdev);
+#define devm_led_classdev_register(parent, led_cdev) \
+ devm_of_led_classdev_register(parent, NULL, led_cdev)
extern void led_classdev_unregister(struct led_classdev *led_cdev);
extern void devm_led_classdev_unregister(struct device *parent,
struct led_classdev *led_cdev);
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 77e7af32543f..6c807017128d 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -20,9 +20,11 @@
enum {
/* when a dimm supports both PMEM and BLK access a label is required */
- NDD_ALIASING = 1 << 0,
+ NDD_ALIASING = 0,
/* unarmed memory devices may not persist writes */
- NDD_UNARMED = 1 << 1,
+ NDD_UNARMED = 1,
+ /* locked memory devices should not be accessed */
+ NDD_LOCKED = 2,
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
@@ -120,7 +122,7 @@ static inline struct nd_blk_region_desc *to_blk_region_desc(
}
int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length);
-void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
+void nvdimm_forget_poison(struct nvdimm_bus *nvdimm_bus,
phys_addr_t start, unsigned int len);
struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
struct nvdimm_bus_descriptor *nfit_desc);
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index ca45e4a088a9..7dfa56ebbc6d 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -56,7 +56,6 @@ typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
@@ -70,7 +69,6 @@ struct nvm_dev_ops {
nvm_op_set_bb_fn *set_bb_tbl;
nvm_submit_io_fn *submit_io;
- nvm_erase_blk_fn *erase_block;
nvm_create_dma_pool_fn *create_dma_pool;
nvm_destroy_dma_pool_fn *destroy_dma_pool;
@@ -125,7 +123,7 @@ enum {
/* NAND Access Modes */
NVM_IO_SUSPEND = 0x80,
NVM_IO_SLC_MODE = 0x100,
- NVM_IO_SCRAMBLE_DISABLE = 0x200,
+ NVM_IO_SCRAMBLE_ENABLE = 0x200,
/* Block Types */
NVM_BLK_T_FREE = 0x0,
@@ -438,7 +436,8 @@ static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
-typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *);
+typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
+ int flags);
typedef void (nvm_tgt_exit_fn)(void *);
typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
@@ -479,10 +478,10 @@ extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
int, int);
extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
-extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
+extern int nvm_erase_sync(struct nvm_tgt_dev *, struct ppa_addr *, int);
+extern int nvm_set_rqd_ppalist(struct nvm_tgt_dev *, struct nvm_rq *,
const struct ppa_addr *, int, int);
-extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
-extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int);
+extern void nvm_free_rqd_ppalist(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
void *);
extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 9072f04db616..194991ef9347 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -23,15 +23,16 @@
#include <linux/module.h>
#include <linux/ftrace.h>
+#include <linux/completion.h>
#if IS_ENABLED(CONFIG_LIVEPATCH)
#include <asm/livepatch.h>
-enum klp_state {
- KLP_DISABLED,
- KLP_ENABLED
-};
+/* task patch states */
+#define KLP_UNDEFINED -1
+#define KLP_UNPATCHED 0
+#define KLP_PATCHED 1
/**
* struct klp_func - function structure for live patching
@@ -39,10 +40,29 @@ enum klp_state {
* @new_func: pointer to the patched function code
* @old_sympos: a hint indicating which symbol position the old function
* can be found (optional)
+ * @immediate: patch the func immediately, bypassing safety mechanisms
* @old_addr: the address of the function being patched
* @kobj: kobject for sysfs resources
- * @state: tracks function-level patch application state
* @stack_node: list node for klp_ops func_stack list
+ * @old_size: size of the old function
+ * @new_size: size of the new function
+ * @patched: the func has been added to the klp_ops list
+ * @transition: the func is currently being applied or reverted
+ *
+ * The patched and transition variables define the func's patching state. When
+ * patching, a func is always in one of the following states:
+ *
+ * patched=0 transition=0: unpatched
+ * patched=0 transition=1: unpatched, temporary starting state
+ * patched=1 transition=1: patched, may be visible to some tasks
+ * patched=1 transition=0: patched, visible to all tasks
+ *
+ * And when unpatching, it goes in the reverse order:
+ *
+ * patched=1 transition=0: patched, visible to all tasks
+ * patched=1 transition=1: patched, may be visible to some tasks
+ * patched=0 transition=1: unpatched, temporary ending state
+ * patched=0 transition=0: unpatched
*/
struct klp_func {
/* external */
@@ -56,12 +76,15 @@ struct klp_func {
* in kallsyms for the given object is used.
*/
unsigned long old_sympos;
+ bool immediate;
/* internal */
unsigned long old_addr;
struct kobject kobj;
- enum klp_state state;
struct list_head stack_node;
+ unsigned long old_size, new_size;
+ bool patched;
+ bool transition;
};
/**
@@ -70,8 +93,8 @@ struct klp_func {
* @funcs: function entries for functions to be patched in the object
* @kobj: kobject for sysfs resources
* @mod: kernel module associated with the patched object
- * (NULL for vmlinux)
- * @state: tracks object-level patch application state
+ * (NULL for vmlinux)
+ * @patched: the object's funcs have been added to the klp_ops list
*/
struct klp_object {
/* external */
@@ -81,26 +104,30 @@ struct klp_object {
/* internal */
struct kobject kobj;
struct module *mod;
- enum klp_state state;
+ bool patched;
};
/**
* struct klp_patch - patch structure for live patching
* @mod: reference to the live patch module
* @objs: object entries for kernel objects to be patched
+ * @immediate: patch all funcs immediately, bypassing safety mechanisms
* @list: list node for global list of registered patches
* @kobj: kobject for sysfs resources
- * @state: tracks patch-level application state
+ * @enabled: the patch is enabled (but operation may be incomplete)
+ * @finish: for waiting till it is safe to remove the patch module
*/
struct klp_patch {
/* external */
struct module *mod;
struct klp_object *objs;
+ bool immediate;
/* internal */
struct list_head list;
struct kobject kobj;
- enum klp_state state;
+ bool enabled;
+ struct completion finish;
};
#define klp_for_each_object(patch, obj) \
@@ -123,10 +150,27 @@ void arch_klp_init_object_loaded(struct klp_patch *patch,
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);
+void klp_copy_process(struct task_struct *child);
+void klp_update_patch_state(struct task_struct *task);
+
+static inline bool klp_patch_pending(struct task_struct *task)
+{
+ return test_tsk_thread_flag(task, TIF_PATCH_PENDING);
+}
+
+static inline bool klp_have_reliable_stack(void)
+{
+ return IS_ENABLED(CONFIG_STACKTRACE) &&
+ IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
+}
+
#else /* !CONFIG_LIVEPATCH */
static inline int klp_module_coming(struct module *mod) { return 0; }
-static inline void klp_module_going(struct module *mod) { }
+static inline void klp_module_going(struct module *mod) {}
+static inline bool klp_patch_pending(struct task_struct *task) { return false; }
+static inline void klp_update_patch_state(struct task_struct *task) {}
+static inline void klp_copy_process(struct task_struct *child) {}
#endif /* CONFIG_LIVEPATCH */
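
The new klp_func kernel-doc above describes per-function patch state as a pair of booleans instead of the old enum klp_state. The short userspace walkthrough below simply enumerates the four (patched, transition) combinations listed in that comment in the order used while applying a patch; the struct and ordering are illustrative only, not the livepatch implementation.

/* Minimal illustration of the (patched, transition) states from the
 * klp_func documentation above. */
#include <stdbool.h>
#include <stdio.h>

struct func_state { bool patched; bool transition; };

static const char *describe(struct func_state s)
{
	if (!s.patched && !s.transition) return "unpatched";
	if (!s.patched &&  s.transition) return "unpatched, transitional";
	if ( s.patched &&  s.transition) return "patched, visible to some tasks";
	return "patched, visible to all tasks";
}

int main(void)
{
	/* Order of states while a patch is being applied. */
	struct func_state patching[] = {
		{ false, false }, { false, true }, { true, true }, { true, false },
	};

	for (unsigned i = 0; i < 4; i++)
		printf("patching step %u: %s\n", i, describe(patching[i]));
	return 0;
}
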
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index 140edab64446..05728396a1a1 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -18,6 +18,7 @@
/* Dummy declarations */
struct svc_rqst;
+struct rpc_task;
/*
* This is the set of functions for lockd->nfsd communication
@@ -43,6 +44,7 @@ struct nlmclnt_initdata {
u32 nfs_version;
int noresvport;
struct net *net;
+ const struct nlmclnt_operations *nlmclnt_ops;
};
/*
@@ -52,8 +54,26 @@ struct nlmclnt_initdata {
extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init);
extern void nlmclnt_done(struct nlm_host *host);
-extern int nlmclnt_proc(struct nlm_host *host, int cmd,
- struct file_lock *fl);
+/*
+ * NLM client operations provide a means to modify RPC processing of NLM
+ * requests. Callbacks receive a pointer to data passed into the call to
+ * nlmclnt_proc().
+ */
+struct nlmclnt_operations {
+ /* Called on successful allocation of nlm_rqst, use for allocation or
+ * reference counting. */
+ void (*nlmclnt_alloc_call)(void *);
+
+ /* Called in rpc_task_prepare for unlock. A return value of true
+ * indicates the callback has put the task to sleep on a waitqueue
+ * and NLM should not call rpc_call_start(). */
+ bool (*nlmclnt_unlock_prepare)(struct rpc_task*, void *);
+
+ /* Called when the nlm_rqst is freed, callbacks should clean up here */
+ void (*nlmclnt_release_call)(void *);
+};
+
+extern int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data);
extern int lockd_up(struct net *net);
extern void lockd_down(struct net *net);
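
The lockd/bind.h hunk above introduces nlmclnt_operations so an NLM user can hook allocation, unlock preparation and release of an nlm_rqst, with the opaque data pointer supplied through the extra argument now taken by nlmclnt_proc(). The sketch below only shows the shape of such an ops table in userspace; struct my_lock_ctx, the callback names and the stubbed rpc_task are all hypothetical.

/* Hypothetical userspace sketch of wiring up an nlmclnt_operations table. */
#include <stdbool.h>
#include <stdio.h>

struct rpc_task { int dummy; };  /* stand-in for the kernel's struct rpc_task */

struct nlmclnt_operations {
	void (*nlmclnt_alloc_call)(void *data);
	bool (*nlmclnt_unlock_prepare)(struct rpc_task *task, void *data);
	void (*nlmclnt_release_call)(void *data);
};

struct my_lock_ctx { int refcount; };

static void my_alloc_call(void *data)
{
	((struct my_lock_ctx *)data)->refcount++;   /* take a reference for the RPC */
}

static bool my_unlock_prepare(struct rpc_task *task, void *data)
{
	(void)task; (void)data;
	return false;                               /* false: let NLM start the RPC itself */
}

static void my_release_call(void *data)
{
	((struct my_lock_ctx *)data)->refcount--;   /* drop the reference */
}

static const struct nlmclnt_operations my_nlm_ops = {
	.nlmclnt_alloc_call     = my_alloc_call,
	.nlmclnt_unlock_prepare = my_unlock_prepare,
	.nlmclnt_release_call   = my_release_call,
};

int main(void)
{
	struct my_lock_ctx ctx = { 0 };

	my_nlm_ops.nlmclnt_alloc_call(&ctx);
	printf("refcount after alloc_call: %d\n", ctx.refcount);
	my_nlm_ops.nlmclnt_release_call(&ctx);
	printf("refcount after release_call: %d\n", ctx.refcount);
	return 0;
}
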
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index b37dee3acaba..41f7b6a04d69 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -69,6 +69,7 @@ struct nlm_host {
char *h_addrbuf; /* address eyecatcher */
struct net *net; /* host net */
char nodename[UNX_MAXNODENAME + 1];
+ const struct nlmclnt_operations *h_nlmclnt_ops; /* Callback ops for NLM users */
};
/*
@@ -142,6 +143,7 @@ struct nlm_rqst {
struct nlm_block * a_block;
unsigned int a_retries; /* Retry count */
u8 a_owner[NLMCLNT_OHSIZE];
+ void * a_callback_data; /* sent to nlmclnt_operations callbacks */
};
/*
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 1e327bb80838..fffe49f188e6 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -361,6 +361,8 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
+extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
+
extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
@@ -411,6 +413,7 @@ static inline void lockdep_on(void)
# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
# define lock_release(l, n, i) do { } while (0)
+# define lock_downgrade(l, i) do { } while (0)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_set_current_reclaim_state(g) do { } while (0)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index e29d4c62a3c8..080f34e66017 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -533,8 +533,13 @@
* manual page for definitions of the @clone_flags.
* @clone_flags contains the flags indicating what should be shared.
* Return 0 if permission is granted.
+ * @task_alloc:
+ * @task task being allocated.
+ * @clone_flags contains the flags indicating what should be shared.
+ * Handle allocation of task-related resources.
+ * Returns a zero on success, negative values on failure.
* @task_free:
- * @task task being freed
+ * @task task about to be freed.
* Handle release of task-related resources. (Note that this can be called
* from interrupt context.)
* @cred_alloc_blank:
@@ -630,10 +635,19 @@
* Check permission before getting the ioprio value of @p.
* @p contains the task_struct of process.
* Return 0 if permission is granted.
+ * @task_prlimit:
+ * Check permission before getting and/or setting the resource limits of
+ * another task.
+ * @cred points to the cred structure for the current task.
+ * @tcred points to the cred structure for the target task.
+ * @flags contains the LSM_PRLIMIT_* flag bits indicating whether the
+ * resource limits are being read, modified, or both.
+ * Return 0 if permission is granted.
* @task_setrlimit:
- * Check permission before setting the resource limits of the current
- * process for @resource to @new_rlim. The old resource limit values can
- * be examined by dereferencing (current->signal->rlim + resource).
+ * Check permission before setting the resource limits of process @p
+ * for @resource to @new_rlim. The old resource limit values can
+ * be examined by dereferencing (p->signal->rlim + resource).
+ * @p points to the task_struct for the target task's group leader.
* @resource contains the resource whose limit is being set.
* @new_rlim contains the new limits for @resource.
* Return 0 if permission is granted.
@@ -1473,6 +1487,7 @@ union security_list_options {
int (*file_open)(struct file *file, const struct cred *cred);
int (*task_create)(unsigned long clone_flags);
+ int (*task_alloc)(struct task_struct *task, unsigned long clone_flags);
void (*task_free)(struct task_struct *task);
int (*cred_alloc_blank)(struct cred *cred, gfp_t gfp);
void (*cred_free)(struct cred *cred);
@@ -1494,6 +1509,8 @@ union security_list_options {
int (*task_setnice)(struct task_struct *p, int nice);
int (*task_setioprio)(struct task_struct *p, int ioprio);
int (*task_getioprio)(struct task_struct *p);
+ int (*task_prlimit)(const struct cred *cred, const struct cred *tcred,
+ unsigned int flags);
int (*task_setrlimit)(struct task_struct *p, unsigned int resource,
struct rlimit *new_rlim);
int (*task_setscheduler)(struct task_struct *p);
@@ -1737,6 +1754,7 @@ struct security_hook_heads {
struct list_head file_receive;
struct list_head file_open;
struct list_head task_create;
+ struct list_head task_alloc;
struct list_head task_free;
struct list_head cred_alloc_blank;
struct list_head cred_free;
@@ -1755,6 +1773,7 @@ struct security_hook_heads {
struct list_head task_setnice;
struct list_head task_setioprio;
struct list_head task_getioprio;
+ struct list_head task_prlimit;
struct list_head task_setrlimit;
struct list_head task_setscheduler;
struct list_head task_getscheduler;
@@ -1908,6 +1927,13 @@ static inline void security_delete_hooks(struct security_hook_list *hooks,
}
#endif /* CONFIG_SECURITY_SELINUX_DISABLE */
+/* Currently required to handle SELinux runtime hook disable. */
+#ifdef CONFIG_SECURITY_WRITABLE_HOOKS
+#define __lsm_ro_after_init
+#else
+#define __lsm_ro_after_init __ro_after_init
+#endif /* CONFIG_SECURITY_WRITABLE_HOOKS */
+
extern int __init security_module_enable(const char *module);
extern void __init capability_add_hooks(void);
#ifdef CONFIG_SECURITY_YAMA
diff --git a/include/linux/mailbox/brcm-message.h b/include/linux/mailbox/brcm-message.h
index 6b55c938b401..c20b4843fc2d 100644
--- a/include/linux/mailbox/brcm-message.h
+++ b/include/linux/mailbox/brcm-message.h
@@ -16,6 +16,7 @@
enum brcm_message_type {
BRCM_MESSAGE_UNKNOWN = 0,
+ BRCM_MESSAGE_BATCH,
BRCM_MESSAGE_SPU,
BRCM_MESSAGE_SBA,
BRCM_MESSAGE_MAX,
@@ -23,24 +24,29 @@ enum brcm_message_type {
struct brcm_sba_command {
u64 cmd;
+ u64 *cmd_dma;
+ dma_addr_t cmd_dma_addr;
#define BRCM_SBA_CMD_TYPE_A BIT(0)
#define BRCM_SBA_CMD_TYPE_B BIT(1)
#define BRCM_SBA_CMD_TYPE_C BIT(2)
#define BRCM_SBA_CMD_HAS_RESP BIT(3)
#define BRCM_SBA_CMD_HAS_OUTPUT BIT(4)
u64 flags;
- dma_addr_t input;
- size_t input_len;
dma_addr_t resp;
size_t resp_len;
- dma_addr_t output;
- size_t output_len;
+ dma_addr_t data;
+ size_t data_len;
};
struct brcm_message {
enum brcm_message_type type;
union {
struct {
+ struct brcm_message *msgs;
+ unsigned int msgs_queued;
+ unsigned int msgs_count;
+ } batch;
+ struct {
struct scatterlist *src;
struct scatterlist *dst;
} spu;
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index bdfc65af4152..4ce24a376262 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -93,6 +93,7 @@ int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
+int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
ulong choose_memblock_flags(void);
/* Low level functions */
@@ -335,6 +336,7 @@ phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
+void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
int memblock_is_map_memory(phys_addr_t addr);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index bb7250c45cb8..899949bbb2f9 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -35,48 +35,43 @@ struct page;
struct mm_struct;
struct kmem_cache;
-/*
- * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c,
- * These two lists should keep in accord with each other.
- */
-enum mem_cgroup_stat_index {
- /*
- * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
- */
- MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
- MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
- MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */
- MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
- MEM_CGROUP_STAT_DIRTY, /* # of dirty pages in page cache */
- MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */
- MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
- MEM_CGROUP_STAT_NSTATS,
- /* default hierarchy stats */
- MEMCG_KERNEL_STACK_KB = MEM_CGROUP_STAT_NSTATS,
+/* Cgroup-specific page state, on top of universal node page state */
+enum memcg_stat_item {
+ MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
+ MEMCG_RSS,
+ MEMCG_RSS_HUGE,
+ MEMCG_SWAP,
+ MEMCG_SOCK,
+ /* XXX: why are these zone and not node counters? */
+ MEMCG_KERNEL_STACK_KB,
MEMCG_SLAB_RECLAIMABLE,
MEMCG_SLAB_UNRECLAIMABLE,
- MEMCG_SOCK,
MEMCG_NR_STAT,
};
+/* Cgroup-specific events, on top of universal VM events */
+enum memcg_event_item {
+ MEMCG_LOW = NR_VM_EVENT_ITEMS,
+ MEMCG_HIGH,
+ MEMCG_MAX,
+ MEMCG_OOM,
+ MEMCG_NR_EVENTS,
+};
+
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
int priority;
unsigned int generation;
};
-enum mem_cgroup_events_index {
- MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
- MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
- MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
- MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
- MEM_CGROUP_EVENTS_NSTATS,
- /* default hierarchy events */
- MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
- MEMCG_HIGH,
- MEMCG_MAX,
- MEMCG_OOM,
- MEMCG_NR_EVENTS,
+#ifdef CONFIG_MEMCG
+
+#define MEM_CGROUP_ID_SHIFT 16
+#define MEM_CGROUP_ID_MAX USHRT_MAX
+
+struct mem_cgroup_id {
+ int id;
+ atomic_t ref;
};
/*
@@ -92,16 +87,6 @@ enum mem_cgroup_events_target {
MEM_CGROUP_NTARGETS,
};
-#ifdef CONFIG_MEMCG
-
-#define MEM_CGROUP_ID_SHIFT 16
-#define MEM_CGROUP_ID_MAX USHRT_MAX
-
-struct mem_cgroup_id {
- int id;
- atomic_t ref;
-};
-
struct mem_cgroup_stat_cpu {
long count[MEMCG_NR_STAT];
unsigned long events[MEMCG_NR_EVENTS];
@@ -283,17 +268,10 @@ static inline bool mem_cgroup_disabled(void)
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
-/**
- * mem_cgroup_events - count memory events against a cgroup
- * @memcg: the memory cgroup
- * @idx: the event index
- * @nr: the number of events to account for
- */
-static inline void mem_cgroup_events(struct mem_cgroup *memcg,
- enum mem_cgroup_events_index idx,
- unsigned int nr)
+static inline void mem_cgroup_event(struct mem_cgroup *memcg,
+ enum memcg_event_item event)
{
- this_cpu_add(memcg->stat->events[idx], nr);
+ this_cpu_inc(memcg->stat->events[event]);
cgroup_file_notify(&memcg->events_file);
}
@@ -494,8 +472,42 @@ extern int do_swap_account;
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
+{
+ long val = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ val += per_cpu(memcg->stat->count[idx], cpu);
+
+ if (val < 0)
+ val = 0;
+
+ return val;
+}
+
+static inline void mod_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx, int val)
+{
+ if (!mem_cgroup_disabled())
+ this_cpu_add(memcg->stat->count[idx], val);
+}
+
+static inline void inc_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
+{
+ mod_memcg_state(memcg, idx, 1);
+}
+
+static inline void dec_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
+{
+ mod_memcg_state(memcg, idx, -1);
+}
+
/**
- * mem_cgroup_update_page_stat - update page state statistics
+ * mod_memcg_page_state - update page state statistics
* @page: the page
* @idx: page state item to account
* @val: number of pages (positive or negative)
@@ -506,28 +518,28 @@ void unlock_page_memcg(struct page *page);
*
* lock_page(page) or lock_page_memcg(page)
* if (TestClearPageState(page))
- * mem_cgroup_update_page_stat(page, state, -1);
+ * mod_memcg_page_state(page, state, -1);
* unlock_page(page) or unlock_page_memcg(page)
+ *
+ * Kernel pages are an exception to this, since they'll never move.
*/
-static inline void mem_cgroup_update_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx, int val)
+static inline void mod_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx, int val)
{
- VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));
-
if (page->mem_cgroup)
- this_cpu_add(page->mem_cgroup->stat->count[idx], val);
+ mod_memcg_state(page->mem_cgroup, idx, val);
}
-static inline void mem_cgroup_inc_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx)
+static inline void inc_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx)
{
- mem_cgroup_update_page_stat(page, idx, 1);
+ mod_memcg_page_state(page, idx, 1);
}
-static inline void mem_cgroup_dec_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx)
+static inline void dec_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx)
{
- mem_cgroup_update_page_stat(page, idx, -1);
+ mod_memcg_page_state(page, idx, -1);
}
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
@@ -544,20 +556,8 @@ static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (unlikely(!memcg))
- goto out;
-
- switch (idx) {
- case PGFAULT:
- this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
- break;
- case PGMAJFAULT:
- this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
- break;
- default:
- BUG();
- }
-out:
+ if (likely(memcg))
+ this_cpu_inc(memcg->stat->events[idx]);
rcu_read_unlock();
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -576,9 +576,8 @@ static inline bool mem_cgroup_disabled(void)
return true;
}
-static inline void mem_cgroup_events(struct mem_cgroup *memcg,
- enum mem_cgroup_events_index idx,
- unsigned int nr)
+static inline void mem_cgroup_event(struct mem_cgroup *memcg,
+ enum memcg_event_item event)
{
}
@@ -740,19 +739,41 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
return false;
}
-static inline void mem_cgroup_update_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx,
- int nr)
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
+{
+ return 0;
+}
+
+static inline void mod_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx,
+ int nr)
+{
+}
+
+static inline void inc_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
+{
+}
+
+static inline void dec_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
+{
+}
+
+static inline void mod_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx,
+ int nr)
{
}
-static inline void mem_cgroup_inc_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx)
+static inline void inc_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx)
{
}
-static inline void mem_cgroup_dec_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx)
+static inline void dec_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx)
{
}
@@ -872,7 +893,7 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
* @val: number of pages (positive or negative)
*/
static inline void memcg_kmem_update_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx, int val)
+ enum memcg_stat_item idx, int val)
{
if (memcg_kmem_enabled() && page->mem_cgroup)
this_cpu_add(page->mem_cgroup->stat->count[idx], val);
@@ -901,7 +922,7 @@ static inline void memcg_put_cache_ids(void)
}
static inline void memcg_kmem_update_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx, int val)
+ enum memcg_stat_item idx, int val)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
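
Among the memcontrol.h changes above, the new memcg_page_state() helper sums a per-cpu counter and clamps a transiently negative total to zero before reporting it. The fragment below reproduces just that summation pattern as a standalone userspace toy; the fixed NR_CPUS array stands in for the real per-cpu state.

/* Toy model of memcg_page_state(): sum per-cpu deltas, clamp below zero. */
#include <stdio.h>

#define NR_CPUS 4

static long per_cpu_count[NR_CPUS] = { 10, -3, 7, -2 };  /* per-cpu deltas may go negative */

static unsigned long page_state_sum(void)
{
	long val = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		val += per_cpu_count[cpu];

	if (val < 0)    /* counters are only approximately coherent */
		val = 0;

	return val;
}

int main(void)
{
	printf("aggregate page state: %lu\n", page_state_sum());
	return 0;
}
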
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 64faeeff698c..bfeecf179895 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -12,6 +12,8 @@
#define _ARIZONA_PDATA_H
#include <dt-bindings/mfd/arizona.h>
+#include <linux/regulator/arizona-ldo1.h>
+#include <linux/regulator/arizona-micsupp.h>
#define ARIZONA_GPN_DIR_MASK 0x8000 /* GPN_DIR */
#define ARIZONA_GPN_DIR_SHIFT 15 /* GPN_DIR */
@@ -76,13 +78,12 @@ struct arizona_micd_range {
struct arizona_pdata {
int reset; /** GPIO controlling /RESET, if any */
- int ldoena; /** GPIO controlling LODENA, if any */
/** Regulator configuration for MICVDD */
- struct regulator_init_data *micvdd;
+ struct arizona_micsupp_pdata micvdd;
/** Regulator configuration for LDO1 */
- struct regulator_init_data *ldo1;
+ struct arizona_ldo1_pdata ldo1;
/** If a direct 32kHz clock is provided on an MCLK specify it here */
int clk32k_src;
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 0d9a1ff38393..cde56cfe8446 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -20,6 +20,7 @@ enum axp20x_variants {
AXP221_ID,
AXP223_ID,
AXP288_ID,
+ AXP803_ID,
AXP806_ID,
AXP809_ID,
NR_AXP20X_VARIANTS,
@@ -228,13 +229,13 @@ enum axp20x_variants {
#define AXP20X_OCV_MAX 0xf
/* AXP22X specific registers */
-#define AXP22X_PMIC_ADC_H 0x56
-#define AXP22X_PMIC_ADC_L 0x57
+#define AXP22X_PMIC_TEMP_H 0x56
+#define AXP22X_PMIC_TEMP_L 0x57
#define AXP22X_TS_ADC_H 0x58
#define AXP22X_TS_ADC_L 0x59
#define AXP22X_BATLOW_THRES1 0xe6
-/* AXP288 specific registers */
+/* AXP288/AXP803 specific registers */
#define AXP288_POWER_REASON 0x02
#define AXP288_BC_GLOBAL 0x2c
#define AXP288_BC_VBUS_CNTL 0x2d
@@ -475,6 +476,43 @@ enum axp288_irqs {
AXP288_IRQ_BC_USB_CHNG,
};
+enum axp803_irqs {
+ AXP803_IRQ_ACIN_OVER_V = 1,
+ AXP803_IRQ_ACIN_PLUGIN,
+ AXP803_IRQ_ACIN_REMOVAL,
+ AXP803_IRQ_VBUS_OVER_V,
+ AXP803_IRQ_VBUS_PLUGIN,
+ AXP803_IRQ_VBUS_REMOVAL,
+ AXP803_IRQ_BATT_PLUGIN,
+ AXP803_IRQ_BATT_REMOVAL,
+ AXP803_IRQ_BATT_ENT_ACT_MODE,
+ AXP803_IRQ_BATT_EXIT_ACT_MODE,
+ AXP803_IRQ_CHARG,
+ AXP803_IRQ_CHARG_DONE,
+ AXP803_IRQ_BATT_CHG_TEMP_HIGH,
+ AXP803_IRQ_BATT_CHG_TEMP_HIGH_END,
+ AXP803_IRQ_BATT_CHG_TEMP_LOW,
+ AXP803_IRQ_BATT_CHG_TEMP_LOW_END,
+ AXP803_IRQ_BATT_ACT_TEMP_HIGH,
+ AXP803_IRQ_BATT_ACT_TEMP_HIGH_END,
+ AXP803_IRQ_BATT_ACT_TEMP_LOW,
+ AXP803_IRQ_BATT_ACT_TEMP_LOW_END,
+ AXP803_IRQ_DIE_TEMP_HIGH,
+ AXP803_IRQ_GPADC,
+ AXP803_IRQ_LOW_PWR_LVL1,
+ AXP803_IRQ_LOW_PWR_LVL2,
+ AXP803_IRQ_TIMER,
+ AXP803_IRQ_PEK_RIS_EDGE,
+ AXP803_IRQ_PEK_FAL_EDGE,
+ AXP803_IRQ_PEK_SHORT,
+ AXP803_IRQ_PEK_LONG,
+ AXP803_IRQ_PEK_OVER_OFF,
+ AXP803_IRQ_GPIO1_INPUT,
+ AXP803_IRQ_GPIO0_INPUT,
+ AXP803_IRQ_BC_USB_CHNG,
+ AXP803_IRQ_MV_CHNG,
+};
+
enum axp806_irqs {
AXP806_IRQ_DIE_TEMP_HIGH_LV1,
AXP806_IRQ_DIE_TEMP_HIGH_LV2,
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 7a01c94496f1..28baee63eaf6 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -35,10 +35,11 @@
* Max bus-specific overhead incurred by request/responses.
* I2C requires 1 additional byte for requests.
* I2C requires 2 additional bytes for responses.
+ * SPI requires up to 32 additional bytes for responses.
* */
#define EC_PROTO_VERSION_UNKNOWN 0
#define EC_MAX_REQUEST_OVERHEAD 1
-#define EC_MAX_RESPONSE_OVERHEAD 2
+#define EC_MAX_RESPONSE_OVERHEAD 32
/*
* Command interface between EC and AP, for LPC, I2C and SPI interfaces.
@@ -304,4 +305,22 @@ extern struct attribute_group cros_ec_attr_group;
extern struct attribute_group cros_ec_lightbar_attr_group;
extern struct attribute_group cros_ec_vbc_attr_group;
+/* ACPI GPE handler */
+#ifdef CONFIG_ACPI
+
+int cros_ec_acpi_install_gpe_handler(struct device *dev);
+void cros_ec_acpi_remove_gpe_handler(void);
+void cros_ec_acpi_clear_gpe(void);
+
+#else /* CONFIG_ACPI */
+
+static inline int cros_ec_acpi_install_gpe_handler(struct device *dev)
+{
+ return -ENODEV;
+}
+static inline void cros_ec_acpi_remove_gpe_handler(void) {}
+static inline void cros_ec_acpi_clear_gpe(void) {}
+
+#endif /* CONFIG_ACPI */
+
#endif /* __LINUX_MFD_CROS_EC_H */
diff --git a/include/linux/mfd/da9062/core.h b/include/linux/mfd/da9062/core.h
index 376ba84366a0..74d33a01ddae 100644
--- a/include/linux/mfd/da9062/core.h
+++ b/include/linux/mfd/da9062/core.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Dialog Semiconductor Ltd.
+ * Copyright (C) 2015-2017 Dialog Semiconductor
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -18,7 +18,31 @@
#include <linux/interrupt.h>
#include <linux/mfd/da9062/registers.h>
-/* Interrupts */
+enum da9062_compatible_types {
+ COMPAT_TYPE_DA9061 = 1,
+ COMPAT_TYPE_DA9062,
+};
+
+enum da9061_irqs {
+ /* IRQ A */
+ DA9061_IRQ_ONKEY,
+ DA9061_IRQ_WDG_WARN,
+ DA9061_IRQ_SEQ_RDY,
+ /* IRQ B */
+ DA9061_IRQ_TEMP,
+ DA9061_IRQ_LDO_LIM,
+ DA9061_IRQ_DVC_RDY,
+ DA9061_IRQ_VDD_WARN,
+ /* IRQ C */
+ DA9061_IRQ_GPI0,
+ DA9061_IRQ_GPI1,
+ DA9061_IRQ_GPI2,
+ DA9061_IRQ_GPI3,
+ DA9061_IRQ_GPI4,
+
+ DA9061_NUM_IRQ,
+};
+
enum da9062_irqs {
/* IRQ A */
DA9062_IRQ_ONKEY,
@@ -45,6 +69,7 @@ struct da9062 {
struct device *dev;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irq;
+ enum da9062_compatible_types chip_type;
};
#endif /* __MFD_DA9062_CORE_H__ */
diff --git a/include/linux/mfd/da9062/registers.h b/include/linux/mfd/da9062/registers.h
index 97790d1b02c5..18d576aed902 100644
--- a/include/linux/mfd/da9062/registers.h
+++ b/include/linux/mfd/da9062/registers.h
@@ -1,6 +1,5 @@
/*
- * registers.h - REGISTERS H for DA9062
- * Copyright (C) 2015 Dialog Semiconductor Ltd.
+ * Copyright (C) 2015-2017 Dialog Semiconductor
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -18,6 +17,8 @@
#define DA9062_PMIC_DEVICE_ID 0x62
#define DA9062_PMIC_VARIANT_MRC_AA 0x01
+#define DA9062_PMIC_VARIANT_VRC_DA9061 0x01
+#define DA9062_PMIC_VARIANT_VRC_DA9062 0x02
#define DA9062_I2C_PAGE_SEL_SHIFT 1
diff --git a/include/linux/mfd/intel_bxtwc.h b/include/linux/mfd/intel_soc_pmic_bxtwc.h
index 1a0ee9d6efe9..0c351bc85d2d 100644
--- a/include/linux/mfd/intel_bxtwc.h
+++ b/include/linux/mfd/intel_soc_pmic_bxtwc.h
@@ -1,5 +1,5 @@
/*
- * intel_bxtwc.h - Header file for Intel Broxton Whiskey Cove PMIC
+ * Header file for Intel Broxton Whiskey Cove PMIC
*
* Copyright (C) 2015 Intel Corporation. All rights reserved.
*
@@ -13,8 +13,6 @@
* more details.
*/
-#include <linux/mfd/intel_soc_pmic.h>
-
#ifndef __INTEL_BXTWC_H__
#define __INTEL_BXTWC_H__
diff --git a/include/linux/mfd/motorola-cpcap.h b/include/linux/mfd/motorola-cpcap.h
index b4031c2b2214..aefc49cb7ba9 100644
--- a/include/linux/mfd/motorola-cpcap.h
+++ b/include/linux/mfd/motorola-cpcap.h
@@ -14,6 +14,9 @@
* published by the Free Software Foundation.
*/
+#include <linux/device.h>
+#include <linux/regmap.h>
+
#define CPCAP_VENDOR_ST 0
#define CPCAP_VENDOR_TI 1
@@ -290,3 +293,5 @@ static inline int cpcap_get_vendor(struct device *dev,
return 0;
}
+
+extern int cpcap_sense_virq(struct regmap *regmap, int virq);
diff --git a/include/linux/mfd/mxs-lradc.h b/include/linux/mfd/mxs-lradc.h
new file mode 100644
index 000000000000..661a4521f723
--- /dev/null
+++ b/include/linux/mfd/mxs-lradc.h
@@ -0,0 +1,187 @@
+/*
+ * Freescale MXS Low Resolution Analog-to-Digital Converter driver
+ *
+ * Copyright (c) 2012 DENX Software Engineering, GmbH.
+ * Copyright (c) 2016 Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
+ *
+ * Author: Marek Vasut <marex@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_MXS_LRADC_H
+#define __MFD_MXS_LRADC_H
+
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/stmp_device.h>
+
+#define LRADC_MAX_DELAY_CHANS 4
+#define LRADC_MAX_MAPPED_CHANS 8
+#define LRADC_MAX_TOTAL_CHANS 16
+
+#define LRADC_DELAY_TIMER_HZ 2000
+
+#define LRADC_CTRL0 0x00
+# define LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE BIT(23)
+# define LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE BIT(22)
+# define LRADC_CTRL0_MX28_YNNSW /* YM */ BIT(21)
+# define LRADC_CTRL0_MX28_YPNSW /* YP */ BIT(20)
+# define LRADC_CTRL0_MX28_YPPSW /* YP */ BIT(19)
+# define LRADC_CTRL0_MX28_XNNSW /* XM */ BIT(18)
+# define LRADC_CTRL0_MX28_XNPSW /* XM */ BIT(17)
+# define LRADC_CTRL0_MX28_XPPSW /* XP */ BIT(16)
+
+# define LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE BIT(20)
+# define LRADC_CTRL0_MX23_YM BIT(19)
+# define LRADC_CTRL0_MX23_XM BIT(18)
+# define LRADC_CTRL0_MX23_YP BIT(17)
+# define LRADC_CTRL0_MX23_XP BIT(16)
+
+# define LRADC_CTRL0_MX28_PLATE_MASK \
+ (LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE | \
+ LRADC_CTRL0_MX28_YNNSW | LRADC_CTRL0_MX28_YPNSW | \
+ LRADC_CTRL0_MX28_YPPSW | LRADC_CTRL0_MX28_XNNSW | \
+ LRADC_CTRL0_MX28_XNPSW | LRADC_CTRL0_MX28_XPPSW)
+
+# define LRADC_CTRL0_MX23_PLATE_MASK \
+ (LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE | \
+ LRADC_CTRL0_MX23_YM | LRADC_CTRL0_MX23_XM | \
+ LRADC_CTRL0_MX23_YP | LRADC_CTRL0_MX23_XP)
+
+#define LRADC_CTRL1 0x10
+#define LRADC_CTRL1_TOUCH_DETECT_IRQ_EN BIT(24)
+#define LRADC_CTRL1_LRADC_IRQ_EN(n) (1 << ((n) + 16))
+#define LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK (0x1fff << 16)
+#define LRADC_CTRL1_MX23_LRADC_IRQ_EN_MASK (0x01ff << 16)
+#define LRADC_CTRL1_LRADC_IRQ_EN_OFFSET 16
+#define LRADC_CTRL1_TOUCH_DETECT_IRQ BIT(8)
+#define LRADC_CTRL1_LRADC_IRQ(n) BIT(n)
+#define LRADC_CTRL1_MX28_LRADC_IRQ_MASK 0x1fff
+#define LRADC_CTRL1_MX23_LRADC_IRQ_MASK 0x01ff
+#define LRADC_CTRL1_LRADC_IRQ_OFFSET 0
+
+#define LRADC_CTRL2 0x20
+#define LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET 24
+#define LRADC_CTRL2_TEMPSENSE_PWD BIT(15)
+
+#define LRADC_STATUS 0x40
+#define LRADC_STATUS_TOUCH_DETECT_RAW BIT(0)
+
+#define LRADC_CH(n) (0x50 + (0x10 * (n)))
+#define LRADC_CH_ACCUMULATE BIT(29)
+#define LRADC_CH_NUM_SAMPLES_MASK (0x1f << 24)
+#define LRADC_CH_NUM_SAMPLES_OFFSET 24
+#define LRADC_CH_NUM_SAMPLES(x) \
+ ((x) << LRADC_CH_NUM_SAMPLES_OFFSET)
+#define LRADC_CH_VALUE_MASK 0x3ffff
+#define LRADC_CH_VALUE_OFFSET 0
+
+#define LRADC_DELAY(n) (0xd0 + (0x10 * (n)))
+#define LRADC_DELAY_TRIGGER_LRADCS_MASK (0xffUL << 24)
+#define LRADC_DELAY_TRIGGER_LRADCS_OFFSET 24
+#define LRADC_DELAY_TRIGGER(x) \
+ (((x) << LRADC_DELAY_TRIGGER_LRADCS_OFFSET) & \
+ LRADC_DELAY_TRIGGER_LRADCS_MASK)
+#define LRADC_DELAY_KICK BIT(20)
+#define LRADC_DELAY_TRIGGER_DELAYS_MASK (0xf << 16)
+#define LRADC_DELAY_TRIGGER_DELAYS_OFFSET 16
+#define LRADC_DELAY_TRIGGER_DELAYS(x) \
+ (((x) << LRADC_DELAY_TRIGGER_DELAYS_OFFSET) & \
+ LRADC_DELAY_TRIGGER_DELAYS_MASK)
+#define LRADC_DELAY_LOOP_COUNT_MASK (0x1f << 11)
+#define LRADC_DELAY_LOOP_COUNT_OFFSET 11
+#define LRADC_DELAY_LOOP(x) \
+ (((x) << LRADC_DELAY_LOOP_COUNT_OFFSET) & \
+ LRADC_DELAY_LOOP_COUNT_MASK)
+#define LRADC_DELAY_DELAY_MASK 0x7ff
+#define LRADC_DELAY_DELAY_OFFSET 0
+#define LRADC_DELAY_DELAY(x) \
+ (((x) << LRADC_DELAY_DELAY_OFFSET) & \
+ LRADC_DELAY_DELAY_MASK)
+
+#define LRADC_CTRL4 0x140
+#define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4))
+#define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4)
+#define LRADC_CTRL4_LRADCSELECT(n, x) \
+ (((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \
+ LRADC_CTRL4_LRADCSELECT_MASK(n))
+
+#define LRADC_RESOLUTION 12
+#define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1)
+
+#define BUFFER_VCHANS_LIMITED 0x3f
+#define BUFFER_VCHANS_ALL 0xff
+
+ /*
+ * Certain LRADC channels are shared between touchscreen
+ * and/or touch-buttons and generic LRADC block. Therefore when using
+ * either of these, these channels are not available for the regular
+ * sampling. The shared channels are as follows:
+ *
+ * CH0 -- Touch button #0
+ * CH1 -- Touch button #1
+ * CH2 -- Touch screen XPUL
+ * CH3 -- Touch screen YPLL
+ * CH4 -- Touch screen XNUL
+ * CH5 -- Touch screen YNLR
+ * CH6 -- Touch screen WIPER (5-wire only)
+ *
+ * The bit fields below represent which parts of the LRADC block are
+ * switched into special mode of operation. These channels can not
+ * be sampled as regular LRADC channels. The driver will refuse any
+ * attempt to sample these channels.
+ */
+#define CHAN_MASK_TOUCHBUTTON (BIT(1) | BIT(0))
+#define CHAN_MASK_TOUCHSCREEN_4WIRE (0xf << 2)
+#define CHAN_MASK_TOUCHSCREEN_5WIRE (0x1f << 2)
+
+enum mxs_lradc_id {
+ IMX23_LRADC,
+ IMX28_LRADC,
+};
+
+enum mxs_lradc_ts_wires {
+ MXS_LRADC_TOUCHSCREEN_NONE = 0,
+ MXS_LRADC_TOUCHSCREEN_4WIRE,
+ MXS_LRADC_TOUCHSCREEN_5WIRE,
+};
+
+/**
+ * struct mxs_lradc
+ * @soc: soc type (IMX23 or IMX28)
+ * @clk: 2 kHz clock for delay units
+ * @buffer_vchans: channels that can be used during buffered capture
+ * @touchscreen_wire: touchscreen type (4-wire or 5-wire)
+ * @use_touchbutton: button state (on or off)
+ */
+struct mxs_lradc {
+ enum mxs_lradc_id soc;
+ struct clk *clk;
+ u8 buffer_vchans;
+
+ enum mxs_lradc_ts_wires touchscreen_wire;
+ bool use_touchbutton;
+};
+
+static inline u32 mxs_lradc_irq_mask(struct mxs_lradc *lradc)
+{
+ switch (lradc->soc) {
+ case IMX23_LRADC:
+ return LRADC_CTRL1_MX23_LRADC_IRQ_MASK;
+ case IMX28_LRADC:
+ return LRADC_CTRL1_MX28_LRADC_IRQ_MASK;
+ default:
+ return 0;
+ }
+}
+
+#endif /* __MXS_LRADC_H */
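
The new mxs-lradc.h header above defines its register fields as shift-and-mask helper macros (LRADC_DELAY_TRIGGER(), LRADC_DELAY_DELAY(), and so on). As a rough illustration of how such helpers compose a register word, the standalone snippet below re-declares a few of those macros and prints the value that would be written to a delay unit; the chosen channel and tick count are arbitrary examples, not values taken from any driver.

/* Illustration of composing an LRADC delay register value from the
 * shift-and-mask macros defined in the header above. */
#include <stdio.h>

#define BIT(n) (1U << (n))

#define LRADC_DELAY_TRIGGER_LRADCS_MASK   (0xffUL << 24)
#define LRADC_DELAY_TRIGGER_LRADCS_OFFSET 24
#define LRADC_DELAY_TRIGGER(x) \
	(((x) << LRADC_DELAY_TRIGGER_LRADCS_OFFSET) & \
	 LRADC_DELAY_TRIGGER_LRADCS_MASK)
#define LRADC_DELAY_KICK         BIT(20)
#define LRADC_DELAY_DELAY_MASK   0x7ff
#define LRADC_DELAY_DELAY_OFFSET 0
#define LRADC_DELAY_DELAY(x) \
	(((x) << LRADC_DELAY_DELAY_OFFSET) & \
	 LRADC_DELAY_DELAY_MASK)

int main(void)
{
	/* Trigger LRADC channel 5, kick the delay unit, wait 200 ticks of the 2 kHz clock. */
	unsigned long reg = LRADC_DELAY_TRIGGER(1UL << 5) | LRADC_DELAY_KICK |
			    LRADC_DELAY_DELAY(200);

	printf("LRADC_DELAY value: %#lx\n", reg);
	return 0;
}
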
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index d0300045f04a..4a0abbc10ef6 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -21,6 +21,7 @@
#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */
#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */
#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */
+#define TIM_CNT 0x24 /* Counter */
#define TIM_PSC 0x28 /* Prescaler */
#define TIM_ARR 0x2c /* Auto-Reload Register */
#define TIM_CCR1 0x34 /* Capt/Comp Register 1 */
@@ -30,6 +31,7 @@
#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */
#define TIM_CR1_CEN BIT(0) /* Counter Enable */
+#define TIM_CR1_DIR BIT(4) /* Counter Direction */
#define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */
#define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */
#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */
diff --git a/include/linux/mfd/sun4i-gpadc.h b/include/linux/mfd/sun4i-gpadc.h
index d7a29f246d64..139872c2e0fe 100644
--- a/include/linux/mfd/sun4i-gpadc.h
+++ b/include/linux/mfd/sun4i-gpadc.h
@@ -28,6 +28,7 @@
#define SUN4I_GPADC_CTRL1_TP_MODE_EN BIT(4)
#define SUN4I_GPADC_CTRL1_TP_ADC_SELECT BIT(3)
#define SUN4I_GPADC_CTRL1_ADC_CHAN_SELECT(x) (GENMASK(2, 0) & (x))
+#define SUN4I_GPADC_CTRL1_ADC_CHAN_MASK GENMASK(2, 0)
/* TP_CTRL1 bits for sun6i SOCs */
#define SUN6I_GPADC_CTRL1_TOUCH_PAN_CALI_EN BIT(7)
@@ -35,6 +36,11 @@
#define SUN6I_GPADC_CTRL1_TP_MODE_EN BIT(5)
#define SUN6I_GPADC_CTRL1_TP_ADC_SELECT BIT(4)
#define SUN6I_GPADC_CTRL1_ADC_CHAN_SELECT(x) (GENMASK(3, 0) & BIT(x))
+#define SUN6I_GPADC_CTRL1_ADC_CHAN_MASK GENMASK(3, 0)
+
+/* TP_CTRL1 bits for sun8i SoCs */
+#define SUN8I_GPADC_CTRL1_CHOP_TEMP_EN BIT(8)
+#define SUN8I_GPADC_CTRL1_GPADC_CALI_EN BIT(7)
#define SUN4I_GPADC_CTRL2 0x08
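The new *_ADC_CHAN_MASK definitions pair with the existing *_ADC_CHAN_SELECT(x) helpers so a caller can update only the channel-select field instead of rewriting the whole register. A sketch under the assumption that SUN4I_GPADC_CTRL1, defined earlier in this header, is the register being touched:

#include <linux/regmap.h>
#include <linux/mfd/sun4i-gpadc.h>

/* Select an ADC input channel without disturbing the other CTRL1 bits. */
static int example_select_channel(struct regmap *regmap, unsigned int chan)
{
	return regmap_update_bits(regmap, SUN4I_GPADC_CTRL1,
				  SUN4I_GPADC_CTRL1_ADC_CHAN_MASK,
				  SUN4I_GPADC_CTRL1_ADC_CHAN_SELECT(chan));
}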
diff --git a/include/linux/mfd/syscon/atmel-smc.h b/include/linux/mfd/syscon/atmel-smc.h
index be6ebe64eebe..afa266169800 100644
--- a/include/linux/mfd/syscon/atmel-smc.h
+++ b/include/linux/mfd/syscon/atmel-smc.h
@@ -17,157 +17,92 @@
#include <linux/kernel.h>
#include <linux/regmap.h>
-#define AT91SAM9_SMC_GENERIC 0x00
-#define AT91SAM9_SMC_GENERIC_BLK_SZ 0x10
-
-#define SAMA5_SMC_GENERIC 0x600
-#define SAMA5_SMC_GENERIC_BLK_SZ 0x14
-
-#define AT91SAM9_SMC_SETUP(o) ((o) + 0x00)
-#define AT91SAM9_SMC_NWESETUP(x) (x)
-#define AT91SAM9_SMC_NCS_WRSETUP(x) ((x) << 8)
-#define AT91SAM9_SMC_NRDSETUP(x) ((x) << 16)
-#define AT91SAM9_SMC_NCS_NRDSETUP(x) ((x) << 24)
-
-#define AT91SAM9_SMC_PULSE(o) ((o) + 0x04)
-#define AT91SAM9_SMC_NWEPULSE(x) (x)
-#define AT91SAM9_SMC_NCS_WRPULSE(x) ((x) << 8)
-#define AT91SAM9_SMC_NRDPULSE(x) ((x) << 16)
-#define AT91SAM9_SMC_NCS_NRDPULSE(x) ((x) << 24)
-
-#define AT91SAM9_SMC_CYCLE(o) ((o) + 0x08)
-#define AT91SAM9_SMC_NWECYCLE(x) (x)
-#define AT91SAM9_SMC_NRDCYCLE(x) ((x) << 16)
-
-#define AT91SAM9_SMC_MODE(o) ((o) + 0x0c)
-#define SAMA5_SMC_MODE(o) ((o) + 0x10)
-#define AT91_SMC_READMODE BIT(0)
-#define AT91_SMC_READMODE_NCS (0 << 0)
-#define AT91_SMC_READMODE_NRD (1 << 0)
-#define AT91_SMC_WRITEMODE BIT(1)
-#define AT91_SMC_WRITEMODE_NCS (0 << 1)
-#define AT91_SMC_WRITEMODE_NWE (1 << 1)
-#define AT91_SMC_EXNWMODE GENMASK(5, 4)
-#define AT91_SMC_EXNWMODE_DISABLE (0 << 4)
-#define AT91_SMC_EXNWMODE_FROZEN (2 << 4)
-#define AT91_SMC_EXNWMODE_READY (3 << 4)
-#define AT91_SMC_BAT BIT(8)
-#define AT91_SMC_BAT_SELECT (0 << 8)
-#define AT91_SMC_BAT_WRITE (1 << 8)
-#define AT91_SMC_DBW GENMASK(13, 12)
-#define AT91_SMC_DBW_8 (0 << 12)
-#define AT91_SMC_DBW_16 (1 << 12)
-#define AT91_SMC_DBW_32 (2 << 12)
-#define AT91_SMC_TDF GENMASK(19, 16)
-#define AT91_SMC_TDF_(x) ((((x) - 1) << 16) & AT91_SMC_TDF)
-#define AT91_SMC_TDF_MAX 16
-#define AT91_SMC_TDFMODE_OPTIMIZED BIT(20)
-#define AT91_SMC_PMEN BIT(24)
-#define AT91_SMC_PS GENMASK(29, 28)
-#define AT91_SMC_PS_4 (0 << 28)
-#define AT91_SMC_PS_8 (1 << 28)
-#define AT91_SMC_PS_16 (2 << 28)
-#define AT91_SMC_PS_32 (3 << 28)
-
-
-/*
- * This function converts a setup timing expressed in nanoseconds into an
- * encoded value that can be written in the SMC_SETUP register.
- *
- * The following formula is described in atmel datasheets (section
- * "SMC Setup Register"):
- *
- * setup length = (128* SETUP[5] + SETUP[4:0])
- *
- * where setup length is the timing expressed in cycles.
- */
-static inline u32 at91sam9_smc_setup_ns_to_cycles(unsigned int clk_rate,
- u32 timing_ns)
-{
- u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate);
- u32 coded_cycles = 0;
- u32 cycles;
-
- cycles = DIV_ROUND_UP(timing_ns, clk_period);
- if (cycles / 32) {
- coded_cycles |= 1 << 5;
- if (cycles < 128)
- cycles = 0;
- }
-
- coded_cycles |= cycles % 32;
-
- return coded_cycles;
-}
-
-/*
- * This function converts a pulse timing expressed in nanoseconds into an
- * encoded value that can be written in the SMC_PULSE register.
- *
- * The following formula is described in atmel datasheets (section
- * "SMC Pulse Register"):
- *
- * pulse length = (256* PULSE[6] + PULSE[5:0])
- *
- * where pulse length is the timing expressed in cycles.
+#define ATMEL_SMC_SETUP(cs) (((cs) * 0x10))
+#define ATMEL_HSMC_SETUP(cs) (0x600 + ((cs) * 0x14))
+#define ATMEL_SMC_PULSE(cs) (((cs) * 0x10) + 0x4)
+#define ATMEL_HSMC_PULSE(cs) (0x600 + ((cs) * 0x14) + 0x4)
+#define ATMEL_SMC_CYCLE(cs) (((cs) * 0x10) + 0x8)
+#define ATMEL_HSMC_CYCLE(cs) (0x600 + ((cs) * 0x14) + 0x8)
+#define ATMEL_SMC_NWE_SHIFT 0
+#define ATMEL_SMC_NCS_WR_SHIFT 8
+#define ATMEL_SMC_NRD_SHIFT 16
+#define ATMEL_SMC_NCS_RD_SHIFT 24
+
+#define ATMEL_SMC_MODE(cs) (((cs) * 0x10) + 0xc)
+#define ATMEL_HSMC_MODE(cs) (0x600 + ((cs) * 0x14) + 0x10)
+#define ATMEL_SMC_MODE_READMODE_MASK BIT(0)
+#define ATMEL_SMC_MODE_READMODE_NCS (0 << 0)
+#define ATMEL_SMC_MODE_READMODE_NRD (1 << 0)
+#define ATMEL_SMC_MODE_WRITEMODE_MASK BIT(1)
+#define ATMEL_SMC_MODE_WRITEMODE_NCS (0 << 1)
+#define ATMEL_SMC_MODE_WRITEMODE_NWE (1 << 1)
+#define ATMEL_SMC_MODE_EXNWMODE_MASK GENMASK(5, 4)
+#define ATMEL_SMC_MODE_EXNWMODE_DISABLE (0 << 4)
+#define ATMEL_SMC_MODE_EXNWMODE_FROZEN (2 << 4)
+#define ATMEL_SMC_MODE_EXNWMODE_READY (3 << 4)
+#define ATMEL_SMC_MODE_BAT_MASK BIT(8)
+#define ATMEL_SMC_MODE_BAT_SELECT (0 << 8)
+#define ATMEL_SMC_MODE_BAT_WRITE (1 << 8)
+#define ATMEL_SMC_MODE_DBW_MASK GENMASK(13, 12)
+#define ATMEL_SMC_MODE_DBW_8 (0 << 12)
+#define ATMEL_SMC_MODE_DBW_16 (1 << 12)
+#define ATMEL_SMC_MODE_DBW_32 (2 << 12)
+#define ATMEL_SMC_MODE_TDF_MASK GENMASK(19, 16)
+#define ATMEL_SMC_MODE_TDF(x) (((x) - 1) << 16)
+#define ATMEL_SMC_MODE_TDF_MAX 16
+#define ATMEL_SMC_MODE_TDF_MIN 1
+#define ATMEL_SMC_MODE_TDFMODE_OPTIMIZED BIT(20)
+#define ATMEL_SMC_MODE_PMEN BIT(24)
+#define ATMEL_SMC_MODE_PS_MASK GENMASK(29, 28)
+#define ATMEL_SMC_MODE_PS_4 (0 << 28)
+#define ATMEL_SMC_MODE_PS_8 (1 << 28)
+#define ATMEL_SMC_MODE_PS_16 (2 << 28)
+#define ATMEL_SMC_MODE_PS_32 (3 << 28)
+
+#define ATMEL_HSMC_TIMINGS(cs) (0x600 + ((cs) * 0x14) + 0xc)
+#define ATMEL_HSMC_TIMINGS_OCMS BIT(12)
+#define ATMEL_HSMC_TIMINGS_RBNSEL(x) ((x) << 28)
+#define ATMEL_HSMC_TIMINGS_NFSEL BIT(31)
+#define ATMEL_HSMC_TIMINGS_TCLR_SHIFT 0
+#define ATMEL_HSMC_TIMINGS_TADL_SHIFT 4
+#define ATMEL_HSMC_TIMINGS_TAR_SHIFT 8
+#define ATMEL_HSMC_TIMINGS_TRR_SHIFT 16
+#define ATMEL_HSMC_TIMINGS_TWB_SHIFT 24
+
+/**
+ * struct atmel_smc_cs_conf - SMC CS config as described in the datasheet.
+ * @setup: NCS/NWE/NRD setup timings (not applicable to at91rm9200)
+ * @pulse: NCS/NWE/NRD pulse timings (not applicable to at91rm9200)
+ * @cycle: NWE/NRD cycle timings (not applicable to at91rm9200)
+ * @timings: advanced NAND related timings (only applicable to HSMC)
+ * @mode: all kinds of config parameters (see the field definitions above).
+ * The mode fields are different on at91rm9200.
*/
-static inline u32 at91sam9_smc_pulse_ns_to_cycles(unsigned int clk_rate,
- u32 timing_ns)
-{
- u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate);
- u32 coded_cycles = 0;
- u32 cycles;
-
- cycles = DIV_ROUND_UP(timing_ns, clk_period);
- if (cycles / 64) {
- coded_cycles |= 1 << 6;
- if (cycles < 256)
- cycles = 0;
- }
-
- coded_cycles |= cycles % 64;
-
- return coded_cycles;
-}
-
-/*
- * This function converts a cycle timing expressed in nanoseconds into an
- * encoded value that can be written in the SMC_CYCLE register.
- *
- * The following formula is described in atmel datasheets (section
- * "SMC Cycle Register"):
- *
- * cycle length = (CYCLE[8:7]*256 + CYCLE[6:0])
- *
- * where cycle length is the timing expressed in cycles.
- */
-static inline u32 at91sam9_smc_cycle_ns_to_cycles(unsigned int clk_rate,
- u32 timing_ns)
-{
- u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate);
- u32 coded_cycles = 0;
- u32 cycles;
-
- cycles = DIV_ROUND_UP(timing_ns, clk_period);
- if (cycles / 128) {
- coded_cycles = cycles / 256;
- cycles %= 256;
- if (cycles >= 128) {
- coded_cycles++;
- cycles = 0;
- }
-
- if (coded_cycles > 0x3) {
- coded_cycles = 0x3;
- cycles = 0x7f;
- }
-
- coded_cycles <<= 7;
- }
-
- coded_cycles |= cycles % 128;
-
- return coded_cycles;
-}
+struct atmel_smc_cs_conf {
+ u32 setup;
+ u32 pulse;
+ u32 cycle;
+ u32 timings;
+ u32 mode;
+};
+
+void atmel_smc_cs_conf_init(struct atmel_smc_cs_conf *conf);
+int atmel_smc_cs_conf_set_timing(struct atmel_smc_cs_conf *conf,
+ unsigned int shift,
+ unsigned int ncycles);
+int atmel_smc_cs_conf_set_setup(struct atmel_smc_cs_conf *conf,
+ unsigned int shift, unsigned int ncycles);
+int atmel_smc_cs_conf_set_pulse(struct atmel_smc_cs_conf *conf,
+ unsigned int shift, unsigned int ncycles);
+int atmel_smc_cs_conf_set_cycle(struct atmel_smc_cs_conf *conf,
+ unsigned int shift, unsigned int ncycles);
+void atmel_smc_cs_conf_apply(struct regmap *regmap, int cs,
+ const struct atmel_smc_cs_conf *conf);
+void atmel_hsmc_cs_conf_apply(struct regmap *regmap, int cs,
+ const struct atmel_smc_cs_conf *conf);
+void atmel_smc_cs_conf_get(struct regmap *regmap, int cs,
+ struct atmel_smc_cs_conf *conf);
+void atmel_hsmc_cs_conf_get(struct regmap *regmap, int cs,
+ struct atmel_smc_cs_conf *conf);
#endif /* _LINUX_MFD_SYSCON_ATMEL_SMC_H_ */
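The reworked header trades the inline ns-to-cycles converters for a config object plus accessor functions. A hedged sketch of how a caller might build and program one chip select, assuming the timing values are already expressed in clock cycles and the syscon regmap is at hand; the numbers are arbitrary:

#include <linux/regmap.h>
#include <linux/mfd/syscon/atmel-smc.h>

static int example_program_cs(struct regmap *regmap, int cs)
{
	struct atmel_smc_cs_conf conf;
	int ret;

	atmel_smc_cs_conf_init(&conf);

	/* 3 cycles of NWE setup, 10 cycles of NWE pulse, 16 cycle write. */
	ret = atmel_smc_cs_conf_set_setup(&conf, ATMEL_SMC_NWE_SHIFT, 3);
	if (ret)
		return ret;
	ret = atmel_smc_cs_conf_set_pulse(&conf, ATMEL_SMC_NWE_SHIFT, 10);
	if (ret)
		return ret;
	ret = atmel_smc_cs_conf_set_cycle(&conf, ATMEL_SMC_NWE_SHIFT, 16);
	if (ret)
		return ret;

	conf.mode |= ATMEL_SMC_MODE_READMODE_NRD | ATMEL_SMC_MODE_WRITEMODE_NWE;

	atmel_smc_cs_conf_apply(regmap, cs, &conf);
	return 0;
}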
diff --git a/include/linux/mfd/syscon/exynos5-pmu.h b/include/linux/mfd/syscon/exynos5-pmu.h
index c28ff21ca4d2..b4942a32b81d 100644
--- a/include/linux/mfd/syscon/exynos5-pmu.h
+++ b/include/linux/mfd/syscon/exynos5-pmu.h
@@ -12,41 +12,8 @@
#ifndef _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_
#define _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_
-/* Exynos5 PMU register definitions */
-#define EXYNOS5_HDMI_PHY_CONTROL (0x700)
-#define EXYNOS5_USBDRD_PHY_CONTROL (0x704)
-
-/* Exynos5250 specific register definitions */
-#define EXYNOS5_USBHOST_PHY_CONTROL (0x708)
-#define EXYNOS5_EFNAND_PHY_CONTROL (0x70c)
-#define EXYNOS5_MIPI_PHY0_CONTROL (0x710)
-#define EXYNOS5_MIPI_PHY1_CONTROL (0x714)
-#define EXYNOS5_ADC_PHY_CONTROL (0x718)
-#define EXYNOS5_MTCADC_PHY_CONTROL (0x71c)
-#define EXYNOS5_DPTX_PHY_CONTROL (0x720)
-#define EXYNOS5_SATA_PHY_CONTROL (0x724)
-
-/* Exynos5420 specific register definitions */
-#define EXYNOS5420_USBDRD1_PHY_CONTROL (0x708)
-#define EXYNOS5420_USBHOST_PHY_CONTROL (0x70c)
-#define EXYNOS5420_MIPI_PHY0_CONTROL (0x714)
-#define EXYNOS5420_MIPI_PHY1_CONTROL (0x718)
-#define EXYNOS5420_MIPI_PHY2_CONTROL (0x71c)
-#define EXYNOS5420_ADC_PHY_CONTROL (0x720)
-#define EXYNOS5420_MTCADC_PHY_CONTROL (0x724)
-#define EXYNOS5420_DPTX_PHY_CONTROL (0x728)
-
-/* Exynos5433 specific register definitions */
-#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x728)
-#define EXYNOS5433_MIPI_PHY0_CONTROL (0x710)
-#define EXYNOS5433_MIPI_PHY1_CONTROL (0x714)
-#define EXYNOS5433_MIPI_PHY2_CONTROL (0x718)
-
#define EXYNOS5_PHY_ENABLE BIT(0)
#define EXYNOS5_MIPI_PHY_S_RESETN BIT(1)
#define EXYNOS5_MIPI_PHY_M_RESETN BIT(2)
-#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028)
-#define EXYNOS5433_PAD_INITIATE_WAKEUP_FROM_LOWPWR BIT(28)
-
#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_ */
diff --git a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
index 4585d6105d68..abbd52466573 100644
--- a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
@@ -44,4 +44,8 @@
#define IMX7D_GPR5_CSI_MUX_CONTROL_MIPI (0x1 << 4)
+#define IMX7D_GPR12_PCIE_PHY_REFCLK_SEL BIT(5)
+
+#define IMX7D_GPR22_PCIE_PHY_PLL_LOCKED BIT(31)
+
#endif /* __LINUX_IMX7_IOMUXC_GPR_H */
diff --git a/include/linux/mfd/ti-lmu-register.h b/include/linux/mfd/ti-lmu-register.h
new file mode 100644
index 000000000000..2125c7c02818
--- /dev/null
+++ b/include/linux/mfd/ti-lmu-register.h
@@ -0,0 +1,280 @@
+/*
+ * TI LMU (Lighting Management Unit) Device Register Map
+ *
+ * Copyright 2017 Texas Instruments
+ *
+ * Author: Milo Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFD_TI_LMU_REGISTER_H__
+#define __MFD_TI_LMU_REGISTER_H__
+
+#include <linux/bitops.h>
+
+/* LM3532 */
+#define LM3532_REG_OUTPUT_CFG 0x10
+#define LM3532_ILED1_CFG_MASK 0x03
+#define LM3532_ILED2_CFG_MASK 0x0C
+#define LM3532_ILED3_CFG_MASK 0x30
+#define LM3532_ILED1_CFG_SHIFT 0
+#define LM3532_ILED2_CFG_SHIFT 2
+#define LM3532_ILED3_CFG_SHIFT 4
+
+#define LM3532_REG_RAMPUP 0x12
+#define LM3532_REG_RAMPDN LM3532_REG_RAMPUP
+#define LM3532_RAMPUP_MASK 0x07
+#define LM3532_RAMPUP_SHIFT 0
+#define LM3532_RAMPDN_MASK 0x38
+#define LM3532_RAMPDN_SHIFT 3
+
+#define LM3532_REG_ENABLE 0x1D
+
+#define LM3532_REG_PWM_A_CFG 0x13
+#define LM3532_PWM_A_MASK 0x05 /* zone 0 */
+#define LM3532_PWM_ZONE_0 BIT(2)
+
+#define LM3532_REG_PWM_B_CFG 0x14
+#define LM3532_PWM_B_MASK 0x09 /* zone 1 */
+#define LM3532_PWM_ZONE_1 BIT(3)
+
+#define LM3532_REG_PWM_C_CFG 0x15
+#define LM3532_PWM_C_MASK 0x11 /* zone 2 */
+#define LM3532_PWM_ZONE_2 BIT(4)
+
+#define LM3532_REG_ZONE_CFG_A 0x16
+#define LM3532_REG_ZONE_CFG_B 0x18
+#define LM3532_REG_ZONE_CFG_C 0x1A
+#define LM3532_ZONE_MASK (BIT(2) | BIT(3) | BIT(4))
+#define LM3532_ZONE_0 0
+#define LM3532_ZONE_1 BIT(2)
+#define LM3532_ZONE_2 BIT(3)
+
+#define LM3532_REG_BRT_A 0x70 /* zone 0 */
+#define LM3532_REG_BRT_B 0x76 /* zone 1 */
+#define LM3532_REG_BRT_C 0x7C /* zone 2 */
+
+#define LM3532_MAX_REG 0x7E
+
+/* LM3631 */
+#define LM3631_REG_DEVCTRL 0x00
+#define LM3631_LCD_EN_MASK BIT(1)
+#define LM3631_BL_EN_MASK BIT(0)
+
+#define LM3631_REG_BRT_LSB 0x01
+#define LM3631_REG_BRT_MSB 0x02
+
+#define LM3631_REG_BL_CFG 0x06
+#define LM3631_BL_CHANNEL_MASK BIT(3)
+#define LM3631_BL_DUAL_CHANNEL 0
+#define LM3631_BL_SINGLE_CHANNEL BIT(3)
+#define LM3631_MAP_MASK BIT(5)
+#define LM3631_EXPONENTIAL_MAP 0
+
+#define LM3631_REG_BRT_MODE 0x08
+#define LM3631_MODE_MASK (BIT(1) | BIT(2) | BIT(3))
+#define LM3631_DEFAULT_MODE (BIT(1) | BIT(3))
+
+#define LM3631_REG_SLOPE 0x09
+#define LM3631_SLOPE_MASK 0xF0
+#define LM3631_SLOPE_SHIFT 4
+
+#define LM3631_REG_LDO_CTRL1 0x0A
+#define LM3631_EN_OREF_MASK BIT(0)
+#define LM3631_EN_VNEG_MASK BIT(1)
+#define LM3631_EN_VPOS_MASK BIT(2)
+
+#define LM3631_REG_LDO_CTRL2 0x0B
+#define LM3631_EN_CONT_MASK BIT(0)
+
+#define LM3631_REG_VOUT_CONT 0x0C
+#define LM3631_VOUT_CONT_MASK (BIT(6) | BIT(7))
+
+#define LM3631_REG_VOUT_BOOST 0x0C
+#define LM3631_REG_VOUT_POS 0x0D
+#define LM3631_REG_VOUT_NEG 0x0E
+#define LM3631_REG_VOUT_OREF 0x0F
+#define LM3631_VOUT_MASK 0x3F
+
+#define LM3631_REG_ENTIME_VCONT 0x0B
+#define LM3631_ENTIME_CONT_MASK 0x70
+
+#define LM3631_REG_ENTIME_VOREF 0x0F
+#define LM3631_REG_ENTIME_VPOS 0x10
+#define LM3631_REG_ENTIME_VNEG 0x11
+#define LM3631_ENTIME_MASK 0xF0
+#define LM3631_ENTIME_SHIFT 4
+
+#define LM3631_MAX_REG 0x16
+
+/* LM3632 */
+#define LM3632_REG_CONFIG1 0x02
+#define LM3632_OVP_MASK (BIT(5) | BIT(6) | BIT(7))
+#define LM3632_OVP_25V BIT(6)
+
+#define LM3632_REG_CONFIG2 0x03
+#define LM3632_SWFREQ_MASK BIT(7)
+#define LM3632_SWFREQ_1MHZ BIT(7)
+
+#define LM3632_REG_BRT_LSB 0x04
+#define LM3632_REG_BRT_MSB 0x05
+
+#define LM3632_REG_IO_CTRL 0x09
+#define LM3632_PWM_MASK BIT(6)
+#define LM3632_I2C_MODE 0
+#define LM3632_PWM_MODE BIT(6)
+
+#define LM3632_REG_ENABLE 0x0A
+#define LM3632_BL_EN_MASK BIT(0)
+#define LM3632_BL_CHANNEL_MASK (BIT(3) | BIT(4))
+#define LM3632_BL_SINGLE_CHANNEL BIT(4)
+#define LM3632_BL_DUAL_CHANNEL BIT(3)
+
+#define LM3632_REG_BIAS_CONFIG 0x0C
+#define LM3632_EXT_EN_MASK BIT(0)
+#define LM3632_EN_VNEG_MASK BIT(1)
+#define LM3632_EN_VPOS_MASK BIT(2)
+
+#define LM3632_REG_VOUT_BOOST 0x0D
+#define LM3632_REG_VOUT_POS 0x0E
+#define LM3632_REG_VOUT_NEG 0x0F
+#define LM3632_VOUT_MASK 0x3F
+
+#define LM3632_MAX_REG 0x10
+
+/* LM3633 */
+#define LM3633_REG_HVLED_OUTPUT_CFG 0x10
+#define LM3633_HVLED1_CFG_MASK BIT(0)
+#define LM3633_HVLED2_CFG_MASK BIT(1)
+#define LM3633_HVLED3_CFG_MASK BIT(2)
+#define LM3633_HVLED1_CFG_SHIFT 0
+#define LM3633_HVLED2_CFG_SHIFT 1
+#define LM3633_HVLED3_CFG_SHIFT 2
+
+#define LM3633_REG_BANK_SEL 0x11
+
+#define LM3633_REG_BL0_RAMP 0x12
+#define LM3633_REG_BL1_RAMP 0x13
+#define LM3633_BL_RAMPUP_MASK 0xF0
+#define LM3633_BL_RAMPUP_SHIFT 4
+#define LM3633_BL_RAMPDN_MASK 0x0F
+#define LM3633_BL_RAMPDN_SHIFT 0
+
+#define LM3633_REG_BL_RAMP_CONF 0x1B
+#define LM3633_BL_RAMP_MASK 0x0F
+#define LM3633_BL_RAMP_EACH 0x05
+
+#define LM3633_REG_PTN0_RAMP 0x1C
+#define LM3633_REG_PTN1_RAMP 0x1D
+#define LM3633_PTN_RAMPUP_MASK 0x70
+#define LM3633_PTN_RAMPUP_SHIFT 4
+#define LM3633_PTN_RAMPDN_MASK 0x07
+#define LM3633_PTN_RAMPDN_SHIFT 0
+
+#define LM3633_REG_LED_MAPPING_MODE 0x1F
+#define LM3633_LED_EXPONENTIAL BIT(1)
+
+#define LM3633_REG_IMAX_HVLED_A 0x20
+#define LM3633_REG_IMAX_HVLED_B 0x21
+#define LM3633_REG_IMAX_LVLED_BASE 0x22
+
+#define LM3633_REG_BL_FEEDBACK_ENABLE 0x28
+
+#define LM3633_REG_ENABLE 0x2B
+#define LM3633_LED_BANK_OFFSET 2
+
+#define LM3633_REG_PATTERN 0x2C
+
+#define LM3633_REG_BOOST_CFG 0x2D
+#define LM3633_OVP_MASK (BIT(1) | BIT(2))
+#define LM3633_OVP_40V 0x6
+
+#define LM3633_REG_PWM_CFG 0x2F
+#define LM3633_PWM_A_MASK BIT(0)
+#define LM3633_PWM_B_MASK BIT(1)
+
+#define LM3633_REG_BRT_HVLED_A_LSB 0x40
+#define LM3633_REG_BRT_HVLED_A_MSB 0x41
+#define LM3633_REG_BRT_HVLED_B_LSB 0x42
+#define LM3633_REG_BRT_HVLED_B_MSB 0x43
+
+#define LM3633_REG_BRT_LVLED_BASE 0x44
+
+#define LM3633_REG_PTN_DELAY 0x50
+
+#define LM3633_REG_PTN_LOWTIME 0x51
+
+#define LM3633_REG_PTN_HIGHTIME 0x52
+
+#define LM3633_REG_PTN_LOWBRT 0x53
+
+#define LM3633_REG_PTN_HIGHBRT LM3633_REG_BRT_LVLED_BASE
+
+#define LM3633_REG_BL_OPEN_FAULT_STATUS 0xB0
+
+#define LM3633_REG_BL_SHORT_FAULT_STATUS 0xB2
+
+#define LM3633_REG_MONITOR_ENABLE 0xB4
+
+#define LM3633_MAX_REG 0xB4
+
+/* LM3695 */
+#define LM3695_REG_GP 0x10
+#define LM3695_BL_CHANNEL_MASK BIT(3)
+#define LM3695_BL_DUAL_CHANNEL 0
+#define LM3695_BL_SINGLE_CHANNEL BIT(3)
+#define LM3695_BRT_RW_MASK BIT(2)
+#define LM3695_BL_EN_MASK BIT(0)
+
+#define LM3695_REG_BRT_LSB 0x13
+#define LM3695_REG_BRT_MSB 0x14
+
+#define LM3695_MAX_REG 0x14
+
+/* LM3697 */
+#define LM3697_REG_HVLED_OUTPUT_CFG 0x10
+#define LM3697_HVLED1_CFG_MASK BIT(0)
+#define LM3697_HVLED2_CFG_MASK BIT(1)
+#define LM3697_HVLED3_CFG_MASK BIT(2)
+#define LM3697_HVLED1_CFG_SHIFT 0
+#define LM3697_HVLED2_CFG_SHIFT 1
+#define LM3697_HVLED3_CFG_SHIFT 2
+
+#define LM3697_REG_BL0_RAMP 0x11
+#define LM3697_REG_BL1_RAMP 0x12
+#define LM3697_RAMPUP_MASK 0xF0
+#define LM3697_RAMPUP_SHIFT 4
+#define LM3697_RAMPDN_MASK 0x0F
+#define LM3697_RAMPDN_SHIFT 0
+
+#define LM3697_REG_RAMP_CONF 0x14
+#define LM3697_RAMP_MASK 0x0F
+#define LM3697_RAMP_EACH 0x05
+
+#define LM3697_REG_PWM_CFG 0x1C
+#define LM3697_PWM_A_MASK BIT(0)
+#define LM3697_PWM_B_MASK BIT(1)
+
+#define LM3697_REG_IMAX_A 0x17
+#define LM3697_REG_IMAX_B 0x18
+
+#define LM3697_REG_FEEDBACK_ENABLE 0x19
+
+#define LM3697_REG_BRT_A_LSB 0x20
+#define LM3697_REG_BRT_A_MSB 0x21
+#define LM3697_REG_BRT_B_LSB 0x22
+#define LM3697_REG_BRT_B_MSB 0x23
+
+#define LM3697_REG_ENABLE 0x24
+
+#define LM3697_REG_OPEN_FAULT_STATUS 0xB0
+
+#define LM3697_REG_SHORT_FAULT_STATUS 0xB2
+
+#define LM3697_REG_MONITOR_ENABLE 0xB4
+
+#define LM3697_MAX_REG 0xB4
+#endif
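Several of the parts above split a brightness code across an LSB/MSB register pair (for example LM3631_REG_BRT_LSB/MSB). A sketch of writing such a value through the shared regmap; the 3-bit LSB / 8-bit MSB split is an assumption about the usual TI LMU layout, not something this header states:

#include <linux/regmap.h>
#include <linux/mfd/ti-lmu-register.h>

/* Write an 11-bit brightness value to an LSB/MSB register pair. */
static int example_lm3631_set_brightness(struct regmap *regmap, u16 brt)
{
	int ret;

	ret = regmap_write(regmap, LM3631_REG_BRT_LSB, brt & 0x7);
	if (ret)
		return ret;

	return regmap_write(regmap, LM3631_REG_BRT_MSB, (brt >> 3) & 0xff);
}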
diff --git a/include/linux/mfd/ti-lmu.h b/include/linux/mfd/ti-lmu.h
new file mode 100644
index 000000000000..09d5f30384e5
--- /dev/null
+++ b/include/linux/mfd/ti-lmu.h
@@ -0,0 +1,87 @@
+/*
+ * TI LMU (Lighting Management Unit) Devices
+ *
+ * Copyright 2017 Texas Instruments
+ *
+ * Author: Milo Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFD_TI_LMU_H__
+#define __MFD_TI_LMU_H__
+
+#include <linux/gpio.h>
+#include <linux/notifier.h>
+#include <linux/regmap.h>
+
+/* Notifier event */
+#define LMU_EVENT_MONITOR_DONE 0x01
+
+enum ti_lmu_id {
+ LM3532,
+ LM3631,
+ LM3632,
+ LM3633,
+ LM3695,
+ LM3697,
+ LMU_MAX_ID,
+};
+
+enum ti_lmu_max_current {
+ LMU_IMAX_5mA,
+ LMU_IMAX_6mA,
+ LMU_IMAX_7mA = 0x03,
+ LMU_IMAX_8mA,
+ LMU_IMAX_9mA,
+ LMU_IMAX_10mA = 0x07,
+ LMU_IMAX_11mA,
+ LMU_IMAX_12mA,
+ LMU_IMAX_13mA,
+ LMU_IMAX_14mA,
+ LMU_IMAX_15mA = 0x0D,
+ LMU_IMAX_16mA,
+ LMU_IMAX_17mA,
+ LMU_IMAX_18mA,
+ LMU_IMAX_19mA,
+ LMU_IMAX_20mA = 0x13,
+ LMU_IMAX_21mA,
+ LMU_IMAX_22mA,
+ LMU_IMAX_23mA = 0x17,
+ LMU_IMAX_24mA,
+ LMU_IMAX_25mA,
+ LMU_IMAX_26mA,
+ LMU_IMAX_27mA = 0x1C,
+ LMU_IMAX_28mA,
+ LMU_IMAX_29mA,
+ LMU_IMAX_30mA,
+};
+
+enum lm363x_regulator_id {
+ LM3631_BOOST, /* Boost output */
+ LM3631_LDO_CONT, /* Display panel controller */
+ LM3631_LDO_OREF, /* Gamma reference */
+ LM3631_LDO_POS, /* Positive display bias output */
+ LM3631_LDO_NEG, /* Negative display bias output */
+ LM3632_BOOST, /* Boost output */
+ LM3632_LDO_POS, /* Positive display bias output */
+ LM3632_LDO_NEG, /* Negative display bias output */
+};
+
+/**
+ * struct ti_lmu
+ *
+ * @dev: Parent device pointer
+ * @regmap: Used for I2C communication when accessing registers
+ * @en_gpio: GPIO for HWEN pin [Optional]
+ * @notifier: Notifier for reporting hwmon event
+ */
+struct ti_lmu {
+ struct device *dev;
+ struct regmap *regmap;
+ int en_gpio;
+ struct blocking_notifier_head notifier;
+};
+#endif
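struct ti_lmu carries a blocking notifier head so sub-drivers can react to events such as LMU_EVENT_MONITOR_DONE. A minimal subscription sketch; the callback body and how the parent ti_lmu pointer is obtained are illustrative:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/mfd/ti-lmu.h>

static int example_lmu_notifier_cb(struct notifier_block *nb,
				   unsigned long action, void *unused)
{
	if (action == LMU_EVENT_MONITOR_DONE)
		pr_debug("ti-lmu: fault monitor finished\n");

	return NOTIFY_OK;
}

static struct notifier_block example_lmu_nb = {
	.notifier_call = example_lmu_notifier_cb,
};

/* Typically called from a child driver's probe with the parent's ti_lmu. */
static int example_lmu_subscribe(struct ti_lmu *lmu)
{
	return blocking_notifier_chain_register(&lmu->notifier,
						&example_lmu_nb);
}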
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 76c22648436f..b49fa67612f1 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -21,6 +21,8 @@
#include <linux/list.h>
#include <linux/regmap.h>
#include <linux/mfd/wm831x/auxadc.h>
+#include <linux/mfd/wm831x/pdata.h>
+#include <linux/of.h>
/*
* Register values.
@@ -367,6 +369,9 @@ struct wm831x {
struct regmap *regmap;
+ struct wm831x_pdata pdata;
+ enum wm831x_parent type;
+
int irq; /* Our chip IRQ */
struct mutex irq_lock;
struct irq_domain *irq_domain;
@@ -412,7 +417,7 @@ int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg,
int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg,
int count, u16 *buf);
-int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq);
+int wm831x_device_init(struct wm831x *wm831x, int irq);
void wm831x_device_exit(struct wm831x *wm831x);
int wm831x_device_suspend(struct wm831x *wm831x);
void wm831x_device_shutdown(struct wm831x *wm831x);
@@ -427,4 +432,6 @@ static inline int wm831x_irq(struct wm831x *wm831x, int irq)
extern struct regmap_config wm831x_regmap_config;
+extern const struct of_device_id wm831x_of_match[];
+
#endif
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h
deleted file mode 100644
index e11f4d9f1c2e..000000000000
--- a/include/linux/mg_disk.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * include/linux/mg_disk.c
- *
- * Private data for mflash platform driver
- *
- * (c) 2008 mGine Co.,LTD
- * (c) 2008 unsik Kim <donari75@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MG_DISK_H__
-#define __MG_DISK_H__
-
-/* name for platform device */
-#define MG_DEV_NAME "mg_disk"
-
-/* names of GPIO resource */
-#define MG_RST_PIN "mg_rst"
-/* except MG_BOOT_DEV, reset-out pin should be assigned */
-#define MG_RSTOUT_PIN "mg_rstout"
-
-/* device attribution */
-/* use mflash as boot device */
-#define MG_BOOT_DEV (1 << 0)
-/* use mflash as storage device */
-#define MG_STORAGE_DEV (1 << 1)
-/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
-#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
-
-/* private driver data */
-struct mg_drv_data {
- /* disk resource */
- u32 use_polling;
-
- /* device attribution */
- u32 dev_attr;
-
- /* internally used */
- void *host;
-};
-
-#endif
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index fa76b516fa47..48e24844b3c5 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -33,8 +33,9 @@ extern char *migrate_reason_names[MR_TYPES];
#ifdef CONFIG_MIGRATION
extern void putback_movable_pages(struct list_head *l);
-extern int migrate_page(struct address_space *,
- struct page *, struct page *, enum migrate_mode);
+extern int migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page,
+ enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
unsigned long private, enum migrate_mode mode, int reason);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 1beb1ec2fbdf..d5bed0875d30 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -108,7 +108,7 @@ enum {
MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1)
};
-/* Driver supports 3 diffrent device methods to manage traffic steering:
+/* Driver supports 3 different device methods to manage traffic steering:
* -device managed - High level API for ib and eth flow steering. FW is
* managing flow steering tables.
* - B0 steering mode - Common low level API for ib and (if supported) eth.
@@ -1011,8 +1011,7 @@ struct mlx4_mad_ifc {
#define mlx4_foreach_ib_transport_port(port, dev) \
for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
- ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) || \
- ((dev)->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2))
+ ((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_ETH))
#define MLX4_INVALID_SLAVE_ID 0xFF
#define MLX4_SINK_COUNTER_INDEX(dev) (dev->caps.max_counters - 1)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index dd9a263ed368..a940ec6a046c 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -787,8 +787,14 @@ enum {
};
enum {
- CQE_RSS_HTYPE_IP = 0x3 << 6,
- CQE_RSS_HTYPE_L4 = 0x3 << 2,
+ CQE_RSS_HTYPE_IP = 0x3 << 2,
+ /* cqe->rss_hash_type[3:2] - IP destination selected for hash
+ * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
+ */
+ CQE_RSS_HTYPE_L4 = 0x3 << 6,
+ /* cqe->rss_hash_type[7:6] - L4 destination selected for hash
+ * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
+ */
};
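The relocated CQE_RSS_HTYPE_* masks describe how cqe->rss_hash_type encodes which headers fed the RSS hash. A hedged sketch of turning that byte into an skb hash type; the mapping to PKT_HASH_TYPE_* values is illustrative:

#include <linux/skbuff.h>
#include <linux/mlx5/device.h>

/* Map a CQE's rss_hash_type byte to an skb hash type. */
static enum pkt_hash_types example_cqe_hash_type(u8 rss_hash_type)
{
	if (rss_hash_type & CQE_RSS_HTYPE_L4)
		return PKT_HASH_TYPE_L4;
	if (rss_hash_type & CQE_RSS_HTYPE_IP)
		return PKT_HASH_TYPE_L3;

	return PKT_HASH_TYPE_NONE;
}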
enum {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 2fcff6b4503f..93273d9ea4d1 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -540,6 +540,7 @@ struct mlx5_fc_stats {
struct workqueue_struct *wq;
struct delayed_work work;
unsigned long next_query;
+ unsigned long sampling_interval; /* jiffies */
};
struct mlx5_eswitch;
@@ -728,6 +729,7 @@ struct mlx5e_resources {
u32 pdn;
struct mlx5_td td;
struct mlx5_core_mkey mkey;
+ struct mlx5_sq_bfreg bfreg;
};
struct mlx5_core_dev {
@@ -785,7 +787,12 @@ enum {
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
+enum {
+ MLX5_CMD_ENT_STATE_PENDING_COMP,
+};
+
struct mlx5_cmd_work_ent {
+ unsigned long state;
struct mlx5_cmd_msg *in;
struct mlx5_cmd_msg *out;
void *uout;
@@ -890,12 +897,7 @@ static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
static inline void *mlx5_vzalloc(unsigned long size)
{
- void *rtn;
-
- rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (!rtn)
- rtn = vzalloc(size);
- return rtn;
+ return kvzalloc(size, GFP_KERNEL);
}
static inline u32 mlx5_base_mkey(const u32 key)
@@ -979,7 +981,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name,
@@ -1100,6 +1102,25 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
+#ifndef CONFIG_MLX5_CORE_IPOIB
+static inline
+struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
+ struct ib_device *ibdev,
+ const char *name,
+ void (*setup)(struct net_device *))
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void mlx5_rdma_netdev_free(struct net_device *netdev) {}
+#else
+struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
+ struct ib_device *ibdev,
+ const char *name,
+ void (*setup)(struct net_device *));
+void mlx5_rdma_netdev_free(struct net_device *netdev);
+#endif /* CONFIG_MLX5_CORE_IPOIB */
+
struct mlx5_profile {
u64 mask;
u8 log_max_qp;
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 949b24b6c479..b25e7baa273e 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -104,12 +104,17 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
u32 level,
u32 flags);
+struct mlx5_flow_table_attr {
+ int prio;
+ int max_fte;
+ u32 level;
+ u32 flags;
+};
+
struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int num_flow_table_entries,
- u32 level,
- u32 flags);
+ struct mlx5_flow_table_attr *ft_attr);
+
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
int prio,
@@ -134,8 +139,13 @@ struct mlx5_flow_act {
u32 action;
u32 flow_tag;
u32 encap_id;
+ u32 modify_id;
};
+#define MLX5_DECLARE_FLOW_ACT(name) \
+ struct mlx5_flow_act name = {MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
+ MLX5_FS_DEFAULT_FLOW_TAG, 0, 0}
+
/* Single destination per rule.
* Group ID is implied by the match criteria.
*/
@@ -156,5 +166,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
+int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
+int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
#endif
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 838242697541..32de0724b400 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -227,6 +227,8 @@ enum {
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d,
MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e,
+ MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940,
+ MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
MLX5_CMD_OP_MAX
};
@@ -234,7 +236,7 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_dmac[0x1];
u8 outer_smac[0x1];
u8 outer_ether_type[0x1];
- u8 reserved_at_3[0x1];
+ u8 outer_ip_version[0x1];
u8 outer_first_prio[0x1];
u8 outer_first_cfi[0x1];
u8 outer_first_vid[0x1];
@@ -263,7 +265,7 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 inner_dmac[0x1];
u8 inner_smac[0x1];
u8 inner_ether_type[0x1];
- u8 reserved_at_23[0x1];
+ u8 inner_ip_version[0x1];
u8 inner_first_prio[0x1];
u8 inner_first_cfi[0x1];
u8 inner_first_vid[0x1];
@@ -302,7 +304,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
- u8 reserved_at_28[0x10];
+ u8 log_max_modify_header_context[0x8];
+ u8 max_modify_header_actions[0x8];
u8 max_ft_level[0x8];
u8 reserved_at_40[0x20];
@@ -368,7 +371,7 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 cvlan_tag[0x1];
u8 svlan_tag[0x1];
u8 frag[0x1];
- u8 reserved_at_93[0x4];
+ u8 ip_version[0x4];
u8 tcp_flags[0x9];
u8 tcp_sport[0x10];
@@ -869,7 +872,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 compact_address_vector[0x1];
u8 striding_rq[0x1];
- u8 reserved_at_202[0x2];
+ u8 reserved_at_202[0x1];
+ u8 ipoib_enhanced_offloads[0x1];
u8 ipoib_basic_offloads[0x1];
u8 reserved_at_205[0xa];
u8 drain_sigerr[0x1];
@@ -1452,7 +1456,9 @@ struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
u8 vl_15_dropped[0x10];
- u8 reserved_at_a0[0xa0];
+ u8 reserved_at_a0[0x80];
+
+ u8 port_xmit_wait[0x20];
};
struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
@@ -2190,6 +2196,7 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
};
struct mlx5_ifc_flow_context_bits {
@@ -2211,7 +2218,9 @@ struct mlx5_ifc_flow_context_bits {
u8 encap_id[0x20];
- u8 reserved_at_e0[0x120];
+ u8 modify_header_id[0x20];
+
+ u8 reserved_at_100[0x100];
struct mlx5_ifc_fte_match_param_bits match_value;
@@ -2287,7 +2296,9 @@ struct mlx5_ifc_tisc_bits {
u8 reserved_at_120[0x8];
u8 transport_domain[0x18];
- u8 reserved_at_140[0x3c0];
+ u8 reserved_at_140[0x8];
+ u8 underlay_qpn[0x18];
+ u8 reserved_at_160[0x3a0];
};
enum {
@@ -4534,6 +4545,109 @@ struct mlx5_ifc_dealloc_encap_header_in_bits {
u8 reserved_60[0x20];
};
+struct mlx5_ifc_set_action_in_bits {
+ u8 action_type[0x4];
+ u8 field[0xc];
+ u8 reserved_at_10[0x3];
+ u8 offset[0x5];
+ u8 reserved_at_18[0x3];
+ u8 length[0x5];
+
+ u8 data[0x20];
+};
+
+struct mlx5_ifc_add_action_in_bits {
+ u8 action_type[0x4];
+ u8 field[0xc];
+ u8 reserved_at_10[0x10];
+
+ u8 data[0x20];
+};
+
+union mlx5_ifc_set_action_in_add_action_in_auto_bits {
+ struct mlx5_ifc_set_action_in_bits set_action_in;
+ struct mlx5_ifc_add_action_in_bits add_action_in;
+ u8 reserved_at_0[0x40];
+};
+
+enum {
+ MLX5_ACTION_TYPE_SET = 0x1,
+ MLX5_ACTION_TYPE_ADD = 0x2,
+};
+
+enum {
+ MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16 = 0x1,
+ MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0 = 0x2,
+ MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE = 0x3,
+ MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16 = 0x4,
+ MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0 = 0x5,
+ MLX5_ACTION_IN_FIELD_OUT_IP_DSCP = 0x6,
+ MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS = 0x7,
+ MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT = 0x8,
+ MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT = 0x9,
+ MLX5_ACTION_IN_FIELD_OUT_IP_TTL = 0xa,
+ MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT = 0xb,
+ MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT = 0xc,
+ MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96 = 0xd,
+ MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64 = 0xe,
+ MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32 = 0xf,
+ MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0 = 0x10,
+ MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96 = 0x11,
+ MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64 = 0x12,
+ MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32 = 0x13,
+ MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0 = 0x14,
+ MLX5_ACTION_IN_FIELD_OUT_SIPV4 = 0x15,
+ MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16,
+};
+
+struct mlx5_ifc_alloc_modify_header_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 modify_header_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_modify_header_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 table_type[0x8];
+ u8 reserved_at_68[0x10];
+ u8 num_of_actions[0x8];
+
+ union mlx5_ifc_set_action_in_add_action_in_auto_bits actions[0];
+};
+
+struct mlx5_ifc_dealloc_modify_header_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_modify_header_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 modify_header_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_query_dct_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4623,17 +4737,17 @@ struct mlx5_ifc_query_cong_statistics_out_bits {
u8 reserved_at_40[0x40];
- u8 cur_flows[0x20];
+ u8 rp_cur_flows[0x20];
u8 sum_flows[0x20];
- u8 cnp_ignored_high[0x20];
+ u8 rp_cnp_ignored_high[0x20];
- u8 cnp_ignored_low[0x20];
+ u8 rp_cnp_ignored_low[0x20];
- u8 cnp_handled_high[0x20];
+ u8 rp_cnp_handled_high[0x20];
- u8 cnp_handled_low[0x20];
+ u8 rp_cnp_handled_low[0x20];
u8 reserved_at_140[0x100];
@@ -4643,13 +4757,13 @@ struct mlx5_ifc_query_cong_statistics_out_bits {
u8 accumulators_period[0x20];
- u8 ecn_marked_roce_packets_high[0x20];
+ u8 np_ecn_marked_roce_packets_high[0x20];
- u8 ecn_marked_roce_packets_low[0x20];
+ u8 np_ecn_marked_roce_packets_low[0x20];
- u8 cnps_sent_high[0x20];
+ u8 np_cnp_sent_high[0x20];
- u8 cnps_sent_low[0x20];
+ u8 np_cnp_sent_low[0x20];
u8 reserved_at_320[0x560];
};
@@ -5013,6 +5127,7 @@ struct mlx5_ifc_modify_rq_out_bits {
enum {
MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS = 1ULL << 2,
MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID = 1ULL << 3,
};
@@ -8108,7 +8223,9 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
- u8 reserved_at_c0[0x140];
+ u8 reserved_at_c0[0x8];
+ u8 underlay_qpn[0x18];
+ u8 reserved_at_e0[0x120];
};
enum {
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 3096370fe831..bef80d0a0e30 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -295,6 +295,16 @@ struct mlx5_av {
u8 rgid[16];
};
+struct mlx5_ib_ah {
+ struct ib_ah ibah;
+ struct mlx5_av av;
+};
+
+static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct mlx5_ib_ah, ibah);
+}
+
struct mlx5_wqe_datagram_seg {
struct mlx5_av av;
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 00a8fa7e366a..7cb17c6b97de 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -432,6 +432,10 @@ static inline int pud_devmap(pud_t pud)
{
return 0;
}
+static inline int pgd_devmap(pgd_t pgd)
+{
+ return 0;
+}
#endif
/*
@@ -514,6 +518,28 @@ static inline int is_vmalloc_or_module_addr(const void *x)
}
#endif
+extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
+static inline void *kvmalloc(size_t size, gfp_t flags)
+{
+ return kvmalloc_node(size, flags, NUMA_NO_NODE);
+}
+static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
+{
+ return kvmalloc_node(size, flags | __GFP_ZERO, node);
+}
+static inline void *kvzalloc(size_t size, gfp_t flags)
+{
+ return kvmalloc(size, flags | __GFP_ZERO);
+}
+
+static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+ if (size != 0 && n > SIZE_MAX / size)
+ return NULL;
+
+ return kvmalloc(n * size, flags);
+}
+
extern void kvfree(const void *addr);
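These helpers formalize the try-kmalloc-then-fall-back-to-vmalloc pattern that callers such as the mlx5_vzalloc() wrapper earlier in this diff now delegate to. A small usage sketch; the element type and the helper names are arbitrary:

#include <linux/mm.h>
#include <linux/slab.h>	/* GFP_KERNEL, __GFP_ZERO */

struct example_entry {
	u64 key;
	u64 value;
};

static struct example_entry *example_alloc_table(size_t nr)
{
	/* Falls back to vmalloc() transparently for large allocations;
	 * overflow in nr * size is checked by kvmalloc_array() itself.
	 */
	return kvmalloc_array(nr, sizeof(struct example_entry),
			      GFP_KERNEL | __GFP_ZERO);
}

static void example_free_table(struct example_entry *tbl)
{
	kvfree(tbl);	/* works for both kmalloc'ed and vmalloc'ed memory */
}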
static inline atomic_t *compound_mapcount_ptr(struct page *page)
@@ -758,19 +784,11 @@ static inline enum zone_type page_zonenum(const struct page *page)
}
#ifdef CONFIG_ZONE_DEVICE
-void get_zone_device_page(struct page *page);
-void put_zone_device_page(struct page *page);
static inline bool is_zone_device_page(const struct page *page)
{
return page_zonenum(page) == ZONE_DEVICE;
}
#else
-static inline void get_zone_device_page(struct page *page)
-{
-}
-static inline void put_zone_device_page(struct page *page)
-{
-}
static inline bool is_zone_device_page(const struct page *page)
{
return false;
@@ -786,9 +804,6 @@ static inline void get_page(struct page *page)
*/
VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
page_ref_inc(page);
-
- if (unlikely(is_zone_device_page(page)))
- get_zone_device_page(page);
}
static inline void put_page(struct page *page)
@@ -797,9 +812,6 @@ static inline void put_page(struct page *page)
if (put_page_testzero(page))
__put_page(page);
-
- if (unlikely(is_zone_device_page(page)))
- put_zone_device_page(page);
}
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -2497,7 +2509,6 @@ extern long copy_huge_page_from_user(struct page *dst_page,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
extern struct page_ext_operations debug_guardpage_ops;
-extern struct page_ext_operations page_poisoning_ops;
#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index f60f45fe226f..45cdb27791a3 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -367,6 +367,11 @@ struct mm_struct {
#endif
unsigned long mmap_base; /* base of mmap area */
unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
+#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
+ /* Base addresses for compatible mmap() */
+ unsigned long mmap_compat_base;
+ unsigned long mmap_compat_legacy_base;
+#endif
unsigned long task_size; /* size of task vm space */
unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 77e61e0a216a..aad015e0152b 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -89,6 +89,7 @@ struct mmc_ext_csd {
unsigned int boot_ro_lock; /* ro lock support */
bool boot_ro_lockable;
bool ffu_capable; /* Firmware upgrade support */
+ bool cmdq_en; /* Command Queue enabled */
bool cmdq_support; /* Command Queue supported */
unsigned int cmdq_depth; /* Command Queue depth */
#define MMC_FIRMWARE_LEN 8
@@ -208,6 +209,7 @@ struct sdio_cis {
struct mmc_host;
struct sdio_func;
struct sdio_func_tuple;
+struct mmc_queue_req;
#define SDIO_MAX_FUNCS 7
@@ -267,6 +269,8 @@ struct mmc_card {
#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
#define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
+ bool reenable_cmdq; /* Re-enable Command Queue */
+
unsigned int erase_size; /* erase size in sectors */
unsigned int erase_shift; /* if erase unit is power 2 */
unsigned int pref_erase; /* in sectors */
@@ -300,6 +304,10 @@ struct mmc_card {
struct dentry *debugfs_root;
struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
unsigned int nr_parts;
+
+ struct mmc_queue_req *mqrq; /* Shared queue structure */
+ unsigned int bouncesz; /* Bounce buffer size */
+ int qdepth; /* Shared queue depth */
};
static inline bool mmc_large_sector(struct mmc_card *card)
@@ -307,6 +315,8 @@ static inline bool mmc_large_sector(struct mmc_card *card)
return card->ext_csd.data_sector_size == 4096;
}
+bool mmc_card_is_blockaddr(struct mmc_card *card);
+
#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC)
#define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD)
#define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 83f1c4a9f03b..21385ac0c9b1 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -17,6 +17,7 @@
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/pm.h>
+#include <linux/dma-direction.h>
struct mmc_ios {
unsigned int clock; /* clock rate */
@@ -499,6 +500,11 @@ static inline bool mmc_can_retune(struct mmc_host *host)
return host->can_retune == 1;
}
+static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
+{
+ return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+}
+
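mmc_get_dma_dir() centralizes the request-direction-to-DMA-direction mapping that host drivers used to open-code. A sketch of a typical call site when mapping a request's scatterlist; the wrapper function is illustrative:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mmc/host.h>

/* Map the data buffers of a request for DMA in the proper direction. */
static int example_map_data(struct mmc_host *mmc, struct mmc_data *data)
{
	int sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
				  mmc_get_dma_dir(data));

	return sg_count ? sg_count : -ENOMEM;
}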
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
int mmc_abort_tuning(struct mmc_host *host, u32 opcode);
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index aab032a6ae61..97ca105347a6 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -53,7 +53,7 @@ struct sdio_func {
unsigned int state; /* function state */
#define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */
- u8 tmpbuf[4]; /* DMA:able scratch buffer */
+ u8 *tmpbuf; /* DMA:able scratch buffer */
unsigned num_info; /* number of info strings */
const char **info; /* info strings */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 51891fb0d3ce..c91b3bcd158f 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -394,18 +394,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
___pud; \
})
-#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \
-({ \
- unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
- pmd_t ___pmd; \
- \
- ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd); \
- mmu_notifier_invalidate_range(__mm, ___haddr, \
- ___haddr + HPAGE_PMD_SIZE); \
- \
- ___pmd; \
-})
-
/*
* set_pte_at_notify() sets the pte _after_ running the notifier.
* This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -489,7 +477,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
-#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
#define set_pte_at_notify set_pte_at
#endif /* CONFIG_MMU_NOTIFIER */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 8e02b3750fe0..ebaccd4e7d8c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -35,7 +35,7 @@
*/
#define PAGE_ALLOC_COSTLY_ORDER 3
-enum {
+enum migratetype {
MIGRATE_UNMOVABLE,
MIGRATE_MOVABLE,
MIGRATE_RECLAIMABLE,
@@ -74,6 +74,11 @@ extern char * const migratetype_names[MIGRATE_TYPES];
# define is_migrate_cma_page(_page) false
#endif
+static inline bool is_migrate_movable(int mt)
+{
+ return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
+}
+
#define for_each_migratetype_order(order, type) \
for (order = 0; order < MAX_ORDER; order++) \
for (type = 0; type < MIGRATE_TYPES; type++)
@@ -149,7 +154,6 @@ enum node_stat_item {
NR_UNEVICTABLE, /* " " " " " */
NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
- NR_PAGES_SCANNED, /* pages scanned since last reclaim */
WORKINGSET_REFAULT,
WORKINGSET_ACTIVATE,
WORKINGSET_NODERECLAIM,
@@ -226,6 +230,8 @@ struct lruvec {
struct zone_reclaim_stat reclaim_stat;
/* Evictions & activations on the inactive file list */
atomic_long_t inactive_age;
+ /* Refaults at the time of last reclaim cycle */
+ unsigned long refaults;
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
@@ -630,6 +636,8 @@ typedef struct pglist_data {
int kswapd_order;
enum zone_type kswapd_classzone_idx;
+ int kswapd_failures; /* Number of 'reclaimed == 0' runs */
+
#ifdef CONFIG_COMPACTION
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 8850fcaf50db..566fda587fcf 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -428,6 +428,16 @@ struct i2c_device_id {
kernel_ulong_t driver_data; /* Data private to the driver */
};
+/* pci_epf */
+
+#define PCI_EPF_NAME_SIZE 20
+#define PCI_EPF_MODULE_PREFIX "pci_epf:"
+
+struct pci_epf_device_id {
+ char name[PCI_EPF_NAME_SIZE];
+ kernel_ulong_t driver_data;
+};
+
/* spi */
#define SPI_NAME_SIZE 32
diff --git a/include/linux/module.h b/include/linux/module.h
index 0297c5cd7cdf..21f56393602f 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -493,6 +493,7 @@ static inline int module_is_live(struct module *mod)
struct module *__module_text_address(unsigned long addr);
struct module *__module_address(unsigned long addr);
bool is_module_address(unsigned long addr);
+bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr);
bool is_module_percpu_address(unsigned long addr);
bool is_module_text_address(unsigned long addr);
@@ -582,7 +583,7 @@ extern bool try_module_get(struct module *module);
extern void module_put(struct module *module);
#else /*!CONFIG_MODULE_UNLOAD*/
-static inline int try_module_get(struct module *module)
+static inline bool try_module_get(struct module *module)
{
return !module || module_is_live(module);
}
@@ -660,6 +661,11 @@ static inline bool is_module_percpu_address(unsigned long addr)
return false;
}
+static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
+{
+ return false;
+}
+
static inline bool is_module_text_address(unsigned long addr)
{
return false;
@@ -674,9 +680,9 @@ static inline void __module_get(struct module *module)
{
}
-static inline int try_module_get(struct module *module)
+static inline bool try_module_get(struct module *module)
{
- return 1;
+ return true;
}
static inline void module_put(struct module *module)
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 52666d90ca94..6be1949ebcdf 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -60,9 +60,11 @@ struct kernel_param_ops {
* Flags available for kernel_param
*
* UNSAFE - the parameter is dangerous and setting it will taint the kernel
+ * HWPARAM - Hardware param not permitted in lockdown mode
*/
enum {
- KERNEL_PARAM_FL_UNSAFE = (1 << 0)
+ KERNEL_PARAM_FL_UNSAFE = (1 << 0),
+ KERNEL_PARAM_FL_HWPARAM = (1 << 1),
};
struct kernel_param {
@@ -451,6 +453,67 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
perm, -1, 0); \
__MODULE_PARM_TYPE(name, "array of " #type)
+enum hwparam_type {
+ hwparam_ioport, /* Module parameter configures an I/O port */
+ hwparam_iomem, /* Module parameter configures an I/O mem address */
+ hwparam_ioport_or_iomem, /* Module parameter could be either, depending on other option */
+ hwparam_irq, /* Module parameter configures an IRQ */
+ hwparam_dma, /* Module parameter configures a DMA channel */
+ hwparam_dma_addr, /* Module parameter configures a DMA buffer address */
+ hwparam_other, /* Module parameter configures some other value */
+};
+
+/**
+ * module_param_hw_named - A parameter representing a hw parameter
+ * @name: a valid C identifier which is the parameter name.
+ * @value: the actual lvalue to alter.
+ * @type: the type of the parameter
+ * @hwtype: what the value represents (enum hwparam_type)
+ * @perm: visibility in sysfs.
+ *
+ * Usually it's a good idea to have variable names and user-exposed names the
+ * same, but that's harder if the variable must be non-static or is inside a
+ * structure. This allows exposure under a different name.
+ */
+#define module_param_hw_named(name, value, type, hwtype, perm) \
+ param_check_##type(name, &(value)); \
+ __module_param_call(MODULE_PARAM_PREFIX, name, \
+ &param_ops_##type, &value, \
+ perm, -1, \
+ KERNEL_PARAM_FL_HWPARAM | (hwparam_##hwtype & 0)); \
+ __MODULE_PARM_TYPE(name, #type)
+
+#define module_param_hw(name, type, hwtype, perm) \
+ module_param_hw_named(name, name, type, hwtype, perm)
+
+/**
+ * module_param_hw_array - A parameter representing an array of hw parameters
+ * @name: the name of the array variable
+ * @type: the type, as per module_param()
+ * @hwtype: what the value represents (enum hwparam_type)
+ * @nump: optional pointer filled in with the number written
+ * @perm: visibility in sysfs
+ *
+ * Input and output are as comma-separated values. Commas inside values
+ * don't work properly (e.g. an array of charp).
+ *
+ * ARRAY_SIZE(@name) is used to determine the number of elements in the
+ * array, so the definition must be visible.
+ */
+#define module_param_hw_array(name, type, hwtype, nump, perm) \
+ param_check_##type(name, &(name)[0]); \
+ static const struct kparam_array __param_arr_##name \
+ = { .max = ARRAY_SIZE(name), .num = nump, \
+ .ops = &param_ops_##type, \
+ .elemsize = sizeof(name[0]), .elem = name }; \
+ __module_param_call(MODULE_PARAM_PREFIX, name, \
+ &param_array_ops, \
+ .arr = &__param_arr_##name, \
+ perm, -1, \
+ KERNEL_PARAM_FL_HWPARAM | (hwparam_##hwtype & 0)); \
+ __MODULE_PARM_TYPE(name, "array of " #type)
+
+
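These annotations let tooling identify module parameters that hand hardware resources to a driver. A hedged example of how a legacy ISA-style driver might declare its parameters with the new macros; the variable names and default values are illustrative:

#include <linux/moduleparam.h>

static int io = 0x300;			/* I/O port base */
static int irq = 5;			/* interrupt line */
static int dma[2] = { 3, 5 };		/* DMA channels */
static unsigned int dma_count;

module_param_hw(io, int, ioport, 0444);
module_param_hw(irq, int, irq, 0444);
module_param_hw_array(dma, int, dma, &dma_count, 0444);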
extern const struct kernel_param_ops param_array_ops;
extern const struct kernel_param_ops param_ops_string;
diff --git a/include/linux/mpls.h b/include/linux/mpls.h
index 9999145bc190..384fb22b6c43 100644
--- a/include/linux/mpls.h
+++ b/include/linux/mpls.h
@@ -3,4 +3,9 @@
#include <uapi/linux/mpls.h>
+#define MPLS_TTL_MASK (MPLS_LS_TTL_MASK >> MPLS_LS_TTL_SHIFT)
+#define MPLS_BOS_MASK (MPLS_LS_S_MASK >> MPLS_LS_S_SHIFT)
+#define MPLS_TC_MASK (MPLS_LS_TC_MASK >> MPLS_LS_TC_SHIFT)
+#define MPLS_LABEL_MASK (MPLS_LS_LABEL_MASK >> MPLS_LS_LABEL_SHIFT)
+
#endif /* _LINUX_MPLS_H */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index eebdc63cf6af..f8a2ef239c60 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -334,11 +334,6 @@ struct mtd_info {
int (*_get_device) (struct mtd_info *mtd);
void (*_put_device) (struct mtd_info *mtd);
- /* Backing device capabilities for this device
- * - provides mmap capabilities
- */
- struct backing_dev_info *backing_dev_info;
-
struct notifier_block reboot_notifier; /* default mode before reboot */
/* ECC status information */
@@ -393,7 +388,7 @@ static inline void mtd_set_of_node(struct mtd_info *mtd,
static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
{
- return mtd->dev.of_node;
+ return dev_of_node(&mtd->dev);
}
static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 9591e0fbe5bd..8f67b1581683 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -366,26 +366,6 @@ struct onfi_ext_param_page {
*/
} __packed;
-struct nand_onfi_vendor_micron {
- u8 two_plane_read;
- u8 read_cache;
- u8 read_unique_id;
- u8 dq_imped;
- u8 dq_imped_num_settings;
- u8 dq_imped_feat_addr;
- u8 rb_pulldown_strength;
- u8 rb_pulldown_strength_feat_addr;
- u8 rb_pulldown_strength_num_settings;
- u8 otp_mode;
- u8 otp_page_start;
- u8 otp_data_prot_addr;
- u8 otp_num_pages;
- u8 otp_feat_addr;
- u8 read_retry_options;
- u8 reserved[72];
- u8 param_revision;
-} __packed;
-
struct jedec_ecc_info {
u8 ecc_bits;
u8 codeword_size;
@@ -465,6 +445,17 @@ struct nand_jedec_params {
} __packed;
/**
+ * struct nand_id - NAND id structure
+ * @data: buffer containing the id bytes. Currently 8 bytes large, but can
+ * be extended if required.
+ * @len: ID length.
+ */
+struct nand_id {
+ u8 data[8];
+ int len;
+};
+
+/**
* struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices
* @lock: protection lock
* @active: the mtd device which holds the controller currently
@@ -525,7 +516,7 @@ static inline void nand_hw_control_init(struct nand_hw_control *nfc)
* out-of-band data).
* @read_page: function to read a page according to the ECC generator
* requirements; returns maximum number of bitflips corrected in
- * any single ECC step, 0 if bitflips uncorrectable, -EIO hw error
+ * any single ECC step, -EIO hw error
* @read_subpage: function to read parts of the page covered by ECC;
* returns same as read_page()
* @write_subpage: function to write parts of the page covered by ECC.
@@ -721,6 +712,20 @@ nand_get_sdr_timings(const struct nand_data_interface *conf)
}
/**
+ * struct nand_manufacturer_ops - NAND Manufacturer operations
+ * @detect: detect the NAND memory organization and capabilities
+ * @init: initialize all vendor specific fields (like the ->read_retry()
+ * implementation) if any.
+ * @cleanup: the ->init() function may have allocated resources, ->cleanup()
+ * is here to let vendor specific code release those resources.
+ */
+struct nand_manufacturer_ops {
+ void (*detect)(struct nand_chip *chip);
+ int (*init)(struct nand_chip *chip);
+ void (*cleanup)(struct nand_chip *chip);
+};
+
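nand_manufacturer_ops gives each vendor a hook point for ID decoding and chip-specific setup. A hedged sketch of what an implementation might look like; the vendor checks and the private data are purely illustrative (real implementations live under drivers/mtd/nand/):

#include <linux/errno.h>
#include <linux/mtd/nand.h>
#include <linux/slab.h>

struct example_vendor_data {
	bool supports_read_retry;
};

static void example_nand_detect(struct nand_chip *chip)
{
	/* Fall back to the generic extended-ID decoding declared later
	 * in this header.
	 */
	nand_decode_ext_id(chip);
}

static int example_nand_init(struct nand_chip *chip)
{
	struct example_vendor_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Illustrative: pretend the second ID byte gates a feature. */
	data->supports_read_retry = chip->id.len > 1 && chip->id.data[1] & 0x1;
	nand_set_manufacturer_data(chip, data);
	return 0;
}

static void example_nand_cleanup(struct nand_chip *chip)
{
	kfree(nand_get_manufacturer_data(chip));
}

static const struct nand_manufacturer_ops example_nand_manuf_ops = {
	.detect = example_nand_detect,
	.init = example_nand_init,
	.cleanup = example_nand_cleanup,
};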
+/**
* struct nand_chip - NAND Private Flash Chip Data
* @mtd: MTD device registered to the MTD framework
* @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the
@@ -750,6 +755,7 @@ nand_get_sdr_timings(const struct nand_data_interface *conf)
* setting the read-retry mode. Mostly needed for MLC NAND.
* @ecc: [BOARDSPECIFIC] ECC control structure
* @buffers: buffer structure for read/write
+ * @buf_align: minimum buffer alignment required by a platform
* @hwcontrol: platform-specific hardware control structure
* @erase: [REPLACEABLE] erase function
* @scan_bbt: [REPLACEABLE] function to scan bad block table
@@ -793,6 +799,7 @@ nand_get_sdr_timings(const struct nand_data_interface *conf)
* @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is
* currently in data_buf.
* @subpagesize: [INTERN] holds the subpagesize
+ * @id: [INTERN] holds NAND ID
* @onfi_version: [INTERN] holds the chip ONFI version (BCD encoded),
* non 0 if ONFI supported.
* @jedec_version: [INTERN] holds the chip JEDEC version (BCD encoded),
@@ -822,7 +829,7 @@ nand_get_sdr_timings(const struct nand_data_interface *conf)
* @errstat: [OPTIONAL] hardware specific function to perform
* additional error status checks (determine if errors are
* correctable).
- * @write_page: [REPLACEABLE] High-level page write function
+ * @manufacturer: [INTERN] Contains manufacturer information
*/
struct nand_chip {
@@ -847,9 +854,6 @@ struct nand_chip {
int (*scan_bbt)(struct mtd_info *mtd);
int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state,
int status, int page);
- int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offset, int data_len, const uint8_t *buf,
- int oob_required, int page, int cached, int raw);
int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
int feature_addr, uint8_t *subfeature_para);
int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -881,6 +885,7 @@ struct nand_chip {
int badblockpos;
int badblockbits;
+ struct nand_id id;
int onfi_version;
int jedec_version;
union {
@@ -901,6 +906,7 @@ struct nand_chip {
struct nand_ecc_ctrl ecc;
struct nand_buffers *buffers;
+ unsigned long buf_align;
struct nand_hw_control hwcontrol;
uint8_t *bbt;
@@ -910,6 +916,11 @@ struct nand_chip {
struct nand_bbt_descr *badblock_pattern;
void *priv;
+
+ struct {
+ const struct nand_manufacturer *desc;
+ void *priv;
+ } manufacturer;
};
extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
@@ -946,6 +957,17 @@ static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
chip->priv = priv;
}
+static inline void nand_set_manufacturer_data(struct nand_chip *chip,
+ void *priv)
+{
+ chip->manufacturer.priv = priv;
+}
+
+static inline void *nand_get_manufacturer_data(struct nand_chip *chip)
+{
+ return chip->manufacturer.priv;
+}
+
/*
* NAND Flash Manufacturer ID Codes
*/
@@ -1049,17 +1071,33 @@ struct nand_flash_dev {
};
/**
- * struct nand_manufacturers - NAND Flash Manufacturer ID Structure
+ * struct nand_manufacturer - NAND Flash Manufacturer structure
* @name: Manufacturer name
* @id: manufacturer ID code of device.
+ * @ops: manufacturer operations
*/
-struct nand_manufacturers {
+struct nand_manufacturer {
int id;
char *name;
+ const struct nand_manufacturer_ops *ops;
};
+const struct nand_manufacturer *nand_get_manufacturer(u8 id);
+
+static inline const char *
+nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
+{
+ return manufacturer ? manufacturer->name : "Unknown";
+}
+
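
Taken together with the manufacturer-data accessors earlier in this header, the lookup helpers might be used roughly as in this sketch; the function, the maf_id parameter and vendor_priv are illustrative, not part of the header.

static int example_ident_vendor(struct nand_chip *chip, u8 maf_id,
                                void *vendor_priv)
{
        const struct nand_manufacturer *maf = nand_get_manufacturer(maf_id);

        pr_info("NAND vendor: %s\n", nand_manufacturer_name(maf));

        /* stash per-chip vendor state for later retrieval */
        nand_set_manufacturer_data(chip, vendor_priv);

        if (maf && maf->ops && maf->ops->init)
                return maf->ops->init(chip);
        return 0;
}
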
extern struct nand_flash_dev nand_flash_ids[];
-extern struct nand_manufacturers nand_manuf_ids[];
+
+extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops;
+extern const struct nand_manufacturer_ops samsung_nand_manuf_ops;
+extern const struct nand_manufacturer_ops hynix_nand_manuf_ops;
+extern const struct nand_manufacturer_ops micron_nand_manuf_ops;
+extern const struct nand_manufacturer_ops amd_nand_manuf_ops;
+extern const struct nand_manufacturer_ops macronix_nand_manuf_ops;
int nand_default_bbt(struct mtd_info *mtd);
int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
@@ -1226,4 +1264,6 @@ int nand_reset(struct nand_chip *chip, int chipnr);
/* Free resources held by the NAND device */
void nand_cleanup(struct nand_chip *chip);
+/* Default extended ID decoding function */
+void nand_decode_ext_id(struct nand_chip *chip);
#endif /* __LINUX_MTD_NAND_H */
diff --git a/include/linux/namei.h b/include/linux/namei.h
index f29abda31e6d..8b4794e83196 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -44,6 +44,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
#define LOOKUP_JUMPED 0x1000
#define LOOKUP_ROOT 0x2000
#define LOOKUP_EMPTY 0x4000
+#define LOOKUP_DOWN 0x8000
extern int path_pts(struct path *path);
diff --git a/include/linux/nd.h b/include/linux/nd.h
index fa66aeed441a..194b8e002ea7 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -48,7 +48,7 @@ struct nd_namespace_common {
struct device dev;
struct device *claim;
int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset,
- void *buf, size_t size, int rw);
+ void *buf, size_t size, int rw, unsigned long flags);
};
static inline struct nd_namespace_common *to_ndns(struct device *dev)
@@ -134,9 +134,10 @@ static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device *
* @buf is up-to-date upon return from this routine.
*/
static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
- resource_size_t offset, void *buf, size_t size)
+ resource_size_t offset, void *buf, size_t size,
+ unsigned long flags)
{
- return ndns->rw_bytes(ndns, offset, buf, size, READ);
+ return ndns->rw_bytes(ndns, offset, buf, size, READ, flags);
}
/**
@@ -152,9 +153,10 @@ static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
* to media is handled internal to the @ndns driver, if at all.
*/
static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns,
- resource_size_t offset, void *buf, size_t size)
+ resource_size_t offset, void *buf, size_t size,
+ unsigned long flags)
{
- return ndns->rw_bytes(ndns, offset, buf, size, WRITE);
+ return ndns->rw_bytes(ndns, offset, buf, size, WRITE, flags);
}
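
The extra flags argument is simply threaded through to ->rw_bytes(); a caller with no special requirements can pass 0. A minimal sketch (the function name and the fixed offset are made up):

static int example_rw(struct nd_namespace_common *ndns, void *buf, size_t len)
{
        int rc;

        rc = nvdimm_read_bytes(ndns, 0, buf, len, 0);
        if (rc)
                return rc;

        return nvdimm_write_bytes(ndns, 0, buf, len, 0);
}
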
#define MODULE_ALIAS_ND_DEVICE(type) \
diff --git a/include/linux/net.h b/include/linux/net.h
index 0620f5e18c96..abcfa46a2bd9 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -298,6 +298,9 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset,
int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
+/* Routine returns the IP overhead imposed by a (caller-protected) socket. */
+u32 kernel_sock_ip_overhead(struct sock *sk);
+
#define MODULE_ALIAS_NETPROTO(proto) \
MODULE_ALIAS("net-pf-" __stringify(proto))
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 9a0419594e84..1d4737cffc71 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -54,8 +54,9 @@ enum {
*/
NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */
+ NETIF_F_GSO_ESP_BIT, /* ... ESP with TSO */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
- NETIF_F_GSO_SCTP_BIT,
+ NETIF_F_GSO_ESP_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
@@ -73,6 +74,8 @@ enum {
NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */
+ NETIF_F_HW_ESP_BIT, /* Hardware ESP transformation offload */
+ NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */
/*
* Add your fresh new feature above and remember to update
@@ -129,11 +132,14 @@ enum {
#define NETIF_F_GSO_PARTIAL __NETIF_F(GSO_PARTIAL)
#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
#define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP)
+#define NETIF_F_GSO_ESP __NETIF_F(GSO_ESP)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
#define NETIF_F_HW_TC __NETIF_F(HW_TC)
+#define NETIF_F_HW_ESP __NETIF_F(HW_ESP)
+#define NETIF_F_HW_ESP_TX_CSUM __NETIF_F(HW_ESP_TX_CSUM)
#define for_each_netdev_feature(mask_addr, bit) \
for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 97456b2539e4..3f39d27decf4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -41,7 +41,6 @@
#include <linux/ethtool.h>
#include <net/net_namespace.h>
-#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
@@ -57,6 +56,8 @@
struct netpoll_info;
struct device;
struct phy_device;
+struct dsa_switch_tree;
+
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
@@ -236,8 +237,7 @@ struct netdev_hw_addr_list {
netdev_hw_addr_list_for_each(ha, &(dev)->mc)
struct hh_cache {
- u16 hh_len;
- u16 __pad;
+ unsigned int hh_len;
seqlock_t hh_lock;
/* cached hardware header; allow for machine alignment needs. */
@@ -786,11 +786,11 @@ struct tc_cls_u32_offload;
struct tc_to_netdev {
unsigned int type;
union {
- u8 tc;
struct tc_cls_u32_offload *cls_u32;
struct tc_cls_flower_offload *cls_flower;
struct tc_cls_matchall_offload *cls_mall;
struct tc_cls_bpf_offload *cls_bpf;
+ struct tc_mqprio_qopt *mqprio;
};
bool egress_dev;
};
@@ -813,16 +813,31 @@ enum xdp_netdev_command {
XDP_QUERY_PROG,
};
+struct netlink_ext_ack;
+
struct netdev_xdp {
enum xdp_netdev_command command;
union {
/* XDP_SETUP_PROG */
- struct bpf_prog *prog;
+ struct {
+ struct bpf_prog *prog;
+ struct netlink_ext_ack *extack;
+ };
/* XDP_QUERY_PROG */
bool prog_attached;
};
};
+#ifdef CONFIG_XFRM_OFFLOAD
+struct xfrmdev_ops {
+ int (*xdo_dev_state_add) (struct xfrm_state *x);
+ void (*xdo_dev_state_delete) (struct xfrm_state *x);
+ void (*xdo_dev_state_free) (struct xfrm_state *x);
+ bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
+ struct xfrm_state *x);
+};
+#endif
+
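
A rough sketch of how a NIC driver might wire up these hooks and advertise the matching feature bit from netdev_features.h; the foo_* names are invented and the stubs only show the expected signatures.

#ifdef CONFIG_XFRM_OFFLOAD
static int foo_xfrm_add_state(struct xfrm_state *x)
{
        return -EOPNOTSUPP;     /* program the SA into hardware here */
}

static void foo_xfrm_del_state(struct xfrm_state *x) { }
static void foo_xfrm_free_state(struct xfrm_state *x) { }

static bool foo_xfrm_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        return false;           /* only claim packets the hardware can handle */
}

static const struct xfrmdev_ops foo_xfrmdev_ops = {
        .xdo_dev_state_add      = foo_xfrm_add_state,
        .xdo_dev_state_delete   = foo_xfrm_del_state,
        .xdo_dev_state_free     = foo_xfrm_free_state,
        .xdo_dev_offload_ok     = foo_xfrm_offload_ok,
};

static void foo_setup_esp_offload(struct net_device *netdev)
{
        netdev->xfrmdev_ops = &foo_xfrmdev_ops;
        netdev->features    |= NETIF_F_HW_ESP;
        netdev->hw_features |= NETIF_F_HW_ESP;
}
#endif
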
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
@@ -1696,6 +1711,10 @@ struct net_device {
const struct ndisc_ops *ndisc_ops;
#endif
+#ifdef CONFIG_XFRM
+ const struct xfrmdev_ops *xfrmdev_ops;
+#endif
+
const struct header_ops *header_ops;
unsigned int flags;
@@ -1715,7 +1734,7 @@ struct net_device {
unsigned int max_mtu;
unsigned short type;
unsigned short hard_header_len;
- unsigned short min_header_len;
+ unsigned char min_header_len;
unsigned short needed_headroom;
unsigned short needed_tailroom;
@@ -1776,6 +1795,7 @@ struct net_device {
unsigned int real_num_rx_queues;
#endif
+ struct bpf_prog __rcu *xdp_prog;
unsigned long gro_flush_timeout;
rx_handler_func_t __rcu *rx_handler;
void __rcu *rx_handler_data;
@@ -1894,6 +1914,13 @@ struct net_device {
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
+static inline bool netif_elide_gro(const struct net_device *dev)
+{
+ if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
+ return true;
+ return false;
+}
+
#define NETDEV_ALIGN 32
static inline
@@ -2004,15 +2031,6 @@ void dev_net_set(struct net_device *dev, struct net *net)
write_pnet(&dev->nd_net, net);
}
-static inline bool netdev_uses_dsa(struct net_device *dev)
-{
-#if IS_ENABLED(CONFIG_NET_DSA)
- if (dev->dsa_ptr != NULL)
- return dsa_uses_tagged_protocol(dev->dsa_ptr);
-#endif
- return false;
-}
-
/**
* netdev_priv - access network device private data
* @dev: network device
@@ -3278,10 +3296,15 @@ int dev_get_phys_port_id(struct net_device *dev,
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
-int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
+
+typedef int (*xdp_op_t)(struct net_device *dev, struct netdev_xdp *xdp);
+int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
+ int fd, u32 flags);
+bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op);
+
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
@@ -3305,6 +3328,7 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev,
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
extern int netdev_budget;
+extern unsigned int netdev_budget_usecs;
/* Called by rtnetlink.c:rtnl_unlock() */
void netdev_run_todo(void);
@@ -3394,10 +3418,10 @@ static inline void netif_dormant_off(struct net_device *dev)
}
/**
- * netif_dormant - test if carrier present
+ * netif_dormant - test if device is dormant
* @dev: network device
*
- * Check if carrier is present on device
+ * Check if device is dormant.
*/
static inline bool netif_dormant(const struct net_device *dev)
{
@@ -4078,6 +4102,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
}
@@ -4180,6 +4205,11 @@ static inline bool netif_is_ovs_master(const struct net_device *dev)
return dev->priv_flags & IFF_OPENVSWITCH;
}
+static inline bool netif_is_ovs_port(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_OVS_DATAPATH;
+}
+
static inline bool netif_is_team_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_TEAM;
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 1b49209dd5c7..996711d8a7b4 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -41,6 +41,11 @@ int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
int flags);
+static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type)
+{
+ return subsys << 8 | msg_type;
+}
+
void nfnl_lock(__u8 subsys_id);
void nfnl_unlock(__u8 subsys_id);
#ifdef CONFIG_PROVE_LOCKING
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index be378cf47fcc..b3044c2c62cb 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -294,7 +294,7 @@ int xt_match_to_user(const struct xt_entry_match *m,
int xt_target_to_user(const struct xt_entry_target *t,
struct xt_entry_target __user *u);
int xt_data_to_user(void __user *dst, const void *src,
- int usersize, int size);
+ int usersize, int size, int aligned_size);
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
struct xt_counters_info *info, bool compat);
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 984b2112c77b..e0cbf17af780 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -109,8 +109,10 @@ struct ebt_table {
#define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \
~(__alignof__(struct _xt_align)-1))
extern struct ebt_table *ebt_register_table(struct net *net,
- const struct ebt_table *table);
-extern void ebt_unregister_table(struct net *net, struct ebt_table *table);
+ const struct ebt_table *table,
+ const struct nf_hook_ops *);
+extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
+ const struct nf_hook_ops *);
extern unsigned int ebt_do_table(struct sk_buff *skb,
const struct nf_hook_state *state,
struct ebt_table *table);
@@ -123,4 +125,9 @@ extern unsigned int ebt_do_table(struct sk_buff *skb,
/* True if the target is not a standard target */
#define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)
+static inline bool ebt_invalid_target(int target)
+{
+ return (target < -NUM_STANDARD_TARGETS || target >= 0);
+}
+
#endif
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index da14ab61f363..5fff5ba5964e 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -62,11 +62,47 @@ netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
return __netlink_kernel_create(net, unit, THIS_MODULE, cfg);
}
+/* this can be increased when necessary - don't expose to userland */
+#define NETLINK_MAX_COOKIE_LEN 20
+
+/**
+ * struct netlink_ext_ack - netlink extended ACK report struct
+ * @_msg: message string to report - don't access directly, use
+ * %NL_SET_ERR_MSG
+ * @bad_attr: attribute with error
+ * @cookie: cookie data to return to userspace (for success)
+ * @cookie_len: actual cookie data length
+ */
+struct netlink_ext_ack {
+ const char *_msg;
+ const struct nlattr *bad_attr;
+ u8 cookie[NETLINK_MAX_COOKIE_LEN];
+ u8 cookie_len;
+};
+
+/* Always use this macro; it allows later putting the
+ * message into a separate section or such for things
+ * like translation or listing all possible messages.
+ * Currently string formatting is not supported (due
+ * to the lack of an output buffer).
+ */
+#define NL_SET_ERR_MSG(extack, msg) do { \
+ static const char __msg[] = (msg); \
+ struct netlink_ext_ack *__extack = (extack); \
+ \
+ if (__extack) \
+ __extack->_msg = __msg; \
+} while (0)
+
+#define NL_SET_ERR_MSG_MOD(extack, msg) \
+ NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg)
+
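
A small usage sketch: an operation handler that has been handed the extack pointer fills in a message before failing, and netlink_ack() (whose new signature appears below) carries it back to userspace. The handler and its check are hypothetical.

static int foo_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                    struct netlink_ext_ack *extack)
{
        if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
                NL_SET_ERR_MSG(extack, "not a request message");
                return -EINVAL;
        }
        return 0;
}
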
extern void netlink_kernel_release(struct sock *sk);
extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
-extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
+extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
+ const struct netlink_ext_ack *extack);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 287f34161086..bb0eb2c9acca 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -76,6 +76,7 @@ struct nfs_open_context {
#define NFS_CONTEXT_ERROR_WRITE (0)
#define NFS_CONTEXT_RESEND_WRITES (1)
#define NFS_CONTEXT_BAD (2)
+#define NFS_CONTEXT_UNLOCK (3)
int error;
struct list_head list;
@@ -499,25 +500,13 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
*/
extern int nfs_sync_inode(struct inode *inode);
extern int nfs_wb_all(struct inode *inode);
-extern int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder);
+extern int nfs_wb_page(struct inode *inode, struct page *page);
extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
extern int nfs_commit_inode(struct inode *, int);
-extern struct nfs_commit_data *nfs_commitdata_alloc(void);
+extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail);
extern void nfs_commit_free(struct nfs_commit_data *data);
static inline int
-nfs_wb_launder_page(struct inode *inode, struct page *page)
-{
- return nfs_wb_single_page(inode, page, true);
-}
-
-static inline int
-nfs_wb_page(struct inode *inode, struct page *page)
-{
- return nfs_wb_single_page(inode, page, false);
-}
-
-static inline int
nfs_have_writebacks(struct inode *inode)
{
return NFS_I(inode)->nrequests != 0;
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index b34097c67848..e418a1096662 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -133,7 +133,6 @@ struct nfs_server {
struct rpc_clnt * client_acl; /* ACL RPC client handle */
struct nlm_host *nlm_host; /* NLM client handle */
struct nfs_iostats __percpu *io_stats; /* I/O statistics */
- struct backing_dev_info backing_dev_info;
atomic_long_t writeback; /* number of writeback pages */
int flags; /* various flags */
unsigned int caps; /* server capabilities */
@@ -222,6 +221,7 @@ struct nfs_server {
u32 mountd_version;
unsigned short mountd_port;
unsigned short mountd_protocol;
+ struct rpc_wait_queue uoc_rpcwaitq;
};
/* Server capabilities */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 957049f72290..247cc3d3498f 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -64,7 +64,6 @@ struct nfs_pageio_ops {
};
struct nfs_rw_ops {
- const fmode_t rw_mode;
struct nfs_pgio_header *(*rw_alloc_header)(void);
void (*rw_free_header)(struct nfs_pgio_header *);
int (*rw_done)(struct rpc_task *, struct nfs_pgio_header *,
@@ -124,7 +123,8 @@ extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
const struct nfs_pgio_completion_ops *compl_ops,
const struct nfs_rw_ops *rw_ops,
size_t bsize,
- int how);
+ int how,
+ gfp_t gfp_flags);
extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
struct nfs_page *);
extern int nfs_pageio_resend(struct nfs_pageio_descriptor *,
@@ -141,6 +141,7 @@ extern int nfs_page_group_lock(struct nfs_page *, bool);
extern void nfs_page_group_lock_wait(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
+extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
/*
* Lock the page of an asynchronous request
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 348f7c158084..b28c83475ee8 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1383,6 +1383,7 @@ struct nfs42_copy_res {
struct nfs42_write_res write_res;
bool consecutive;
bool synchronous;
+ struct nfs_commitres commit_res;
};
struct nfs42_seek_args {
@@ -1427,6 +1428,7 @@ struct nfs_pgio_header {
struct list_head pages;
struct nfs_page *req;
struct nfs_writeverf verf; /* Used for writes */
+ fmode_t rw_mode;
struct pnfs_layout_segment *lseg;
loff_t io_start;
const struct rpc_call_ops *mds_ops;
@@ -1550,6 +1552,7 @@ struct nfs_rpc_ops {
const struct inode_operations *dir_inode_ops;
const struct inode_operations *file_inode_ops;
const struct file_operations *file_ops;
+ const struct nlmclnt_operations *nlmclnt_ops;
int (*getroot) (struct nfs_server *, struct nfs_fh *,
struct nfs_fsinfo *);
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index f21471f7ee40..6c8c5d8041b7 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -27,8 +27,8 @@
/* FC Port role bitmask - can merge with FC Port Roles in fc transport */
#define FC_PORT_ROLE_NVME_INITIATOR 0x10
-#define FC_PORT_ROLE_NVME_TARGET 0x11
-#define FC_PORT_ROLE_NVME_DISCOVERY 0x12
+#define FC_PORT_ROLE_NVME_TARGET 0x20
+#define FC_PORT_ROLE_NVME_DISCOVERY 0x40
/**
@@ -137,9 +137,9 @@ enum nvmefc_fcp_datadir {
* transferred. Should equal payload_length on success.
* @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
* @status: Completion status of the FCP operation. must be 0 upon success,
- * NVME_SC_FC_xxx value upon failure. Note: this is NOT a
- * reflection of the NVME CQE completion status. Only the status
- * of the FCP operation at the NVME-FC level.
+ * negative errno value upon failure (ex: -EIO). Note: this is
+ * NOT a reflection of the NVME CQE completion status. Only the
+ * status of the FCP operation at the NVME-FC level.
*/
struct nvmefc_fcp_req {
void *cmdaddr;
@@ -533,9 +533,6 @@ enum {
* rsp as well
*/
NVMET_FCOP_RSP = 4, /* send rsp frame */
- NVMET_FCOP_ABORT = 5, /* abort exchange via ABTS */
- NVMET_FCOP_BA_ACC = 6, /* send BA_ACC */
- NVMET_FCOP_BA_RJT = 7, /* send BA_RJT */
};
/**
@@ -572,8 +569,6 @@ enum {
* upon completion of the operation. The nvmet-fc layer will also set a
* private pointer for its own use in the done routine.
*
- * Note: the LLDD must never fail a NVMET_FCOP_ABORT request !!
- *
* Values set by the NVMET-FC layer prior to calling the LLDD fcp_op
* entrypoint.
* @op: Indicates the FCP IU operation to perform (see NVMET_FCOP_xxx)
@@ -647,13 +642,21 @@ enum {
* sequence in one LLDD operation. Errors during Data
* sequence transmit must not allow RSP sequence to be sent.
*/
- NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED = (1 << 1),
- /* Bit 1: When 0, the LLDD will deliver FCP CMD
- * on the CPU it should be affinitized to. Thus work will
- * be scheduled on the cpu received on. When 1, the LLDD
- * may not deliver the CMD on the CPU it should be worked
- * on. The transport should pick a cpu to schedule the work
- * on.
+ NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 1),
+ /* Bit 2: When 0, the LLDD is calling the cmd rcv handler
+ * in a non-isr context, allowing the transport to finish
+ * op completion in the calling context. When 1, the LLDD
+ * is calling the cmd rcv handler in an ISR context,
+ * requiring the transport to transition to a workqueue
+ * for op completion.
+ */
+ NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 2),
+ /* Bit 3: When 0, the LLDD is calling the op done handler
+ * in a non-isr context, allowing the transport to finish
+ * op completion in the calling context. When 1, the LLDD
+ * is calling the op done handler in an ISR context,
+ * requiring the transport to transition to a workqueue
+ * for op completion.
*/
};
@@ -725,12 +728,12 @@ struct nvmet_fc_target_port {
* be freed/released.
* Entrypoint is Mandatory.
*
- * @fcp_op: Called to perform a data transfer, transmit a response, or
- * abort an FCP opertion. The nvmefc_tgt_fcp_req structure is the same
- * LLDD-supplied exchange structure specified in the
- * nvmet_fc_rcv_fcp_req() call made when the FCP CMD IU was received.
- * The op field in the structure shall indicate the operation for
- * the LLDD to perform relative to the io.
+ * @fcp_op: Called to perform a data transfer or transmit a response.
+ * The nvmefc_tgt_fcp_req structure is the same LLDD-supplied
+ * exchange structure specified in the nvmet_fc_rcv_fcp_req() call
+ * made when the FCP CMD IU was received. The op field in the
+ * structure shall indicate the operation for the LLDD to perform
+ * relative to the io.
* NVMET_FCOP_READDATA operation: the LLDD is to send the
* payload data (described by sglist) to the host in 1 or
* more FC sequences (preferably 1). Note: the fc-nvme layer
@@ -752,29 +755,31 @@ struct nvmet_fc_target_port {
* successfully, the LLDD is to update the nvmefc_tgt_fcp_req
* transferred_length field and may subsequently transmit the
* FCP_RSP iu payload (described by rspbuf, rspdma, rsplen).
- * The LLDD is to await FCP_CONF reception to confirm the RSP
- * reception by the host. The LLDD may retramsit the FCP_RSP iu
- * if necessary per FC-NVME. Upon reception of FCP_CONF, or upon
- * FCP_CONF failure, the LLDD is to set the nvmefc_tgt_fcp_req
- * fcp_error field and consider the operation complete..
+ * If FCP_CONF is supported, the LLDD is to await FCP_CONF
+ * reception to confirm the RSP reception by the host. The LLDD
+ * may retransmit the FCP_RSP iu if necessary per FC-NVME. Upon
+ * transmission of the FCP_RSP iu if FCP_CONF is not supported,
+ * or upon success/failure of FCP_CONF if it is supported, the
+ * LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
+ * consider the operation complete.
* NVMET_FCOP_RSP: the LLDD is to transmit the FCP_RSP iu payload
- * (described by rspbuf, rspdma, rsplen). The LLDD is to await
- * FCP_CONF reception to confirm the RSP reception by the host.
- * The LLDD may retramsit the FCP_RSP iu if necessary per FC-NVME.
- * Upon reception of FCP_CONF, or upon FCP_CONF failure, the
+ * (described by rspbuf, rspdma, rsplen). If FCP_CONF is
+ * supported, the LLDD is to await FCP_CONF reception to confirm
+ * the RSP reception by the host. The LLDD may retransmit the
+ * FCP_RSP iu if FCP_CONF is not received per FC-NVME. Upon
+ * transmission of the FCP_RSP iu if FCP_CONF is not supported,
+ * or upon success/failure of FCP_CONF if it is supported, the
* LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
- * consider the operation complete..
- * NVMET_FCOP_ABORT: the LLDD is to terminate the exchange
- * corresponding to the fcp operation. The LLDD shall send
- * ABTS and follow FC exchange abort-multi rules, including
- * ABTS retries and possible logout.
+ * consider the operation complete.
* Upon completing the indicated operation, the LLDD is to set the
* status fields for the operation (transferred_length and fcp_error
- * status) in the request, then all the "done" routine
- * indicated in the fcp request. Upon return from the "done"
- * routine for either a NVMET_FCOP_RSP or NVMET_FCOP_ABORT operation
- * the fc-nvme layer will not longer reference the fcp request,
- * allowing the LLDD to free/release the fcp request.
+ * status) in the request, then call the "done" routine
+ * indicated in the fcp request. After the operation completes,
+ * regardless of whether the FCP_RSP iu was successfully transmitted,
+ * the LLDD-supplied exchange structure must remain valid until the
+ * transport calls the fcp_req_release() callback to return ownership
+ * of the exchange structure back to the LLDD so that it may be used
+ * for another fcp command.
* Note: when calling the done routine for READDATA or WRITEDATA
* operations, the fc-nvme layer may immediately convert, in the same
* thread and before returning to the LLDD, the fcp operation to
@@ -786,6 +791,22 @@ struct nvmet_fc_target_port {
* Returns 0 on success, -<errno> on failure (Ex: -EIO)
* Entrypoint is Mandatory.
*
+ * @fcp_abort: Called by the transport to abort an active command.
+ * The command may be in-between operations (nothing active in LLDD)
+ * or may have an active WRITEDATA operation pending. The LLDD is to
+ * initiate the ABTS process for the command and return from the
+ * callback. The ABTS does not need to be complete on the command.
+ * The fcp_abort callback inherently cannot fail. After the
+ * fcp_abort() callback completes, the transport will wait for any
+ * outstanding operation (if there was one) to complete, then will
+ * call the fcp_req_release() callback to return the command's
+ * exchange context back to the LLDD.
+ *
+ * @fcp_req_release: Called by the transport to return a nvmefc_tgt_fcp_req
+ * to the LLDD after all operations on the fcp operation are complete.
+ * This may be due to the command completing or upon completion of
+ * abort cleanup.
+ *
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
* Value is Mandatory. Must be at least 1.
@@ -820,7 +841,11 @@ struct nvmet_fc_target_template {
int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_ls_req *tls_req);
int (*fcp_op)(struct nvmet_fc_target_port *tgtport,
- struct nvmefc_tgt_fcp_req *);
+ struct nvmefc_tgt_fcp_req *fcpreq);
+ void (*fcp_abort)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
+ void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
u32 max_hw_queues;
u16 max_sgl_segments;
@@ -848,4 +873,7 @@ int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq,
void *cmdiubuf, u32 cmdiubuf_len);
+void nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
+
#endif /* _NVME_FC_DRIVER_H */
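
To illustrate the reworked split between fcp_op, fcp_abort and fcp_req_release, here is a hypothetical LLDD skeleton; only the callback signatures, the NVMET_FCOP_* values and the template field names are taken from this header, everything else is a placeholder.

static int lldd_fcp_op(struct nvmet_fc_target_port *tgtport,
                       struct nvmefc_tgt_fcp_req *fcpreq)
{
        switch (fcpreq->op) {
        case NVMET_FCOP_READDATA:
        case NVMET_FCOP_WRITEDATA:
        case NVMET_FCOP_READDATA_RSP:
        case NVMET_FCOP_RSP:
                /* start the FC sequence; call fcpreq->done() when finished */
                return 0;
        default:
                return -EINVAL;
        }
}

static void lldd_fcp_abort(struct nvmet_fc_target_port *tgtport,
                           struct nvmefc_tgt_fcp_req *fcpreq)
{
        /* kick off ABTS for the exchange; this callback cannot fail */
}

static void lldd_fcp_req_release(struct nvmet_fc_target_port *tgtport,
                                 struct nvmefc_tgt_fcp_req *fcpreq)
{
        /* ownership of the exchange returns to the LLDD; recycle it here */
}

static struct nvmet_fc_target_template lldd_tgt_template = {
        /* mandatory LS, queue and feature fields elided from this sketch */
        .fcp_op          = lldd_fcp_op,
        .fcp_abort       = lldd_fcp_abort,
        .fcp_req_release = lldd_fcp_req_release,
        .max_hw_queues   = 1,
};
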
diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h
index 4b45226bd604..e997c4a49a88 100644
--- a/include/linux/nvme-fc.h
+++ b/include/linux/nvme-fc.h
@@ -16,8 +16,7 @@
*/
/*
- * This file contains definitions relative to FC-NVME r1.11 and a few
- * newer items
+ * This file contains definitions relative to FC-NVME r1.14 (16-020vB).
*/
#ifndef _NVME_FC_H
@@ -47,8 +46,15 @@ struct nvme_fc_cmd_iu {
#define NVME_FC_SIZEOF_ZEROS_RSP 12
+enum {
+ FCNVME_SC_SUCCESS = 0,
+ FCNVME_SC_INVALID_FIELD = 1,
+ FCNVME_SC_INVALID_CONNID = 2,
+};
+
struct nvme_fc_ersp_iu {
- __u8 rsvd0[2];
+ __u8 status_code;
+ __u8 rsvd1;
__be16 iu_len;
__be32 rsn;
__be32 xfrd_len;
@@ -58,7 +64,7 @@ struct nvme_fc_ersp_iu {
};
-/* FC-NVME r1.03/16-119v0 NVME Link Services */
+/* FC-NVME Link Services */
enum {
FCNVME_LS_RSVD = 0,
FCNVME_LS_RJT = 1,
@@ -68,7 +74,7 @@ enum {
FCNVME_LS_DISCONNECT = 5,
};
-/* FC-NVME r1.03/16-119v0 NVME Link Service Descriptors */
+/* FC-NVME Link Service Descriptors */
enum {
FCNVME_LSDESC_RSVD = 0x0,
FCNVME_LSDESC_RQST = 0x1,
@@ -92,7 +98,6 @@ static inline __be32 fcnvme_lsdesc_len(size_t sz)
return cpu_to_be32(sz - (2 * sizeof(u32)));
}
-
struct fcnvme_ls_rqst_w0 {
u8 ls_cmd; /* FCNVME_LS_xxx */
u8 zeros[3];
@@ -106,8 +111,53 @@ struct fcnvme_lsdesc_rqst {
__be32 rsvd12;
};
+/* FC-NVME LS RJT reason_code values */
+enum fcnvme_ls_rjt_reason {
+ FCNVME_RJT_RC_NONE = 0,
+ /* no reason - not to be sent */
+
+ FCNVME_RJT_RC_INVAL = 0x01,
+ /* invalid NVMe_LS command code */
+
+ FCNVME_RJT_RC_LOGIC = 0x03,
+ /* logical error */
+
+ FCNVME_RJT_RC_UNAB = 0x09,
+ /* unable to perform command request */
+
+ FCNVME_RJT_RC_UNSUP = 0x0b,
+ /* command not supported */
+
+ FCNVME_RJT_RC_INPROG = 0x0e,
+ /* command already in progress */
+ FCNVME_RJT_RC_INV_ASSOC = 0x40,
+ /* Invalid Association ID*/
+ FCNVME_RJT_RC_INV_CONN = 0x41,
+ /* Invalid Connection ID*/
+
+ FCNVME_RJT_RC_VENDOR = 0xff,
+ /* vendor specific error */
+};
+
+/* FC-NVME LS RJT reason_explanation values */
+enum fcnvme_ls_rjt_explan {
+ FCNVME_RJT_EXP_NONE = 0x00,
+ /* No additional explanation */
+
+ FCNVME_RJT_EXP_OXID_RXID = 0x17,
+ /* invalid OX_ID-RX_ID combination */
+
+ FCNVME_RJT_EXP_INSUF_RES = 0x29,
+ /* insufficient resources */
+
+ FCNVME_RJT_EXP_UNAB_DATA = 0x2a,
+ /* unable to supply requested data */
+
+ FCNVME_RJT_EXP_INV_LEN = 0x2d,
+ /* Invalid payload length */
+};
/* FCNVME_LSDESC_RJT */
struct fcnvme_lsdesc_rjt {
@@ -119,15 +169,15 @@ struct fcnvme_lsdesc_rjt {
* Reject reason and explanation codes are generic
* to ELS's from LS-3.
*/
- u8 reason_code;
- u8 reason_explanation;
+ u8 reason_code; /* fcnvme_ls_rjt_reason */
+ u8 reason_explanation; /* fcnvme_ls_rjt_explan */
u8 vendor;
__be32 rsvd12;
};
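
A brief sketch of how the new reason/explanation enums plug into the reject descriptor; the function and the chosen scenario (rejecting an LS that names an unknown association) are illustrative.

static void example_fill_rjt(struct fcnvme_lsdesc_rjt *rjt)
{
        rjt->reason_code        = FCNVME_RJT_RC_INV_ASSOC;
        rjt->reason_explanation = FCNVME_RJT_EXP_NONE;
        rjt->vendor             = 0;
}
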
-#define FCNVME_ASSOC_HOSTID_LEN 64
+#define FCNVME_ASSOC_HOSTID_LEN 16
#define FCNVME_ASSOC_HOSTNQN_LEN 256
#define FCNVME_ASSOC_SUBNQN_LEN 256
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index c43d435d4225..b625bacf37ef 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -64,26 +64,26 @@ enum {
* RDMA_QPTYPE field
*/
enum {
- NVMF_RDMA_QPTYPE_CONNECTED = 0, /* Reliable Connected */
- NVMF_RDMA_QPTYPE_DATAGRAM = 1, /* Reliable Datagram */
+ NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */
+ NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */
};
/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
* RDMA_QPTYPE field
*/
enum {
- NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 0, /* No Provider Specified */
- NVMF_RDMA_PRTYPE_IB = 1, /* InfiniBand */
- NVMF_RDMA_PRTYPE_ROCE = 2, /* InfiniBand RoCE */
- NVMF_RDMA_PRTYPE_ROCEV2 = 3, /* InfiniBand RoCEV2 */
- NVMF_RDMA_PRTYPE_IWARP = 4, /* IWARP */
+ NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */
+ NVMF_RDMA_PRTYPE_IB = 2, /* InfiniBand */
+ NVMF_RDMA_PRTYPE_ROCE = 3, /* InfiniBand RoCE */
+ NVMF_RDMA_PRTYPE_ROCEV2 = 4, /* InfiniBand RoCEV2 */
+ NVMF_RDMA_PRTYPE_IWARP = 5, /* IWARP */
};
/* RDMA Connection Management Service Type codes for Discovery Log Page
* entry TSAS RDMA_CMS field
*/
enum {
- NVMF_RDMA_CMS_RDMA_CM = 0, /* Sockets based enpoint addressing */
+ NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */
};
#define NVMF_AQ_DEPTH 32
@@ -245,6 +245,7 @@ enum {
NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
NVME_CTRL_VWC_PRESENT = 1 << 0,
NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
+ NVME_CTRL_OACS_DBBUF_SUPP = 1 << 7,
};
struct nvme_lbaf {
@@ -603,6 +604,7 @@ enum nvme_admin_opcode {
nvme_admin_download_fw = 0x11,
nvme_admin_ns_attach = 0x15,
nvme_admin_keep_alive = 0x18,
+ nvme_admin_dbbuf = 0x7C,
nvme_admin_format_nvm = 0x80,
nvme_admin_security_send = 0x81,
nvme_admin_security_recv = 0x82,
@@ -874,6 +876,16 @@ struct nvmf_property_get_command {
__u8 resv4[16];
};
+struct nvme_dbbuf {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __le64 prp2;
+ __u32 rsvd12[6];
+};
+
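
A minimal sketch of building the new doorbell-buffer admin command; the DMA addresses of the two host buffers are placeholders supplied by the caller.

static void example_build_dbbuf(struct nvme_command *cmd,
                                dma_addr_t dbs_dma, dma_addr_t eis_dma)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->dbbuf.opcode = nvme_admin_dbbuf;
        cmd->dbbuf.prp1 = cpu_to_le64(dbs_dma);
        cmd->dbbuf.prp2 = cpu_to_le64(eis_dma);
}
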
struct nvme_command {
union {
struct nvme_common_command common;
@@ -893,6 +905,7 @@ struct nvme_command {
struct nvmf_connect_command connect;
struct nvmf_property_set_command prop_set;
struct nvmf_property_get_command prop_get;
+ struct nvme_dbbuf dbbuf;
};
};
diff --git a/include/linux/of.h b/include/linux/of.h
index 21e6323de0f3..50fcdb54087f 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -159,6 +159,8 @@ static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
container_of(fwnode, struct device_node, fwnode) : NULL;
}
+#define of_fwnode_handle(node) (&(node)->fwnode)
+
static inline bool of_have_populated_dt(void)
{
return of_root != NULL;
@@ -292,6 +294,9 @@ extern int of_property_count_elems_of_size(const struct device_node *np,
extern int of_property_read_u32_index(const struct device_node *np,
const char *propname,
u32 index, u32 *out_value);
+extern int of_property_read_u64_index(const struct device_node *np,
+ const char *propname,
+ u32 index, u64 *out_value);
extern int of_property_read_variable_u8_array(const struct device_node *np,
const char *propname, u8 *out_values,
size_t sz_min, size_t sz_max);
@@ -602,6 +607,8 @@ static inline struct device_node *of_find_node_with_property(
return NULL;
}
+#define of_fwnode_handle(node) NULL
+
static inline bool of_have_populated_dt(void)
{
return false;
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index c12dace043f3..b4ad8b4f8506 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -34,8 +34,7 @@ extern void of_device_unregister(struct platform_device *ofdev);
extern const void *of_device_get_match_data(const struct device *dev);
-extern ssize_t of_device_get_modalias(struct device *dev,
- char *str, ssize_t len);
+extern ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len);
extern int of_device_request_module(struct device *dev);
extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env);
@@ -55,7 +54,8 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
return of_node_get(cpu_dev->of_node);
}
-void of_dma_configure(struct device *dev, struct device_node *np);
+int of_dma_configure(struct device *dev, struct device_node *np);
+void of_dma_deconfigure(struct device *dev);
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -72,8 +72,8 @@ static inline const void *of_device_get_match_data(const struct device *dev)
return NULL;
}
-static inline int of_device_get_modalias(struct device *dev,
- char *str, ssize_t len)
+static inline int of_device_modalias(struct device *dev,
+ char *str, ssize_t len)
{
return -ENODEV;
}
@@ -103,7 +103,12 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
{
return NULL;
}
-static inline void of_dma_configure(struct device *dev, struct device_node *np)
+
+static inline int of_dma_configure(struct device *dev, struct device_node *np)
+{
+ return 0;
+}
+static inline void of_dma_deconfigure(struct device *dev)
{}
#endif /* CONFIG_OF */
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 271b3fdf0070..1dfbfd0d8040 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -54,6 +54,11 @@ extern char __dtb_end[];
extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname,
int depth, void *data),
void *data);
+extern int of_scan_flat_dt_subnodes(unsigned long node,
+ int (*it)(unsigned long node,
+ const char *uname,
+ void *data),
+ void *data);
extern int of_get_flat_dt_subnode_by_name(unsigned long node,
const char *uname);
extern const void *of_get_flat_dt_prop(unsigned long node, const char *name,
@@ -62,6 +67,7 @@ extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
extern int of_flat_dt_match(unsigned long node, const char *const *matches);
extern unsigned long of_get_flat_dt_root(void);
extern int of_get_flat_dt_size(void);
+extern uint32_t of_get_flat_dt_phandle(unsigned long node);
extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
int depth, void *data);
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 3f87ea5b8bee..1e089d5a182b 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -30,6 +30,7 @@ struct device_node;
enum of_gpio_flags {
OF_GPIO_ACTIVE_LOW = 0x1,
OF_GPIO_SINGLE_ENDED = 0x2,
+ OF_GPIO_OPEN_DRAIN = 0x4,
};
#ifdef CONFIG_OF_GPIO
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index a58cca8bcb29..ba35ba520487 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -12,7 +12,7 @@
#include <linux/phy.h>
#include <linux/of.h>
-#ifdef CONFIG_OF
+#if IS_ENABLED(CONFIG_OF_MDIO)
extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
extern struct phy_device *of_phy_find_device(struct device_node *phy_np);
extern struct phy_device *of_phy_connect(struct net_device *dev,
@@ -32,7 +32,7 @@ extern int of_phy_register_fixed_link(struct device_node *np);
extern void of_phy_deregister_fixed_link(struct device_node *np);
extern bool of_phy_is_fixed_link(struct device_node *np);
-#else /* CONFIG_OF */
+#else /* CONFIG_OF_MDIO */
static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
{
/*
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 0e0974eceb80..518c8d20647a 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -85,15 +85,4 @@ static inline int of_pci_get_host_bridge_resources(struct device_node *dev,
}
#endif
-#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
-int of_pci_msi_chip_add(struct msi_controller *chip);
-void of_pci_msi_chip_remove(struct msi_controller *chip);
-struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node);
-#else
-static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; }
-static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { }
-static inline struct msi_controller *
-of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; }
-#endif
-
#endif
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 956a1006aefc..e0d1946270f3 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -64,6 +64,7 @@ extern struct platform_device *of_platform_device_create(struct device_node *np,
const char *bus_id,
struct device *parent);
+extern int of_platform_device_destroy(struct device *dev, void *data);
extern int of_platform_bus_probe(struct device_node *root,
const struct of_device_id *matches,
struct device *parent);
@@ -76,6 +77,10 @@ extern int of_platform_default_populate(struct device_node *root,
const struct of_dev_auxdata *lookup,
struct device *parent);
extern void of_platform_depopulate(struct device *parent);
+
+extern int devm_of_platform_populate(struct device *dev);
+
+extern void devm_of_platform_depopulate(struct device *dev);
#else
static inline int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
@@ -91,6 +96,13 @@ static inline int of_platform_default_populate(struct device_node *root,
return -ENODEV;
}
static inline void of_platform_depopulate(struct device *parent) { }
+
+static inline int devm_of_platform_populate(struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline void devm_of_platform_depopulate(struct device *dev) { }
#endif
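
The devm_* variants pair naturally with a probe routine that only needs its DT children created and torn down automatically; a hypothetical sketch:

static int foo_probe(struct platform_device *pdev)
{
        /* children are depopulated automatically on driver detach */
        return devm_of_platform_populate(&pdev->dev);
}
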
#if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS)
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 047d64706f2a..d4cd2014fa6f 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -33,10 +33,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
bool skip_hwpoisoned_pages);
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
- int migratetype);
-int move_freepages(struct zone *zone,
- struct page *start_page, struct page *end_page,
- int migratetype);
+ int migratetype, int *num_movable);
/*
* Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 84943e8057ef..316a19f6b635 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -148,7 +148,7 @@ static inline int page_cache_get_speculative(struct page *page)
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic());
+ VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
/*
* Preempt must be disabled here - we rely on rcu_read_lock doing
@@ -186,7 +186,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic());
+ VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
VM_BUG_ON_PAGE(page_count(page) == 0, page);
page_ref_add(page, count);
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index f0d2b9451270..809c2f1873ac 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -16,6 +16,7 @@
#ifndef DRIVERS_PCI_ECAM_H
#define DRIVERS_PCI_ECAM_H
+#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
@@ -68,7 +69,7 @@ extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
#endif
-#ifdef CONFIG_PCI_HOST_GENERIC
+#ifdef CONFIG_PCI_HOST_COMMON
/* for DT-based PCI controllers that support ECAM */
int pci_host_common_probe(struct platform_device *pdev,
struct pci_ecam_ops *ops);
diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h
new file mode 100644
index 000000000000..263b89ea5705
--- /dev/null
+++ b/include/linux/pci-ep-cfs.h
@@ -0,0 +1,41 @@
+/**
+ * PCI Endpoint ConfigFS header file
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PCI_EP_CFS_H
+#define __LINUX_PCI_EP_CFS_H
+
+#include <linux/configfs.h>
+
+#ifdef CONFIG_PCI_ENDPOINT_CONFIGFS
+struct config_group *pci_ep_cfs_add_epc_group(const char *name);
+void pci_ep_cfs_remove_epc_group(struct config_group *group);
+struct config_group *pci_ep_cfs_add_epf_group(const char *name);
+void pci_ep_cfs_remove_epf_group(struct config_group *group);
+#else
+static inline struct config_group *pci_ep_cfs_add_epc_group(const char *name)
+{
+ return 0;
+}
+
+static inline void pci_ep_cfs_remove_epc_group(struct config_group *group)
+{
+}
+
+static inline struct config_group *pci_ep_cfs_add_epf_group(const char *name)
+{
+ return 0;
+}
+
+static inline void pci_ep_cfs_remove_epf_group(struct config_group *group)
+{
+}
+#endif
+#endif /* __LINUX_PCI_EP_CFS_H */
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
new file mode 100644
index 000000000000..af5edbf3eea3
--- /dev/null
+++ b/include/linux/pci-epc.h
@@ -0,0 +1,144 @@
+/**
+ * PCI Endpoint *Controller* (EPC) header file
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PCI_EPC_H
+#define __LINUX_PCI_EPC_H
+
+#include <linux/pci-epf.h>
+
+struct pci_epc;
+
+enum pci_epc_irq_type {
+ PCI_EPC_IRQ_UNKNOWN,
+ PCI_EPC_IRQ_LEGACY,
+ PCI_EPC_IRQ_MSI,
+};
+
+/**
+ * struct pci_epc_ops - set of function pointers for performing EPC operations
+ * @write_header: ops to populate configuration space header
+ * @set_bar: ops to configure the BAR
+ * @clear_bar: ops to reset the BAR
+ * @map_addr: ops to map CPU address to PCI address
+ * @unmap_addr: ops to unmap CPU address and PCI address
+ * @set_msi: ops to set the requested number of MSI interrupts in the MSI
+ * capability register
+ * @get_msi: ops to get the number of MSI interrupts allocated by the RC from
+ * the MSI capability register
+ * @raise_irq: ops to raise a legacy or MSI interrupt
+ * @start: ops to start the PCI link
+ * @stop: ops to stop the PCI link
+ * @owner: the module owner containing the ops
+ */
+struct pci_epc_ops {
+ int (*write_header)(struct pci_epc *pci_epc,
+ struct pci_epf_header *hdr);
+ int (*set_bar)(struct pci_epc *epc, enum pci_barno bar,
+ dma_addr_t bar_phys, size_t size, int flags);
+ void (*clear_bar)(struct pci_epc *epc, enum pci_barno bar);
+ int (*map_addr)(struct pci_epc *epc, phys_addr_t addr,
+ u64 pci_addr, size_t size);
+ void (*unmap_addr)(struct pci_epc *epc, phys_addr_t addr);
+ int (*set_msi)(struct pci_epc *epc, u8 interrupts);
+ int (*get_msi)(struct pci_epc *epc);
+ int (*raise_irq)(struct pci_epc *pci_epc,
+ enum pci_epc_irq_type type, u8 interrupt_num);
+ int (*start)(struct pci_epc *epc);
+ void (*stop)(struct pci_epc *epc);
+ struct module *owner;
+};
+
+/**
+ * struct pci_epc_mem - address space of the endpoint controller
+ * @phys_base: physical base address of the PCI address space
+ * @size: the size of the PCI address space
+ * @bitmap: bitmap to manage the PCI address space
+ * @pages: number of bits representing the address region
+ */
+struct pci_epc_mem {
+ phys_addr_t phys_base;
+ size_t size;
+ unsigned long *bitmap;
+ int pages;
+};
+
+/**
+ * struct pci_epc - represents the PCI EPC device
+ * @dev: PCI EPC device
+ * @pci_epf: list of endpoint functions present in this EPC device
+ * @ops: function pointers for performing endpoint operations
+ * @mem: address space of the endpoint controller
+ * @max_functions: max number of functions that can be configured in this EPC
+ * @group: configfs group representing the PCI EPC device
+ * @lock: spinlock to protect pci_epc ops
+ */
+struct pci_epc {
+ struct device dev;
+ struct list_head pci_epf;
+ const struct pci_epc_ops *ops;
+ struct pci_epc_mem *mem;
+ u8 max_functions;
+ struct config_group *group;
+ /* spinlock to protect against concurrent access of EP controller */
+ spinlock_t lock;
+};
+
+#define to_pci_epc(device) container_of((device), struct pci_epc, dev)
+
+#define pci_epc_create(dev, ops) \
+ __pci_epc_create((dev), (ops), THIS_MODULE)
+#define devm_pci_epc_create(dev, ops) \
+ __devm_pci_epc_create((dev), (ops), THIS_MODULE)
+
+static inline void epc_set_drvdata(struct pci_epc *epc, void *data)
+{
+ dev_set_drvdata(&epc->dev, data);
+}
+
+static inline void *epc_get_drvdata(struct pci_epc *epc)
+{
+ return dev_get_drvdata(&epc->dev);
+}
+
+struct pci_epc *
+__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+ struct module *owner);
+struct pci_epc *
+__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+ struct module *owner);
+void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc);
+void pci_epc_destroy(struct pci_epc *epc);
+int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
+void pci_epc_linkup(struct pci_epc *epc);
+void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
+int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr);
+int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
+ dma_addr_t bar_phys, size_t size, int flags);
+void pci_epc_clear_bar(struct pci_epc *epc, int bar);
+int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+ u64 pci_addr, size_t size);
+void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr);
+int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts);
+int pci_epc_get_msi(struct pci_epc *epc);
+int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
+ u8 interrupt_num);
+int pci_epc_start(struct pci_epc *epc);
+void pci_epc_stop(struct pci_epc *epc);
+struct pci_epc *pci_epc_get(const char *epc_name);
+void pci_epc_put(struct pci_epc *epc);
+
+int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_addr, size_t size);
+void pci_epc_mem_exit(struct pci_epc *epc);
+void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
+ phys_addr_t *phys_addr, size_t size);
+void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+ void __iomem *virt_addr, size_t size);
+#endif /* __LINUX_PCI_EPC_H */
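
A hypothetical controller-side sketch, assuming devm_pci_epc_create() reports failure through ERR_PTR() as devm helpers of this shape usually do; the foo_* names, the two stub ops and the placeholder memory window are invented.

static int foo_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr)
{
        /* program vendor/device ID and class registers from hdr */
        return 0;
}

static int foo_epc_start(struct pci_epc *epc)
{
        /* start link training */
        return 0;
}

static const struct pci_epc_ops foo_epc_ops = {
        .write_header = foo_epc_write_header,
        .start        = foo_epc_start,
};

static int foo_epc_probe(struct platform_device *pdev)
{
        struct pci_epc *epc;

        epc = devm_pci_epc_create(&pdev->dev, &foo_epc_ops);
        if (IS_ERR(epc))
                return PTR_ERR(epc);

        epc_set_drvdata(epc, pdev);

        /* placeholder outbound window; a real driver reads this from DT */
        return pci_epc_mem_init(epc, 0x20000000, SZ_16M);
}
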
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
new file mode 100644
index 000000000000..0d529cb90143
--- /dev/null
+++ b/include/linux/pci-epf.h
@@ -0,0 +1,162 @@
+/**
+ * PCI Endpoint *Function* (EPF) header file
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PCI_EPF_H
+#define __LINUX_PCI_EPF_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct pci_epf;
+
+enum pci_interrupt_pin {
+ PCI_INTERRUPT_UNKNOWN,
+ PCI_INTERRUPT_INTA,
+ PCI_INTERRUPT_INTB,
+ PCI_INTERRUPT_INTC,
+ PCI_INTERRUPT_INTD,
+};
+
+enum pci_barno {
+ BAR_0,
+ BAR_1,
+ BAR_2,
+ BAR_3,
+ BAR_4,
+ BAR_5,
+};
+
+/**
+ * struct pci_epf_header - represents standard configuration header
+ * @vendorid: identifies device manufacturer
+ * @deviceid: identifies a particular device
+ * @revid: specifies a device-specific revision identifier
+ * @progif_code: identifies a specific register-level programming interface
+ * @subclass_code: identifies more specifically the function of the device
+ * @baseclass_code: broadly classifies the type of function the device performs
+ * @cache_line_size: specifies the system cacheline size in units of DWORDs
+ * @subsys_vendor_id: vendor of the add-in card or subsystem
+ * @subsys_id: id specific to vendor
+ * @interrupt_pin: interrupt pin the device (or device function) uses
+ */
+struct pci_epf_header {
+ u16 vendorid;
+ u16 deviceid;
+ u8 revid;
+ u8 progif_code;
+ u8 subclass_code;
+ u8 baseclass_code;
+ u8 cache_line_size;
+ u16 subsys_vendor_id;
+ u16 subsys_id;
+ enum pci_interrupt_pin interrupt_pin;
+};
+
+/**
+ * struct pci_epf_ops - set of function pointers for performing EPF operations
+ * @bind: ops to perform when an EPC device has been bound to the EPF device
+ * @unbind: ops to perform when a binding has been lost between an EPC device
+ * and EPF device
+ * @linkup: ops to perform when the EPC device has established a connection with
+ * a host system
+ */
+struct pci_epf_ops {
+ int (*bind)(struct pci_epf *epf);
+ void (*unbind)(struct pci_epf *epf);
+ void (*linkup)(struct pci_epf *epf);
+};
+
+/**
+ * struct pci_epf_driver - represents the PCI EPF driver
+ * @probe: ops to perform when a new EPF device has been bound to the EPF driver
+ * @remove: ops to perform when the binding between the EPF device and EPF
+ * driver is broken
+ * @driver: PCI EPF driver
+ * @ops: set of function pointers for performing EPF operations
+ * @owner: the owner of the module that registers the PCI EPF driver
+ * @group: configfs group corresponding to the PCI EPF driver
+ * @id_table: identifies EPF devices for probing
+ */
+struct pci_epf_driver {
+ int (*probe)(struct pci_epf *epf);
+ int (*remove)(struct pci_epf *epf);
+
+ struct device_driver driver;
+ struct pci_epf_ops *ops;
+ struct module *owner;
+ struct config_group *group;
+ const struct pci_epf_device_id *id_table;
+};
+
+#define to_pci_epf_driver(drv) (container_of((drv), struct pci_epf_driver, \
+ driver))
+
+/**
+ * struct pci_epf_bar - represents the BAR of EPF device
+ * @phys_addr: physical address that should be mapped to the BAR
+ * @size: the size of the address space present in BAR
+ */
+struct pci_epf_bar {
+ dma_addr_t phys_addr;
+ size_t size;
+};
+
+/**
+ * struct pci_epf - represents the PCI EPF device
+ * @dev: the PCI EPF device
+ * @name: the name of the PCI EPF device
+ * @header: represents standard configuration header
+ * @bar: represents the BAR of EPF device
+ * @msi_interrupts: number of MSI interrupts required by this function
+ * @func_no: unique function number within this endpoint device
+ * @epc: the EPC device to which this EPF device is bound
+ * @driver: the EPF driver to which this EPF device is bound
+ * @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
+ */
+struct pci_epf {
+ struct device dev;
+ const char *name;
+ struct pci_epf_header *header;
+ struct pci_epf_bar bar[6];
+ u8 msi_interrupts;
+ u8 func_no;
+
+ struct pci_epc *epc;
+ struct pci_epf_driver *driver;
+ struct list_head list;
+};
+
+#define to_pci_epf(epf_dev) container_of((epf_dev), struct pci_epf, dev)
+
+#define pci_epf_register_driver(driver) \
+ __pci_epf_register_driver((driver), THIS_MODULE)
+
+static inline void epf_set_drvdata(struct pci_epf *epf, void *data)
+{
+ dev_set_drvdata(&epf->dev, data);
+}
+
+static inline void *epf_get_drvdata(struct pci_epf *epf)
+{
+ return dev_get_drvdata(&epf->dev);
+}
+
+struct pci_epf *pci_epf_create(const char *name);
+void pci_epf_destroy(struct pci_epf *epf);
+int __pci_epf_register_driver(struct pci_epf_driver *driver,
+ struct module *owner);
+void pci_epf_unregister_driver(struct pci_epf_driver *driver);
+void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar);
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar);
+int pci_epf_bind(struct pci_epf *epf);
+void pci_epf_unbind(struct pci_epf *epf);
+void pci_epf_linkup(struct pci_epf *epf);
+#endif /* __LINUX_PCI_EPF_H */
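
And the function-side counterpart, again hypothetical: a skeleton EPF driver that backs BAR_0 with function-private memory at bind time and releases it on unbind. Only the helper calls and struct fields come from this header; the id table and module boilerplate are elided.

static int foo_epf_bind(struct pci_epf *epf)
{
        void *reg_space;

        reg_space = pci_epf_alloc_space(epf, SZ_4K, BAR_0);
        if (!reg_space)
                return -ENOMEM;
        epf_set_drvdata(epf, reg_space);

        return pci_epc_write_header(epf->epc, epf->header);
}

static void foo_epf_unbind(struct pci_epf *epf)
{
        pci_epf_free_space(epf, epf_get_drvdata(epf), BAR_0);
}

static struct pci_epf_ops foo_epf_ops = {
        .bind   = foo_epf_bind,
        .unbind = foo_epf_unbind,
};

static struct pci_epf_driver foo_epf_driver = {
        .driver.name = "pci_epf_foo",
        .ops         = &foo_epf_ops,
};
/* registered from module init with pci_epf_register_driver(&foo_epf_driver) */
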
diff --git a/include/linux/pci.h b/include/linux/pci.h
index eb3da1a04e6c..8039f9f0ca05 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -28,6 +28,7 @@
#include <linux/kobject.h>
#include <linux/atomic.h>
#include <linux/device.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/resource_ext.h>
#include <uapi/linux/pci.h>
@@ -178,6 +179,15 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
/* Get VPD from function 0 VPD */
PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
+ /* a non-root bridge where translation occurs, stop alias search here */
+ PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
+ /* Do not use FLR even if device advertises PCI_AF_CAP */
+ PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
+ /*
+ * Resume before calling the driver's system suspend hooks, disabling
+ * the direct_complete optimization.
+ */
+ PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
};
enum pci_irq_reroute_variant {
@@ -358,6 +368,7 @@ struct pci_dev {
unsigned int is_virtfn:1;
unsigned int reset_fn:1;
unsigned int is_hotplug_bridge:1;
+ unsigned int is_thunderbolt:1; /* Thunderbolt controller */
unsigned int __aer_firmware_first_valid:1;
unsigned int __aer_firmware_first:1;
unsigned int broken_intx_masking:1;
@@ -396,6 +407,8 @@ struct pci_dev {
phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
size_t romlen; /* Length of ROM if it's not from the BAR */
char *driver_override; /* Driver name to force a match */
+
+ unsigned long priv_flags; /* Private flags for the pci driver */
};
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
@@ -940,32 +953,12 @@ int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
-static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
-{
- return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
-{
- return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_read_config_dword(const struct pci_dev *dev, int where,
- u32 *val)
-{
- return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
-{
- return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
-{
- return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_write_config_dword(const struct pci_dev *dev, int where,
- u32 val)
-{
- return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
-}
+int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
+int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
+int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
+int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
+int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
+int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
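
Only the definition of these config accessors moves out of line; the calling convention is unchanged, so existing callers such as the hedged sketch below (example_* is an illustrative name) are unaffected.

#include <linux/pci.h>

/* Sketch: probe-time presence check built on the accessors declared above. */
static int example_check_present(struct pci_dev *pdev)
{
	u16 vendor;
	int ret;

	ret = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
	if (ret)
		return ret;
	/* An absent device reads back as all ones. */
	return vendor == 0xffff ? -ENODEV : 0;
}
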
@@ -1052,6 +1045,7 @@ int pcie_get_mps(struct pci_dev *dev);
int pcie_set_mps(struct pci_dev *dev, int mps);
int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
enum pcie_link_width *width);
+void pcie_flr(struct pci_dev *dev);
int __pci_reset_function(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
@@ -1072,6 +1066,11 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
void pci_ignore_hotplug(struct pci_dev *dev);
+int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
+ irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
+ const char *fmt, ...);
+void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
+
/* ROM control related routines */
int pci_enable_rom(struct pci_dev *pdev);
void pci_disable_rom(struct pci_dev *pdev);
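
pci_request_irq() declared above requests the handler for vector @nr of a set previously allocated with pci_alloc_irq_vectors(), with the printf-style arguments forming the name shown in /proc/interrupts; pci_free_irq() is its counterpart. A hedged sketch (example_* names are illustrative):

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t example_handler(int irq, void *data)
{
	/* A real handler would acknowledge the device here. */
	return IRQ_HANDLED;
}

/* Sketch: attach a handler to vector 0 and release it again. */
static int example_irq_setup(struct pci_dev *pdev, void *priv)
{
	return pci_request_irq(pdev, 0, example_handler, NULL, priv,
			       "%s-queue0", pci_name(pdev));
}

static void example_irq_teardown(struct pci_dev *pdev, void *priv)
{
	pci_free_irq(pdev, 0, priv);
}
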
@@ -1199,6 +1198,11 @@ unsigned long pci_address_to_pio(phys_addr_t addr);
phys_addr_t pci_pio_to_address(unsigned long pio);
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
void pci_unmap_iospace(struct resource *res);
+void __iomem *devm_pci_remap_cfgspace(struct device *dev,
+ resource_size_t offset,
+ resource_size_t size);
+void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
+ struct resource *res);
static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
@@ -1297,11 +1301,8 @@ struct msix_entry {
#ifdef CONFIG_PCI_MSI
int pci_msi_vec_count(struct pci_dev *dev);
-void pci_msi_shutdown(struct pci_dev *dev);
void pci_disable_msi(struct pci_dev *dev);
int pci_msix_vec_count(struct pci_dev *dev);
-int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec);
-void pci_msix_shutdown(struct pci_dev *dev);
void pci_disable_msix(struct pci_dev *dev);
void pci_restore_msi_state(struct pci_dev *dev);
int pci_msi_enabled(void);
@@ -1327,13 +1328,8 @@ int pci_irq_get_node(struct pci_dev *pdev, int vec);
#else
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
-static inline void pci_msi_shutdown(struct pci_dev *dev) { }
static inline void pci_disable_msi(struct pci_dev *dev) { }
static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
-static inline int pci_enable_msix(struct pci_dev *dev,
- struct msix_entry *entries, int nvec)
-{ return -ENOSYS; }
-static inline void pci_msix_shutdown(struct pci_dev *dev) { }
static inline void pci_disable_msix(struct pci_dev *dev) { }
static inline void pci_restore_msi_state(struct pci_dev *dev) { }
static inline int pci_msi_enabled(void) { return 0; }
@@ -1351,9 +1347,9 @@ pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags,
const struct irq_affinity *aff_desc)
{
- if (min_vecs > 1)
- return -EINVAL;
- return 1;
+ if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
+ return 1;
+ return -ENOSPC;
}
static inline void pci_free_irq_vectors(struct pci_dev *dev)
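
With this change, the !CONFIG_PCI_MSI stub only reports success when the caller allows the legacy line interrupt and the device actually has one, so a request for "any interrupt type" degrades gracefully instead of pretending a vector exists. A hedged sketch of such a caller (example_* is illustrative):

/* Sketch: ask for 1..4 vectors of any type, falling back to the legacy IRQ. */
static int example_alloc_irqs(struct pci_dev *pdev)
{
	int nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_ALL_TYPES);
	if (nvec < 0)
		return nvec;	/* -ENOSPC when MSI is unavailable and there is no legacy pin */
	return nvec;		/* number of vectors actually granted */
}
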
@@ -1626,6 +1622,36 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#include <asm/pci.h>
+/* These two functions provide almost identical functionality. Depending
+ * on the architecture, one will be implemented as a wrapper around the
+ * other (in drivers/pci/mmap.c).
+ *
+ * pci_mmap_resource_range() maps a specific BAR, and vma->vm_pgoff
+ * is expected to be an offset within that region.
+ *
+ * pci_mmap_page_range() is the legacy architecture-specific interface,
+ * which accepts a "user visible" resource address converted by
+ * pci_resource_to_user(), as used in the legacy mmap() interface in
+ * /proc/bus/pci/.
+ */
+int pci_mmap_resource_range(struct pci_dev *dev, int bar,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+int pci_mmap_page_range(struct pci_dev *pdev, int bar,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+
+#ifndef arch_can_pci_mmap_wc
+#define arch_can_pci_mmap_wc() 0
+#endif
+
+#ifndef arch_can_pci_mmap_io
+#define arch_can_pci_mmap_io() 0
+#define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
+#else
+int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
+#endif
+
#ifndef pci_root_bus_fwnode
#define pci_root_bus_fwnode(bus) NULL
#endif
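
As the comment above notes, pci_mmap_resource_range() interprets vma->vm_pgoff relative to the chosen BAR. A hedged sketch of a caller exposing one memory BAR through an mmap() handler (pci_mmap_mem is the arch-provided enum pci_mmap_state value for memory space; example_* is illustrative):

/* Sketch: map BAR 0 of @pdev into @vma without write-combining. */
static int example_mmap_bar0(struct pci_dev *pdev, struct vm_area_struct *vma)
{
	/* vma->vm_pgoff is taken as an offset into BAR 0. */
	return pci_mmap_resource_range(pdev, 0, vma, pci_mmap_mem, 0);
}
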
@@ -2160,6 +2186,28 @@ static inline bool pci_ari_enabled(struct pci_bus *bus)
return bus->self && bus->self->ari_enabled;
}
+/**
+ * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
+ * @pdev: PCI device to check
+ *
+ * Walk upwards from @pdev and check for each encountered bridge if it's part
+ * of a Thunderbolt controller. Reaching the host bridge means @pdev is not
+ * Thunderbolt-attached. (But rather soldered to the mainboard usually.)
+ */
+static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
+{
+ struct pci_dev *parent = pdev;
+
+ if (pdev->is_thunderbolt)
+ return true;
+
+ while ((parent = pci_upstream_bridge(parent)))
+ if (parent->is_thunderbolt)
+ return true;
+
+ return false;
+}
+
/* provide the legacy pci_dma_* API */
#include <linux/pci-dma-compat.h>
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a4f77feecbb0..5f6b71d15393 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -862,6 +862,8 @@
#define PCI_DEVICE_ID_TI_X620 0xac8d
#define PCI_DEVICE_ID_TI_X420 0xac8e
#define PCI_DEVICE_ID_TI_XX20_FM 0xac8f
+#define PCI_DEVICE_ID_TI_DRA74x 0xb500
+#define PCI_DEVICE_ID_TI_DRA72x 0xb501
#define PCI_VENDOR_ID_SONY 0x104d
diff --git a/include/linux/pe.h b/include/linux/pe.h
index e170b95e763b..143ce75be5f0 100644
--- a/include/linux/pe.h
+++ b/include/linux/pe.h
@@ -23,34 +23,6 @@
#define MZ_MAGIC 0x5a4d /* "MZ" */
-struct mz_hdr {
- uint16_t magic; /* MZ_MAGIC */
- uint16_t lbsize; /* size of last used block */
- uint16_t blocks; /* pages in file, 0x3 */
- uint16_t relocs; /* relocations */
- uint16_t hdrsize; /* header size in "paragraphs" */
- uint16_t min_extra_pps; /* .bss */
- uint16_t max_extra_pps; /* runtime limit for the arena size */
- uint16_t ss; /* relative stack segment */
- uint16_t sp; /* initial %sp register */
- uint16_t checksum; /* word checksum */
- uint16_t ip; /* initial %ip register */
- uint16_t cs; /* initial %cs relative to load segment */
- uint16_t reloc_table_offset; /* offset of the first relocation */
- uint16_t overlay_num; /* overlay number. set to 0. */
- uint16_t reserved0[4]; /* reserved */
- uint16_t oem_id; /* oem identifier */
- uint16_t oem_info; /* oem specific */
- uint16_t reserved1[10]; /* reserved */
- uint32_t peaddr; /* address of pe header */
- char message[64]; /* message to print */
-};
-
-struct mz_reloc {
- uint16_t offset;
- uint16_t segment;
-};
-
#define PE_MAGIC 0x00004550 /* "PE\0\0" */
#define PE_OPT_MAGIC_PE32 0x010b
#define PE_OPT_MAGIC_PE32_ROM 0x0107
@@ -62,6 +34,7 @@ struct mz_reloc {
#define IMAGE_FILE_MACHINE_AMD64 0x8664
#define IMAGE_FILE_MACHINE_ARM 0x01c0
#define IMAGE_FILE_MACHINE_ARMV7 0x01c4
+#define IMAGE_FILE_MACHINE_ARM64 0xaa64
#define IMAGE_FILE_MACHINE_EBC 0x0ebc
#define IMAGE_FILE_MACHINE_I386 0x014c
#define IMAGE_FILE_MACHINE_IA64 0x0200
@@ -98,17 +71,6 @@ struct mz_reloc {
#define IMAGE_FILE_UP_SYSTEM_ONLY 0x4000
#define IMAGE_FILE_BYTES_REVERSED_HI 0x8000
-struct pe_hdr {
- uint32_t magic; /* PE magic */
- uint16_t machine; /* machine type */
- uint16_t sections; /* number of sections */
- uint32_t timestamp; /* time_t */
- uint32_t symbol_table; /* symbol table offset */
- uint32_t symbols; /* number of symbols */
- uint16_t opt_hdr_size; /* size of optional header */
- uint16_t flags; /* flags */
-};
-
#define IMAGE_FILE_OPT_ROM_MAGIC 0x107
#define IMAGE_FILE_OPT_PE32_MAGIC 0x10b
#define IMAGE_FILE_OPT_PE32_PLUS_MAGIC 0x20b
@@ -134,6 +96,95 @@ struct pe_hdr {
#define IMAGE_DLLCHARACTERISTICS_WDM_DRIVER 0x2000
#define IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE 0x8000
+/* they actually defined 0x00000000 as well, but I think we'll skip that one. */
+#define IMAGE_SCN_RESERVED_0 0x00000001
+#define IMAGE_SCN_RESERVED_1 0x00000002
+#define IMAGE_SCN_RESERVED_2 0x00000004
+#define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* don't pad - obsolete */
+#define IMAGE_SCN_RESERVED_3 0x00000010
+#define IMAGE_SCN_CNT_CODE 0x00000020 /* .text */
+#define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040 /* .data */
+#define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080 /* .bss */
+#define IMAGE_SCN_LNK_OTHER 0x00000100 /* reserved */
+#define IMAGE_SCN_LNK_INFO 0x00000200 /* .drectve comments */
+#define IMAGE_SCN_RESERVED_4 0x00000400
+#define IMAGE_SCN_LNK_REMOVE 0x00000800 /* .o only - scn to be rm'd*/
+#define IMAGE_SCN_LNK_COMDAT 0x00001000 /* .o only - COMDAT data */
+#define IMAGE_SCN_RESERVED_5 0x00002000 /* spec omits this */
+#define IMAGE_SCN_RESERVED_6 0x00004000 /* spec omits this */
+#define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data */
+/* spec lists 0x20000 twice, I suspect they meant 0x10000 for one of them */
+#define IMAGE_SCN_MEM_PURGEABLE 0x00010000 /* reserved for "future" use */
+#define IMAGE_SCN_16BIT 0x00020000 /* reserved for "future" use */
+#define IMAGE_SCN_LOCKED 0x00040000 /* reserved for "future" use */
+#define IMAGE_SCN_PRELOAD 0x00080000 /* reserved for "future" use */
+/* and here they just stuck a 1-byte integer in the middle of a bitfield */
+#define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* it does what it says on the box */
+#define IMAGE_SCN_ALIGN_2BYTES 0x00200000
+#define IMAGE_SCN_ALIGN_4BYTES 0x00300000
+#define IMAGE_SCN_ALIGN_8BYTES 0x00400000
+#define IMAGE_SCN_ALIGN_16BYTES 0x00500000
+#define IMAGE_SCN_ALIGN_32BYTES 0x00600000
+#define IMAGE_SCN_ALIGN_64BYTES 0x00700000
+#define IMAGE_SCN_ALIGN_128BYTES 0x00800000
+#define IMAGE_SCN_ALIGN_256BYTES 0x00900000
+#define IMAGE_SCN_ALIGN_512BYTES 0x00a00000
+#define IMAGE_SCN_ALIGN_1024BYTES 0x00b00000
+#define IMAGE_SCN_ALIGN_2048BYTES 0x00c00000
+#define IMAGE_SCN_ALIGN_4096BYTES 0x00d00000
+#define IMAGE_SCN_ALIGN_8192BYTES 0x00e00000
+#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* extended relocations */
+#define IMAGE_SCN_MEM_DISCARDABLE 0x02000000 /* scn can be discarded */
+#define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* cannot be cached */
+#define IMAGE_SCN_MEM_NOT_PAGED 0x08000000 /* not pageable */
+#define IMAGE_SCN_MEM_SHARED 0x10000000 /* can be shared */
+#define IMAGE_SCN_MEM_EXECUTE 0x20000000 /* can be executed as code */
+#define IMAGE_SCN_MEM_READ 0x40000000 /* readable */
+#define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */
+
+#define IMAGE_DEBUG_TYPE_CODEVIEW 2
+
+#ifndef __ASSEMBLY__
+
+struct mz_hdr {
+ uint16_t magic; /* MZ_MAGIC */
+ uint16_t lbsize; /* size of last used block */
+ uint16_t blocks; /* pages in file, 0x3 */
+ uint16_t relocs; /* relocations */
+ uint16_t hdrsize; /* header size in "paragraphs" */
+ uint16_t min_extra_pps; /* .bss */
+ uint16_t max_extra_pps; /* runtime limit for the arena size */
+ uint16_t ss; /* relative stack segment */
+ uint16_t sp; /* initial %sp register */
+ uint16_t checksum; /* word checksum */
+ uint16_t ip; /* initial %ip register */
+ uint16_t cs; /* initial %cs relative to load segment */
+ uint16_t reloc_table_offset; /* offset of the first relocation */
+ uint16_t overlay_num; /* overlay number. set to 0. */
+ uint16_t reserved0[4]; /* reserved */
+ uint16_t oem_id; /* oem identifier */
+ uint16_t oem_info; /* oem specific */
+ uint16_t reserved1[10]; /* reserved */
+ uint32_t peaddr; /* address of pe header */
+ char message[64]; /* message to print */
+};
+
+struct mz_reloc {
+ uint16_t offset;
+ uint16_t segment;
+};
+
+struct pe_hdr {
+ uint32_t magic; /* PE magic */
+ uint16_t machine; /* machine type */
+ uint16_t sections; /* number of sections */
+ uint32_t timestamp; /* time_t */
+ uint32_t symbol_table; /* symbol table offset */
+ uint32_t symbols; /* number of symbols */
+ uint16_t opt_hdr_size; /* size of optional header */
+ uint16_t flags; /* flags */
+};
+
/* the fact that pe32 isn't padded where pe32+ is 64-bit means union won't
* work right. vomit. */
struct pe32_opt_hdr {
@@ -243,52 +294,6 @@ struct section_header {
uint32_t flags;
};
-/* they actually defined 0x00000000 as well, but I think we'll skip that one. */
-#define IMAGE_SCN_RESERVED_0 0x00000001
-#define IMAGE_SCN_RESERVED_1 0x00000002
-#define IMAGE_SCN_RESERVED_2 0x00000004
-#define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* don't pad - obsolete */
-#define IMAGE_SCN_RESERVED_3 0x00000010
-#define IMAGE_SCN_CNT_CODE 0x00000020 /* .text */
-#define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040 /* .data */
-#define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080 /* .bss */
-#define IMAGE_SCN_LNK_OTHER 0x00000100 /* reserved */
-#define IMAGE_SCN_LNK_INFO 0x00000200 /* .drectve comments */
-#define IMAGE_SCN_RESERVED_4 0x00000400
-#define IMAGE_SCN_LNK_REMOVE 0x00000800 /* .o only - scn to be rm'd*/
-#define IMAGE_SCN_LNK_COMDAT 0x00001000 /* .o only - COMDAT data */
-#define IMAGE_SCN_RESERVED_5 0x00002000 /* spec omits this */
-#define IMAGE_SCN_RESERVED_6 0x00004000 /* spec omits this */
-#define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data */
-/* spec lists 0x20000 twice, I suspect they meant 0x10000 for one of them */
-#define IMAGE_SCN_MEM_PURGEABLE 0x00010000 /* reserved for "future" use */
-#define IMAGE_SCN_16BIT 0x00020000 /* reserved for "future" use */
-#define IMAGE_SCN_LOCKED 0x00040000 /* reserved for "future" use */
-#define IMAGE_SCN_PRELOAD 0x00080000 /* reserved for "future" use */
-/* and here they just stuck a 1-byte integer in the middle of a bitfield */
-#define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* it does what it says on the box */
-#define IMAGE_SCN_ALIGN_2BYTES 0x00200000
-#define IMAGE_SCN_ALIGN_4BYTES 0x00300000
-#define IMAGE_SCN_ALIGN_8BYTES 0x00400000
-#define IMAGE_SCN_ALIGN_16BYTES 0x00500000
-#define IMAGE_SCN_ALIGN_32BYTES 0x00600000
-#define IMAGE_SCN_ALIGN_64BYTES 0x00700000
-#define IMAGE_SCN_ALIGN_128BYTES 0x00800000
-#define IMAGE_SCN_ALIGN_256BYTES 0x00900000
-#define IMAGE_SCN_ALIGN_512BYTES 0x00a00000
-#define IMAGE_SCN_ALIGN_1024BYTES 0x00b00000
-#define IMAGE_SCN_ALIGN_2048BYTES 0x00c00000
-#define IMAGE_SCN_ALIGN_4096BYTES 0x00d00000
-#define IMAGE_SCN_ALIGN_8192BYTES 0x00e00000
-#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* extended relocations */
-#define IMAGE_SCN_MEM_DISCARDABLE 0x02000000 /* scn can be discarded */
-#define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* cannot be cached */
-#define IMAGE_SCN_MEM_NOT_PAGED 0x08000000 /* not pageable */
-#define IMAGE_SCN_MEM_SHARED 0x10000000 /* can be shared */
-#define IMAGE_SCN_MEM_EXECUTE 0x20000000 /* can be executed as code */
-#define IMAGE_SCN_MEM_READ 0x40000000 /* readable */
-#define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */
-
enum x64_coff_reloc_type {
IMAGE_REL_AMD64_ABSOLUTE = 0,
IMAGE_REL_AMD64_ADDR64,
@@ -445,4 +450,6 @@ struct win_certificate {
uint16_t cert_type;
};
+#endif /* !__ASSEMBLY__ */
+
#endif /* __LINUX_PE_H */
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 3a481a49546e..c13dceb87b60 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -99,6 +99,7 @@ int __must_check percpu_ref_init(struct percpu_ref *ref,
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch);
+void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 56939d3f6e53..491b3f5a5f8a 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -110,6 +110,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
#endif
extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
extern bool is_kernel_percpu_address(unsigned long addr);
#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 8462da266089..1360dd6d5e61 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -75,6 +75,8 @@ struct pmu_hw_events {
* already have to allocate this struct per cpu.
*/
struct arm_pmu *percpu_pmu;
+
+ int irq;
};
enum armpmu_attr_groups {
@@ -88,7 +90,6 @@ struct arm_pmu {
struct pmu pmu;
cpumask_t active_irqs;
cpumask_t supported_cpus;
- int *irq_affinity;
char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
void (*enable)(struct perf_event *event);
@@ -104,12 +105,8 @@ struct arm_pmu {
void (*start)(struct arm_pmu *);
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
- int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
- void (*free_irq)(struct arm_pmu *);
int (*map_event)(struct perf_event *event);
int num_events;
- atomic_t active_events;
- struct mutex reserve_mutex;
u64 max_period;
bool secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
@@ -120,6 +117,9 @@ struct arm_pmu {
struct notifier_block cpu_pm_nb;
/* the attr_groups array must be NULL-terminated */
const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
+
+ /* Only to be used by ACPI probing code */
+ unsigned long acpi_cpuid;
};
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
@@ -135,10 +135,12 @@ int armpmu_map_event(struct perf_event *event,
[PERF_COUNT_HW_CACHE_RESULT_MAX],
u32 raw_event_mask);
+typedef int (*armpmu_init_fn)(struct arm_pmu *);
+
struct pmu_probe_info {
unsigned int cpuid;
unsigned int mask;
- int (*init)(struct arm_pmu *);
+ armpmu_init_fn init;
};
#define PMU_PROBE(_cpuid, _mask, _fn) \
@@ -160,6 +162,21 @@ int arm_pmu_device_probe(struct platform_device *pdev,
const struct of_device_id *of_table,
const struct pmu_probe_info *probe_table);
+#ifdef CONFIG_ACPI
+int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
+#else
+static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
+#endif
+
+/* Internal functions only for core arm_pmu code */
+struct arm_pmu *armpmu_alloc(void);
+void armpmu_free(struct arm_pmu *pmu);
+int armpmu_register(struct arm_pmu *pmu);
+int armpmu_request_irqs(struct arm_pmu *armpmu);
+void armpmu_free_irqs(struct arm_pmu *armpmu);
+int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);
+void armpmu_free_irq(struct arm_pmu *armpmu, int cpu);
+
#define ARMV8_PMU_PDEV_NAME "armv8-pmu"
#endif /* CONFIG_ARM_PMU */
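
For context, the armpmu_init_fn callback type introduced above is what both the static probe tables and the new arm_pmu_acpi_probe() path consume; a probe-table entry pairs a CPUID value/mask with an init callback, roughly as in the hedged sketch below (the constants and example_* names are placeholders, not part of this patch).

/* Sketch: a CPUID-keyed probe table as passed to arm_pmu_device_probe(). */
#define EXAMPLE_CPUID		0x4100c090	/* placeholder implementer+part value */
#define EXAMPLE_CPUID_MASK	0xff00fff0	/* placeholder mask for those fields */

static int example_pmu_init(struct arm_pmu *pmu)
{
	/* A real init callback fills in pmu->name, event handlers, num_events. */
	return 0;
}

static const struct pmu_probe_info example_probe_table[] = {
	PMU_PROBE(EXAMPLE_CPUID, EXAMPLE_CPUID_MASK, example_pmu_init),
	{ /* sentinel */ }
};
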
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 000fdb211c7d..24a635887f28 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -165,6 +165,13 @@ struct hw_perf_event {
struct list_head bp_list;
};
#endif
+ struct { /* amd_iommu */
+ u8 iommu_bank;
+ u8 iommu_cntr;
+ u16 padding;
+ u64 conf;
+ u64 conf1;
+ };
};
/*
* If the event is a per task event, this will point to the task in
@@ -801,6 +808,7 @@ struct perf_output_handle {
struct ring_buffer *rb;
unsigned long wakeup;
unsigned long size;
+ u64 aux_flags;
union {
void *addr;
unsigned long head;
@@ -849,10 +857,11 @@ perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
extern void *perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
- unsigned long size, bool truncated);
+ unsigned long size);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
+extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);
@@ -1112,6 +1121,7 @@ extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks
extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
+extern void perf_event_namespaces(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
/* Callchains */
@@ -1267,8 +1277,8 @@ static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event) { return NULL; }
static inline void
-perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
- bool truncated) { }
+perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
+ { }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
unsigned long size) { return -EINVAL; }
@@ -1315,6 +1325,7 @@ static inline int perf_unregister_guest_info_callbacks
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
static inline void perf_event_exec(void) { }
static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
+static inline void perf_event_namespaces(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 43a774873aa9..e76e4adbc7c7 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -217,6 +217,13 @@ struct mii_bus {
* matching its address
*/
int irq[PHY_MAX_ADDR];
+
+ /* GPIO reset pulse width in microseconds */
+ int reset_delay_us;
+ /* Number of reset GPIOs */
+ int num_reset_gpios;
+ /* Array of RESET GPIO descriptors */
+ struct gpio_desc **reset_gpiod;
};
#define to_mii_bus(d) container_of(d, struct mii_bus, dev)
@@ -587,23 +594,29 @@ struct phy_driver {
*/
void (*link_change_notify)(struct phy_device *dev);
- /* A function provided by a phy specific driver to override the
- * the PHY driver framework support for reading a MMD register
- * from the PHY. If not supported, return -1. This function is
- * optional for PHY specific drivers, if not provided then the
- * default MMD read function is used by the PHY framework.
+ /*
+ * Phy specific driver override for reading a MMD register.
+ * This function is optional for PHY specific drivers. When
+ * not provided, the default MMD read function will be used
+ * by phy_read_mmd(), which will use either a direct read for
+ * Clause 45 PHYs or an indirect read for Clause 22 PHYs.
+ * devnum is the MMD device number within the PHY device,
+ * regnum is the register within the selected MMD device.
*/
- int (*read_mmd_indirect)(struct phy_device *dev, int ptrad,
- int devnum, int regnum);
-
- /* A function provided by a phy specific driver to override the
- * the PHY driver framework support for writing a MMD register
- * from the PHY. This function is optional for PHY specific drivers,
- * if not provided then the default MMD read function is used by
- * the PHY framework.
+ int (*read_mmd)(struct phy_device *dev, int devnum, u16 regnum);
+
+ /*
+ * Phy specific driver override for writing a MMD register.
+ * This function is optional for PHY specific drivers. When
+ * not provided, the default MMD write function will be used
+ * by phy_write_mmd(), which will use either a direct write for
+ * Clause 45 PHYs, or an indirect write for Clause 22 PHYs.
+ * devnum is the MMD device number within the PHY device,
+ * regnum is the register within the selected MMD device.
+ * val is the value to be written.
*/
- void (*write_mmd_indirect)(struct phy_device *dev, int ptrad,
- int devnum, int regnum, u32 val);
+ int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum,
+ u16 val);
/* Get the size and type of the eeprom contained within a plug-in
* module */
@@ -651,25 +664,7 @@ struct phy_fixup {
*
* Same rules as for phy_read();
*/
-static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
-{
- if (!phydev->is_c45)
- return -EOPNOTSUPP;
-
- return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
- MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff));
-}
-
-/**
- * phy_read_mmd_indirect - reads data from the MMD registers
- * @phydev: The PHY device bus
- * @prtad: MMD Address
- * @addr: PHY address on the MII bus
- *
- * Description: it reads data from the MMD registers (clause 22 to access to
- * clause 45) of the specified phy address.
- */
-int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad);
+int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
/**
* phy_read - Convenience function for reading a given PHY register
@@ -752,35 +747,29 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
*
* Same rules as for phy_write();
*/
-static inline int phy_write_mmd(struct phy_device *phydev, int devad,
- u32 regnum, u16 val)
-{
- if (!phydev->is_c45)
- return -EOPNOTSUPP;
-
- regnum = MII_ADDR_C45 | ((devad & 0x1f) << 16) | (regnum & 0xffff);
-
- return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val);
-}
-
-/**
- * phy_write_mmd_indirect - writes data to the MMD registers
- * @phydev: The PHY device
- * @prtad: MMD Address
- * @devad: MMD DEVAD
- * @data: data to write in the MMD register
- *
- * Description: Write data from the MMD registers of the specified
- * phy address.
- */
-void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
- int devad, u32 data);
+int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
bool is_c45,
struct phy_c45_device_ids *c45_ids);
+#if IS_ENABLED(CONFIG_PHYLIB)
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
int phy_device_register(struct phy_device *phy);
+void phy_device_free(struct phy_device *phydev);
+#else
+static inline
+struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
+{
+ return NULL;
+}
+
+static inline int phy_device_register(struct phy_device *phy)
+{
+ return 0;
+}
+
+static inline void phy_device_free(struct phy_device *phydev) { }
+#endif /* CONFIG_PHYLIB */
void phy_device_remove(struct phy_device *phydev);
int phy_init_hw(struct phy_device *phydev);
int phy_suspend(struct phy_device *phydev);
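
With the indirect helpers removed, both Clause 22 and Clause 45 PHYs go through phy_read_mmd()/phy_write_mmd(), which choose direct or indirect access internally. A hedged sketch of a read-modify-write on an MMD register (the device/register numbers are illustrative only):

#include <linux/phy.h>

/* Sketch: set bit 0 in an MMD register of @phydev. */
static int example_set_mmd_bit(struct phy_device *phydev)
{
	int val;

	val = phy_read_mmd(phydev, MDIO_MMD_PCS, 0);
	if (val < 0)
		return val;
	return phy_write_mmd(phydev, MDIO_MMD_PCS, 0, val | 0x1);
}
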
@@ -852,6 +841,7 @@ void phy_change_work(struct work_struct *work);
void phy_mac_interrupt(struct phy_device *phydev, int new_link);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
+void phy_trigger_machine(struct phy_device *phydev, bool sync);
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
int phy_ethtool_ksettings_get(struct phy_device *phydev,
@@ -861,7 +851,6 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
int phy_start_interrupts(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
-void phy_device_free(struct phy_device *phydev);
int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
@@ -888,8 +877,10 @@ int phy_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd);
int phy_ethtool_nway_reset(struct net_device *ndev);
+#if IS_ENABLED(CONFIG_PHYLIB)
int __init mdio_bus_init(void);
void mdio_bus_exit(void);
+#endif
extern struct bus_type mdio_bus_type;
@@ -900,7 +891,7 @@ struct mdio_board_info {
const void *platform_data;
};
-#if IS_ENABLED(CONFIG_PHYLIB)
+#if IS_ENABLED(CONFIG_MDIO_DEVICE)
int mdiobus_register_board_info(const struct mdio_board_info *info,
unsigned int n);
#else
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 7620eb127cff..279e3c5326e3 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -42,6 +42,8 @@
* @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
* impedance to VDD). If the argument is != 0 pull-up is enabled,
* if it is 0, pull-up is total, i.e. the pin is connected to VDD.
+ * @PIN_CONFIG_BIDIRECTIONAL: the pin will be configured to allow simultaneous
+ * input and output operations.
* @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
* collector) which means it is usually wired with other output ports
* which are then pulled up with an external resistor. Setting this
@@ -96,6 +98,7 @@ enum pin_config_param {
PIN_CONFIG_BIAS_PULL_DOWN,
PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
PIN_CONFIG_BIAS_PULL_UP,
+ PIN_CONFIG_BIDIRECTIONAL,
PIN_CONFIG_DRIVE_OPEN_DRAIN,
PIN_CONFIG_DRIVE_OPEN_SOURCE,
PIN_CONFIG_DRIVE_PUSH_PULL,
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 8ce2d87a238b..5e45385c5bdc 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -145,8 +145,9 @@ struct pinctrl_desc {
extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data,
struct pinctrl_dev **pctldev);
+extern int pinctrl_enable(struct pinctrl_dev *pctldev);
-/* Please use pinctrl_register_and_init() instead */
+/* Please use pinctrl_register_and_init() and pinctrl_enable() instead */
extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data);
diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h
index 0496d171700a..e8b12dbf6170 100644
--- a/include/linux/platform_data/iommu-omap.h
+++ b/include/linux/platform_data/iommu-omap.h
@@ -12,28 +12,8 @@
#include <linux/platform_device.h>
-#define MMU_REG_SIZE 256
-
-/**
- * struct iommu_arch_data - omap iommu private data
- * @name: name of the iommu device
- * @iommu_dev: handle of the iommu device
- *
- * This is an omap iommu private data object, which binds an iommu user
- * to its iommu device. This object should be placed at the iommu user's
- * dev_archdata so generic IOMMU API can be used without having to
- * utilize omap-specific plumbing anymore.
- */
-struct omap_iommu_arch_data {
- const char *name;
- struct omap_iommu *iommu_dev;
-};
-
struct iommu_platform_data {
- const char *name;
const char *reset_name;
- int nr_tlb_entries;
-
int (*assert_reset)(struct platform_device *pdev, const char *name);
int (*deassert_reset)(struct platform_device *pdev, const char *name);
};
diff --git a/include/linux/platform_data/isl9305.h b/include/linux/platform_data/isl9305.h
index 1419133fa69e..4ac1a070af0a 100644
--- a/include/linux/platform_data/isl9305.h
+++ b/include/linux/platform_data/isl9305.h
@@ -24,7 +24,7 @@
struct regulator_init_data;
struct isl9305_pdata {
- struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR];
+ struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR + 1];
};
#endif
diff --git a/include/linux/platform_data/itco_wdt.h b/include/linux/platform_data/itco_wdt.h
index f16542c77ff7..0e95527edf25 100644
--- a/include/linux/platform_data/itco_wdt.h
+++ b/include/linux/platform_data/itco_wdt.h
@@ -14,6 +14,10 @@
struct itco_wdt_platform_data {
char name[32];
unsigned int version;
+ /* private data to be passed to update_no_reboot_bit API */
+ void *no_reboot_priv;
+ /* pointer for platform specific no reboot update function */
+ int (*update_no_reboot_bit)(void *priv, bool set);
};
#endif /* _ITCO_WDT_H_ */
diff --git a/include/linux/platform_data/pn544.h b/include/linux/platform_data/pn544.h
deleted file mode 100644
index 5ce1ab983f44..000000000000
--- a/include/linux/platform_data/pn544.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Driver include for the PN544 NFC chip.
- *
- * Copyright (C) Nokia Corporation
- *
- * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
- * Contact: Matti Aaltoenn <matti.j.aaltonen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _PN544_H_
-#define _PN544_H_
-
-#include <linux/i2c.h>
-
-enum {
- NFC_GPIO_ENABLE,
- NFC_GPIO_FW_RESET,
- NFC_GPIO_IRQ
-};
-
-/* board config */
-struct pn544_nfc_platform_data {
- int (*request_resources) (struct i2c_client *client);
- void (*free_resources) (void);
- void (*enable) (int fw);
- int (*test) (void);
- void (*disable) (void);
- int (*get_gpio)(int type);
-};
-
-#endif /* _PN544_H_ */
diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h
deleted file mode 100644
index cc2bdafb0c69..000000000000
--- a/include/linux/platform_data/st21nfca.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Driver include for the ST21NFCA NFC chip.
- *
- * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _ST21NFCA_HCI_H_
-#define _ST21NFCA_HCI_H_
-
-#include <linux/i2c.h>
-
-#define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci"
-
-struct st21nfca_nfc_platform_data {
- unsigned int gpio_ena;
- unsigned int irq_polarity;
- bool is_ese_present;
- bool is_uicc_present;
-};
-
-#endif /* _ST21NFCA_HCI_H_ */
diff --git a/include/linux/platform_data/video-imxfb.h b/include/linux/platform_data/video-imxfb.h
index a5c0a71ec914..cf9348b376ac 100644
--- a/include/linux/platform_data/video-imxfb.h
+++ b/include/linux/platform_data/video-imxfb.h
@@ -50,6 +50,7 @@
struct imx_fb_videomode {
struct fb_videomode mode;
u32 pcr;
+ bool aus_mode;
unsigned char bpp;
};
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 5339ed5bd6f9..b7803a251044 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -20,6 +20,7 @@
/* Defines used for the flags field in the struct generic_pm_domain */
#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
#define GENPD_FLAG_IRQ_SAFE (1U << 1) /* PM domain operates in atomic */
+#define GENPD_FLAG_ALWAYS_ON (1U << 2) /* PM domain is always powered on */
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
@@ -117,6 +118,7 @@ struct generic_pm_domain_data {
struct pm_domain_data base;
struct gpd_timing_data td;
struct notifier_block nb;
+ void *data;
};
#ifdef CONFIG_PM_GENERIC_DOMAINS
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index a3447932df1f..4c2cba7ec1d4 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -106,8 +106,8 @@ extern void __pm_stay_awake(struct wakeup_source *ws);
extern void pm_stay_awake(struct device *dev);
extern void __pm_relax(struct wakeup_source *ws);
extern void pm_relax(struct device *dev);
-extern void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec);
-extern void pm_wakeup_event(struct device *dev, unsigned int msec);
+extern void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard);
+extern void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard);
#else /* !CONFIG_PM_SLEEP */
@@ -182,9 +182,11 @@ static inline void __pm_relax(struct wakeup_source *ws) {}
static inline void pm_relax(struct device *dev) {}
-static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) {}
+static inline void pm_wakeup_ws_event(struct wakeup_source *ws,
+ unsigned int msec, bool hard) {}
-static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {}
+static inline void pm_wakeup_dev_event(struct device *dev, unsigned int msec,
+ bool hard) {}
#endif /* !CONFIG_PM_SLEEP */
@@ -201,4 +203,19 @@ static inline void wakeup_source_trash(struct wakeup_source *ws)
wakeup_source_drop(ws);
}
+static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
+{
+ return pm_wakeup_ws_event(ws, msec, false);
+}
+
+static inline void pm_wakeup_event(struct device *dev, unsigned int msec)
+{
+ return pm_wakeup_dev_event(dev, msec, false);
+}
+
+static inline void pm_wakeup_hard_event(struct device *dev)
+{
+ return pm_wakeup_dev_event(dev, 0, true);
+}
+
#endif /* _LINUX_PM_WAKEUP_H */
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index e856c2cb0fe8..71ecf3d46aac 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -31,12 +31,6 @@ static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
BUG();
}
-static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
-{
- BUG();
- return -EFAULT;
-}
-
static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
struct iov_iter *i)
{
@@ -65,23 +59,6 @@ static inline bool arch_has_pmem_api(void)
return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
}
-/*
- * memcpy_from_pmem - read from persistent memory with error handling
- * @dst: destination buffer
- * @src: source buffer
- * @size: transfer length
- *
- * Returns 0 on success negative error code on failure.
- */
-static inline int memcpy_from_pmem(void *dst, void const *src, size_t size)
-{
- if (arch_has_pmem_api())
- return arch_memcpy_from_pmem(dst, src, size);
- else
- memcpy(dst, src, size);
- return 0;
-}
-
/**
* memcpy_to_pmem - copy data to persistent memory
* @dst: destination buffer for the copy
diff --git a/include/linux/poll.h b/include/linux/poll.h
index a46d6755035e..75ffc5729e4c 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -98,64 +98,8 @@ extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
ktime_t *expires, unsigned long slack);
extern u64 select_estimate_accuracy(struct timespec64 *tv);
-
-static inline int poll_schedule(struct poll_wqueues *pwq, int state)
-{
- return poll_schedule_timeout(pwq, state, NULL, 0);
-}
-
-/*
- * Scalable version of the fd_set.
- */
-
-typedef struct {
- unsigned long *in, *out, *ex;
- unsigned long *res_in, *res_out, *res_ex;
-} fd_set_bits;
-
-/*
- * How many longwords for "nr" bits?
- */
-#define FDS_BITPERLONG (8*sizeof(long))
-#define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
-#define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long))
-
-/*
- * We do a VERIFY_WRITE here even though we are only reading this time:
- * we'll write to it eventually..
- *
- * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
- */
-static inline
-int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
-{
- nr = FDS_BYTES(nr);
- if (ufdset)
- return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;
-
- memset(fdset, 0, nr);
- return 0;
-}
-
-static inline unsigned long __must_check
-set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
-{
- if (ufdset)
- return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
- return 0;
-}
-
-static inline
-void zero_fd_set(unsigned long nr, unsigned long *fdset)
-{
- memset(fdset, 0, FDS_BYTES(nr));
-}
-
#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
-extern int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time);
-extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
- struct timespec64 *end_time);
extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
fd_set __user *exp, struct timespec64 *end_time);
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 34c4498b800f..83b22ae9ae12 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -59,23 +59,23 @@ struct posix_clock_operations {
int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx);
- int (*clock_gettime)(struct posix_clock *pc, struct timespec *ts);
+ int (*clock_gettime)(struct posix_clock *pc, struct timespec64 *ts);
- int (*clock_getres) (struct posix_clock *pc, struct timespec *ts);
+ int (*clock_getres) (struct posix_clock *pc, struct timespec64 *ts);
int (*clock_settime)(struct posix_clock *pc,
- const struct timespec *ts);
+ const struct timespec64 *ts);
int (*timer_create) (struct posix_clock *pc, struct k_itimer *kit);
int (*timer_delete) (struct posix_clock *pc, struct k_itimer *kit);
void (*timer_gettime)(struct posix_clock *pc,
- struct k_itimer *kit, struct itimerspec *tsp);
+ struct k_itimer *kit, struct itimerspec64 *tsp);
int (*timer_settime)(struct posix_clock *pc,
struct k_itimer *kit, int flags,
- struct itimerspec *tsp, struct itimerspec *old);
+ struct itimerspec64 *tsp, struct itimerspec64 *old);
/*
* Optional character device methods:
*/
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 64aa189efe21..8c1e43ab14a9 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -87,22 +87,22 @@ struct k_itimer {
};
struct k_clock {
- int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
+ int (*clock_getres) (const clockid_t which_clock, struct timespec64 *tp);
int (*clock_set) (const clockid_t which_clock,
- const struct timespec *tp);
- int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
+ const struct timespec64 *tp);
+ int (*clock_get) (const clockid_t which_clock, struct timespec64 *tp);
int (*clock_adj) (const clockid_t which_clock, struct timex *tx);
int (*timer_create) (struct k_itimer *timer);
int (*nsleep) (const clockid_t which_clock, int flags,
- struct timespec *, struct timespec __user *);
+ struct timespec64 *, struct timespec __user *);
long (*nsleep_restart) (struct restart_block *restart_block);
- int (*timer_set) (struct k_itimer * timr, int flags,
- struct itimerspec * new_setting,
- struct itimerspec * old_setting);
- int (*timer_del) (struct k_itimer * timr);
+ int (*timer_set) (struct k_itimer *timr, int flags,
+ struct itimerspec64 *new_setting,
+ struct itimerspec64 *old_setting);
+ int (*timer_del) (struct k_itimer *timr);
#define TIMER_RETRY 1
- void (*timer_get) (struct k_itimer * timr,
- struct itimerspec * cur_setting);
+ void (*timer_get) (struct k_itimer *timr,
+ struct itimerspec64 *cur_setting);
};
extern struct k_clock clock_posix_cpu;
diff --git a/include/linux/power/bq24190_charger.h b/include/linux/power/bq24190_charger.h
deleted file mode 100644
index 9f0283721cbc..000000000000
--- a/include/linux/power/bq24190_charger.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Platform data for the TI bq24190 battery charger driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _BQ24190_CHARGER_H_
-#define _BQ24190_CHARGER_H_
-
-struct bq24190_platform_data {
- unsigned int gpio_int; /* GPIO pin that's connected to INT# */
-};
-
-#endif
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index 522757ac9cd4..a7ed29baf44a 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -24,8 +24,15 @@
#define __MAX17042_BATTERY_H_
#define MAX17042_STATUS_BattAbsent (1 << 3)
-#define MAX17042_BATTERY_FULL (100)
+#define MAX17042_BATTERY_FULL (95) /* Recommended FullSOCThr value */
#define MAX17042_DEFAULT_SNS_RESISTOR (10000)
+#define MAX17042_DEFAULT_VMIN (3000)
+#define MAX17042_DEFAULT_VMAX (4500) /* LiHV cell max */
+#define MAX17042_DEFAULT_TEMP_MIN (0) /* For sys without temp sensor */
+#define MAX17042_DEFAULT_TEMP_MAX (700) /* 70 degrees Celsius */
+
+/* Consider RepCap that is less than 10 units below FullCAP as full */
+#define MAX17042_FULL_THRESHOLD 10
#define MAX17042_CHARACTERIZATION_DATA_SIZE 48
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 571257e0f53d..e10f27468322 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -198,7 +198,7 @@ extern void wake_up_klogd(void);
char *log_buf_addr_get(void);
u32 log_buf_len_get(void);
-void log_buf_kexec_setup(void);
+void log_buf_vmcoreinfo_setup(void);
void __init setup_log_buf(int early);
__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
@@ -246,7 +246,7 @@ static inline u32 log_buf_len_get(void)
return 0;
}
-static inline void log_buf_kexec_setup(void)
+static inline void log_buf_vmcoreinfo_setup(void)
{
}
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 12cb8bd81d2d..58ab28d81fc2 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -14,6 +14,7 @@ struct inode;
struct proc_ns_operations {
const char *name;
+ const char *real_ns_name;
int type;
struct ns_common *(*get)(struct task_struct *task);
void (*put)(struct ns_common *ns);
@@ -26,6 +27,7 @@ extern const struct proc_ns_operations netns_operations;
extern const struct proc_ns_operations utsns_operations;
extern const struct proc_ns_operations ipcns_operations;
extern const struct proc_ns_operations pidns_operations;
+extern const struct proc_ns_operations pidns_for_children_operations;
extern const struct proc_ns_operations userns_operations;
extern const struct proc_ns_operations mntns_operations;
extern const struct proc_ns_operations cgroupns_operations;
diff --git a/include/linux/property.h b/include/linux/property.h
index 64e3a9c6d95f..2f482616a2f2 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -33,6 +33,8 @@ enum dev_dma_attr {
DEV_DMA_COHERENT,
};
+struct fwnode_handle *dev_fwnode(struct device *dev);
+
bool device_property_present(struct device *dev, const char *propname);
int device_property_read_u8_array(struct device *dev, const char *propname,
u8 *val, size_t nval);
@@ -70,6 +72,15 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
int fwnode_property_match_string(struct fwnode_handle *fwnode,
const char *propname, const char *string);
+struct fwnode_handle *fwnode_get_parent(struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_get_next_child_node(struct fwnode_handle *fwnode,
+ struct fwnode_handle *child);
+
+#define fwnode_for_each_child_node(fwnode, child) \
+ for (child = fwnode_get_next_child_node(fwnode, NULL); child; \
+ child = fwnode_get_next_child_node(fwnode, child))
+
struct fwnode_handle *device_get_next_child_node(struct device *dev,
struct fwnode_handle *child);
@@ -77,9 +88,12 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
for (child = device_get_next_child_node(dev, NULL); child; \
child = device_get_next_child_node(dev, child))
+struct fwnode_handle *fwnode_get_named_child_node(struct fwnode_handle *fwnode,
+ const char *childname);
struct fwnode_handle *device_get_named_child_node(struct device *dev,
const char *childname);
+void fwnode_handle_get(struct fwnode_handle *fwnode);
void fwnode_handle_put(struct fwnode_handle *fwnode);
unsigned int device_get_child_node_count(struct device *dev);
@@ -258,4 +272,16 @@ int device_get_phy_mode(struct device *dev);
void *device_get_mac_address(struct device *dev, char *addr, int alen);
+struct fwnode_handle *fwnode_graph_get_next_endpoint(
+ struct fwnode_handle *fwnode, struct fwnode_handle *prev);
+struct fwnode_handle *fwnode_graph_get_remote_port_parent(
+ struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_graph_get_remote_port(
+ struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_graph_get_remote_endpoint(
+ struct fwnode_handle *fwnode);
+
+int fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode,
+ struct fwnode_endpoint *endpoint);
+
#endif /* _LINUX_PROPERTY_H_ */
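
The new fwnode child-iteration helpers mirror the existing device_for_each_child_node() pattern, just anchored on a fwnode_handle instead of a device. A hedged sketch that walks the children of an arbitrary node (the "reg" property is only an illustrative choice):

#include <linux/property.h>

/* Sketch: count child nodes of @fwnode that carry a "reg" property. */
static unsigned int example_count_children(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child;
	unsigned int n = 0;

	fwnode_for_each_child_node(fwnode, child) {
		u32 reg;

		if (!fwnode_property_read_u32(child, "reg", &reg))
			n++;
	}
	return n;
}
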
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 0da29cae009b..e2233f50f428 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -30,7 +30,9 @@
#include <linux/time.h>
#include <linux/types.h>
-/* types */
+struct module;
+
+/* pstore record types (see fs/pstore/inode.c for filename templates) */
enum pstore_type_id {
PSTORE_TYPE_DMESG = 0,
PSTORE_TYPE_MCE = 1,
@@ -45,42 +47,146 @@ enum pstore_type_id {
PSTORE_TYPE_UNKNOWN = 255
};
-struct module;
+struct pstore_info;
+/**
+ * struct pstore_record - details of a pstore record entry
+ * @psi: pstore backend driver information
+ * @type: pstore record type
+ * @id: per-type unique identifier for record
+ * @time: timestamp of the record
+ * @buf: pointer to record contents
+ * @size: size of @buf
+ * @ecc_notice_size:
+ * ECC information for @buf
+ *
+ * Valid for PSTORE_TYPE_DMESG @type:
+ *
+ * @count: Oops count since boot
+ * @reason: kdump reason for notification
+ * @part: position in a multipart record
+ * @compressed: whether the buffer is compressed
+ *
+ */
+struct pstore_record {
+ struct pstore_info *psi;
+ enum pstore_type_id type;
+ u64 id;
+ struct timespec time;
+ char *buf;
+ ssize_t size;
+ ssize_t ecc_notice_size;
+ int count;
+ enum kmsg_dump_reason reason;
+ unsigned int part;
+ bool compressed;
+};
+
+/**
+ * struct pstore_info - backend pstore driver structure
+ *
+ * @owner: module which is responsible for this backend driver
+ * @name: name of the backend driver
+ *
+ * @buf_lock: spinlock to serialize access to @buf
+ * @buf: preallocated crash dump buffer
+ * @bufsize: size of @buf available for crash dump writes
+ *
+ * @read_mutex: serializes @open, @read, @close, and @erase callbacks
+ * @flags: bitfield of frontends the backend can accept writes for
+ * @data: backend-private pointer passed back during callbacks
+ *
+ * Callbacks:
+ *
+ * @open:
+ * Notify backend that pstore is starting a full read of backend
+ * records. Followed by one or more @read calls, and a final @close.
+ *
+ * @psi: in: pointer to the struct pstore_info for the backend
+ *
+ * Returns 0 on success, and non-zero on error.
+ *
+ * @close:
+ * Notify backend that pstore has finished a full read of backend
+ * records. Always preceded by an @open call and one or more @read
+ * calls.
+ *
+ * @psi: in: pointer to the struct pstore_info for the backend
+ *
+ * Returns 0 on success, and non-zero on error. (Though pstore will
+ * ignore the error.)
+ *
+ * @read:
+ * Read next available backend record. Called after a successful
+ * @open.
+ *
+ * @record:
+ * pointer to record to populate. @buf should be allocated
+ * by the backend and filled. At least @type and @id should
+ * be populated, since these are used when creating pstorefs
+ * file names.
+ *
+ * Returns record size on success, zero when no more records are
+ * available, or negative on error.
+ *
+ * @write:
+ * A newly generated record needs to be written to backend storage.
+ *
+ * @record:
+ * pointer to record metadata. When @type is PSTORE_TYPE_DMESG,
+ * @buf will be pointing to the preallocated @psi.buf, since
+ * memory allocation may be broken during an Oops. Regardless,
+ * @buf must be processed or copied before returning. The
+ * backend is also expected to write @id with something that
+ * can help identify this record to a future @erase callback.
+ *
+ * Returns 0 on success, and non-zero on error.
+ *
+ * @write_user:
+ * Perform a frontend write to a backend record, using a specified
+ * buffer that is coming directly from userspace, instead of the
+ * @record @buf.
+ *
+ * @record: pointer to record metadata.
+ * @buf: pointer to userspace contents to write to backend
+ *
+ * Returns 0 on success, and non-zero on error.
+ *
+ * @erase:
+ * Delete a record from backend storage. Different backends
+ * identify records differently, so the entire original record is
+ * passed back to assist in identification of what the backend
+ * should remove from storage.
+ *
+ * @record: pointer to record metadata.
+ *
+ * Returns 0 on success, and non-zero on error.
+ *
+ */
struct pstore_info {
struct module *owner;
char *name;
- spinlock_t buf_lock; /* serialize access to 'buf' */
+
+ spinlock_t buf_lock;
char *buf;
size_t bufsize;
- struct mutex read_mutex; /* serialize open/read/close */
+
+ struct mutex read_mutex;
+
int flags;
+ void *data;
+
int (*open)(struct pstore_info *psi);
int (*close)(struct pstore_info *psi);
- ssize_t (*read)(u64 *id, enum pstore_type_id *type,
- int *count, struct timespec *time, char **buf,
- bool *compressed, ssize_t *ecc_notice_size,
- struct pstore_info *psi);
- int (*write)(enum pstore_type_id type,
- enum kmsg_dump_reason reason, u64 *id,
- unsigned int part, int count, bool compressed,
- size_t size, struct pstore_info *psi);
- int (*write_buf)(enum pstore_type_id type,
- enum kmsg_dump_reason reason, u64 *id,
- unsigned int part, const char *buf, bool compressed,
- size_t size, struct pstore_info *psi);
- int (*write_buf_user)(enum pstore_type_id type,
- enum kmsg_dump_reason reason, u64 *id,
- unsigned int part, const char __user *buf,
- bool compressed, size_t size, struct pstore_info *psi);
- int (*erase)(enum pstore_type_id type, u64 id,
- int count, struct timespec time,
- struct pstore_info *psi);
- void *data;
+ ssize_t (*read)(struct pstore_record *record);
+ int (*write)(struct pstore_record *record);
+ int (*write_user)(struct pstore_record *record,
+ const char __user *buf);
+ int (*erase)(struct pstore_record *record);
};
+/* Supported frontends */
#define PSTORE_FLAGS_DMESG (1 << 0)
-#define PSTORE_FLAGS_FRAGILE PSTORE_FLAGS_DMESG
#define PSTORE_FLAGS_CONSOLE (1 << 1)
#define PSTORE_FLAGS_FTRACE (1 << 2)
#define PSTORE_FLAGS_PMSG (1 << 3)
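
Tying the record-based callbacks together, a backend fills in a struct pstore_info and registers it; the hedged sketch below shows the shape of a trivial DMESG-only backend. The example_* names and static buffer are placeholders, a real backend persists @record->buf to durable storage, and pstore_register() is declared elsewhere in this header.

#include <linux/module.h>
#include <linux/pstore.h>

static char example_buf[4096];

/* Sketch: accept a dmesg record; a real backend would write it out here. */
static int example_pstore_write(struct pstore_record *record)
{
	if (record->type != PSTORE_TYPE_DMESG)
		return -EINVAL;
	/* record->buf points at the preallocated buffer; @id must identify it later. */
	record->id = 0;
	return 0;
}

static ssize_t example_pstore_read(struct pstore_record *record)
{
	return 0;	/* no stored records to report */
}

static int example_pstore_erase(struct pstore_record *record)
{
	return 0;
}

static struct pstore_info example_pstore_info = {
	.owner		= THIS_MODULE,
	.name		= "example",
	.flags		= PSTORE_FLAGS_DMESG,
	.buf		= example_buf,
	.bufsize	= sizeof(example_buf),
	.read		= example_pstore_read,
	.write		= example_pstore_write,
	.erase		= example_pstore_erase,
};
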
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 6c70444da3b9..6b2e0dd88569 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -34,11 +34,13 @@
struct ptr_ring {
int producer ____cacheline_aligned_in_smp;
spinlock_t producer_lock;
- int consumer ____cacheline_aligned_in_smp;
+ int consumer_head ____cacheline_aligned_in_smp; /* next valid entry */
+ int consumer_tail; /* next entry to invalidate */
spinlock_t consumer_lock;
/* Shared consumer/producer data */
/* Read-only by both the producer and the consumer */
int size ____cacheline_aligned_in_smp; /* max entries in queue */
+ int batch; /* number of entries to consume in a batch */
void **queue;
};
@@ -170,7 +172,7 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
if (likely(r->size))
- return r->queue[r->consumer];
+ return r->queue[r->consumer_head];
return NULL;
}
@@ -231,9 +233,38 @@ static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
/* Must only be called after __ptr_ring_peek returned !NULL */
static inline void __ptr_ring_discard_one(struct ptr_ring *r)
{
- r->queue[r->consumer++] = NULL;
- if (unlikely(r->consumer >= r->size))
- r->consumer = 0;
+ /* Fundamentally, what we want to do is update the consumer
+ * index and zero out the entry so the producer can reuse it.
+ * Doing it naively at each consume would be as simple as:
+ * r->queue[r->consumer++] = NULL;
+ * if (unlikely(r->consumer >= r->size))
+ * r->consumer = 0;
+ * but that is suboptimal when the ring is full, as the producer is
+ * writing out new entries in the same cache line. Defer these
+ * updates until a batch of entries has been consumed.
+ */
+ int head = r->consumer_head++;
+
+ /* Once we have processed enough entries invalidate them in
+ * the ring all at once so producer can reuse their space in the ring.
+ * We also do this when we reach the end of the ring - not mandatory,
+ * but it helps keep the implementation simple.
+ */
+ if (unlikely(r->consumer_head - r->consumer_tail >= r->batch ||
+ r->consumer_head >= r->size)) {
+ /* Zero out entries in reverse order: this way we touch the
+ * cache line that the producer might currently be reading last;
+ * the producer won't make progress and touch other cache lines
+ * besides the first one until we have written out all entries.
+ */
+ while (likely(head >= r->consumer_tail))
+ r->queue[head--] = NULL;
+ r->consumer_tail = r->consumer_head;
+ }
+ if (unlikely(r->consumer_head >= r->size)) {
+ r->consumer_head = 0;
+ r->consumer_tail = 0;
+ }
}
static inline void *__ptr_ring_consume(struct ptr_ring *r)
@@ -345,14 +376,27 @@ static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
}
+static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
+{
+ r->size = size;
+ r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue));
+ /* Batch must be at least 1 for the logic in
+ * __ptr_ring_discard_one to work correctly.
+ * Batching too much (because the ring is small) would cause a lot of
+ * burstiness; needs tuning, so for now disable batching in that case.
+ */
+ if (r->batch > r->size / 2 || !r->batch)
+ r->batch = 1;
+}
+
static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
{
r->queue = __ptr_ring_init_queue_alloc(size, gfp);
if (!r->queue)
return -ENOMEM;
- r->size = size;
- r->producer = r->consumer = 0;
+ __ptr_ring_set_size(r, size);
+ r->producer = r->consumer_head = r->consumer_tail = 0;
spin_lock_init(&r->producer_lock);
spin_lock_init(&r->consumer_lock);
@@ -373,9 +417,10 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
else if (destroy)
destroy(ptr);
- r->size = size;
+ __ptr_ring_set_size(r, size);
r->producer = producer;
- r->consumer = 0;
+ r->consumer_head = 0;
+ r->consumer_tail = 0;
old = r->queue;
r->queue = queue;
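
As a rough illustration (not part of this patch) of the interface whose internals change above: producers and consumers keep using the same calls, only the invalidation of consumed slots is now deferred. With SMP_CACHE_BYTES of 64 and 8-byte pointers, __ptr_ring_set_size() picks batch = 64 * 2 / 8 = 16, and rings of 32 entries or fewer fall back to batch = 1.

#include <linux/kernel.h>
#include <linux/ptr_ring.h>
#include <linux/slab.h>

static int demo_ptr_ring(void)
{
	struct ptr_ring ring;
	static int item = 42;
	void *p;
	int err;

	err = ptr_ring_init(&ring, 128, GFP_KERNEL);	/* batch becomes 16 here */
	if (err)
		return err;

	if (ptr_ring_produce(&ring, &item))		/* -ENOSPC when full */
		pr_warn("ring full\n");

	p = ptr_ring_consume(&ring);			/* NULL when empty */
	if (p)
		pr_info("consumed %d\n", *(int *)p);

	ptr_ring_cleanup(&ring, NULL);
	return 0;
}
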
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 422bc2e4cb6a..ef3eb8bbfee4 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -54,7 +54,8 @@ extern int ptrace_request(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
- struct task_struct *new_parent);
+ struct task_struct *new_parent,
+ const struct cred *ptracer_cred);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ 0x01
@@ -206,7 +207,7 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
if (unlikely(ptrace) && current->ptrace) {
child->ptrace = current->ptrace;
- __ptrace_link(child, current->parent);
+ __ptrace_link(child, current->parent, current->ptracer_cred);
if (child->ptrace & PT_SEIZED)
task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
@@ -215,6 +216,8 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
set_tsk_thread_flag(child, TIF_SIGPENDING);
}
+ else
+ child->ptracer_cred = NULL;
}
/**
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index d32f6f1a5225..e5380471c2cd 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -40,6 +40,9 @@ extern int qcom_scm_pas_shutdown(u32 peripheral);
extern void qcom_scm_cpu_power_down(u32 flags);
extern u32 qcom_scm_get_version(void);
extern int qcom_scm_set_remote_state(u32 state, u32 id);
+extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
+extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
+extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
#else
static inline
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
@@ -67,5 +70,8 @@ static inline void qcom_scm_cpu_power_down(u32 flags) {}
static inline u32 qcom_scm_get_version(void) { return 0; }
static inline u32
qcom_scm_set_remote_state(u32 state,u32 id) { return -ENODEV; }
+static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return -ENODEV; }
+static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { return -ENODEV; }
+static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { return -ENODEV; }
#endif
#endif
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 52966b9bfde3..fbab6e0514f0 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -100,8 +100,8 @@
#define MAX_NUM_LL2_TX_STATS_COUNTERS 32
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 10
-#define FW_REVISION_VERSION 10
+#define FW_MINOR_VERSION 15
+#define FW_REVISION_VERSION 3
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -187,6 +187,9 @@
/* DEMS */
#define DQ_DEMS_LEGACY 0
+#define DQ_DEMS_TOE_MORE_TO_SEND 3
+#define DQ_DEMS_TOE_LOCAL_ADV_WND 4
+#define DQ_DEMS_ROCE_CQ_CONS 7
/* XCM agg val selection */
#define DQ_XCM_AGG_VAL_SEL_WORD2 0
@@ -214,6 +217,9 @@
#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
/* UCM agg val selection (HW) */
#define DQ_UCM_AGG_VAL_SEL_WORD0 0
@@ -269,6 +275,8 @@
#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
/* UCM agg counter flag selection (HW) */
#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
@@ -285,6 +293,9 @@
#define DQ_UCM_ETH_PMD_RX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
#define DQ_UCM_ROCE_CQ_ARM_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
+#define DQ_UCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF3)
+#define DQ_UCM_TOE_SLOW_PATH_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_TOE_DQ_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
/* TCM agg counter flag selection (HW) */
#define DQ_TCM_AGG_FLG_SHIFT_CF0 0
@@ -301,6 +312,9 @@
#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
/* PWM address mapping */
#define DQ_PWM_OFFSET_DPM_BASE 0x0
@@ -689,6 +703,16 @@ struct iscsi_eqe_data {
#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
};
+struct rdma_eqe_destroy_qp {
+ __le32 cid;
+ u8 reserved[4];
+};
+
+union rdma_eqe_data {
+ struct regpair async_handle;
+ struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+};
+
struct malicious_vf_eqe_data {
u8 vf_id;
u8 err_id;
@@ -705,9 +729,9 @@ union event_ring_data {
u8 bytes[8];
struct vf_pf_channel_eqe_data vf_pf_channel;
struct iscsi_eqe_data iscsi_info;
+ union rdma_eqe_data rdma_data;
struct malicious_vf_eqe_data malicious_vf;
struct initial_cleanup_eqe_data vf_init_cleanup;
- struct regpair roce_handle;
};
/* Event Ring Entry */
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 4b402fb0eaad..34d93eb5bfba 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -49,6 +49,9 @@
#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
#define ETH_RX_NUM_NEXT_PAGE_BDS 2
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251
+
#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 2e417a45c5f7..947a635d04bb 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -109,13 +109,6 @@ struct fcoe_conn_terminate_ramrod_data {
struct regpair terminate_params_addr;
};
-struct fcoe_fast_sgl_ctx {
- struct regpair sgl_start_addr;
- __le32 sgl_byte_offset;
- __le16 task_reuse_cnt;
- __le16 init_offset_in_first_sge;
-};
-
struct fcoe_slow_sgl_ctx {
struct regpair base_sgl_addr;
__le16 curr_sge_off;
@@ -124,23 +117,16 @@ struct fcoe_slow_sgl_ctx {
__le16 reserved;
};
-struct fcoe_sge {
- struct regpair sge_addr;
- __le16 size;
- __le16 reserved0;
- u8 reserved1[3];
- u8 is_valid_sge;
-};
-
-union fcoe_data_desc_ctx {
- struct fcoe_fast_sgl_ctx fast;
- struct fcoe_slow_sgl_ctx slow;
- struct fcoe_sge single_sge;
-};
-
union fcoe_dix_desc_ctx {
struct fcoe_slow_sgl_ctx dix_sgl;
- struct fcoe_sge cached_dix_sge;
+ struct scsi_sge cached_dix_sge;
+};
+
+struct fcoe_fast_sgl_ctx {
+ struct regpair sgl_start_addr;
+ __le32 sgl_byte_offset;
+ __le16 task_reuse_cnt;
+ __le16 init_offset_in_first_sge;
};
struct fcoe_fcp_cmd_payload {
@@ -172,57 +158,6 @@ enum fcoe_mode_type {
MAX_FCOE_MODE_TYPE
};
-struct fcoe_mstorm_fcoe_task_st_ctx_fp {
- __le16 flags;
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_MASK 0x7FFF
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_SHIFT 0
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_SHIFT 15
- __le16 difDataResidue;
- __le16 parent_id;
- __le16 single_sge_saved_offset;
- __le32 data_2_trns_rem;
- __le32 offset_in_io;
- union fcoe_dix_desc_ctx dix_desc;
- union fcoe_data_desc_ctx data_desc;
-};
-
-struct fcoe_mstorm_fcoe_task_st_ctx_non_fp {
- __le16 flags;
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_MASK 0x3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_SHIFT 0
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_SHIFT 2
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_SHIFT 3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_MASK 0xF
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_SHIFT 4
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_MASK 0x3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_SHIFT 8
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_SHIFT 10
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_SHIFT 11
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_SHIFT 12
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_SHIFT 13
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_SHIFT 14
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_SHIFT 15
- u8 tx_rx_sgl_mode;
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_MASK 0x7
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_SHIFT 0
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_MASK 0x7
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_SHIFT 3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_MASK 0x3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_SHIFT 6
- u8 rsrv2;
- __le32 num_prm_zero_read;
- struct regpair rsp_buf_addr;
-};
-
struct fcoe_rx_stat {
struct regpair fcoe_rx_byte_cnt;
struct regpair fcoe_rx_data_pkt_cnt;
@@ -236,16 +171,6 @@ struct fcoe_rx_stat {
__le32 rsrv;
};
-enum fcoe_sgl_mode {
- FCOE_SLOW_SGL,
- FCOE_SINGLE_FAST_SGE,
- FCOE_2_FAST_SGE,
- FCOE_3_FAST_SGE,
- FCOE_4_FAST_SGE,
- FCOE_MUL_FAST_SGES,
- MAX_FCOE_SGL_MODE
-};
-
struct fcoe_stat_ramrod_data {
struct regpair stat_params_addr;
};
@@ -328,22 +253,24 @@ union fcoe_tx_info_union_ctx {
struct ystorm_fcoe_task_st_ctx {
u8 task_type;
u8 sgl_mode;
-#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x7
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x1F
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 3
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1
u8 cached_dix_sge;
u8 expect_first_xfer;
__le32 num_pbf_zero_write;
union protection_info_union_ctx protection_info_union;
__le32 data_2_trns_rem;
+ struct scsi_sgl_params sgl_params;
+ u8 reserved1[12];
union fcoe_tx_info_union_ctx tx_info_union;
union fcoe_dix_desc_ctx dix_desc;
- union fcoe_data_desc_ctx data_desc;
+ struct scsi_cached_sges data_desc;
__le16 ox_id;
__le16 rx_id;
__le32 task_rety_identifier;
- __le32 reserved1[2];
+ u8 reserved2[8];
};
struct ystorm_fcoe_task_ag_ctx {
@@ -484,22 +411,22 @@ struct tstorm_fcoe_task_ag_ctx {
struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
__le16 flags;
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x7
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 4
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 5
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 7
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 8
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0x3F
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 10
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8
__le16 seq_cnt;
u8 seq_id;
u8 ooo_rx_seq_id;
@@ -582,8 +509,34 @@ struct mstorm_fcoe_task_ag_ctx {
};
struct mstorm_fcoe_task_st_ctx {
- struct fcoe_mstorm_fcoe_task_st_ctx_non_fp non_fp;
- struct fcoe_mstorm_fcoe_task_st_ctx_fp fp;
+ struct regpair rsp_buf_addr;
+ __le32 rsrv[2];
+ struct scsi_sgl_params sgl_params;
+ __le32 data_2_trns_rem;
+ __le32 data_buffer_offset;
+ __le16 parent_id;
+ __le16 flags;
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14
+ struct scsi_cached_sges data_desc;
};
struct ustorm_fcoe_task_ag_ctx {
@@ -646,6 +599,7 @@ struct ustorm_fcoe_task_ag_ctx {
struct fcoe_task_context {
struct ystorm_fcoe_task_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
struct tdif_task_context tdif_context;
struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
@@ -668,20 +622,20 @@ struct fcoe_tx_stat {
struct fcoe_wqe {
__le16 task_id;
__le16 flags;
-#define FCOE_WQE_REQ_TYPE_MASK 0xF
-#define FCOE_WQE_REQ_TYPE_SHIFT 0
-#define FCOE_WQE_SGL_MODE_MASK 0x7
-#define FCOE_WQE_SGL_MODE_SHIFT 4
-#define FCOE_WQE_CONTINUATION_MASK 0x1
-#define FCOE_WQE_CONTINUATION_SHIFT 7
-#define FCOE_WQE_INVALIDATE_PTU_MASK 0x1
-#define FCOE_WQE_INVALIDATE_PTU_SHIFT 8
-#define FCOE_WQE_SUPER_IO_MASK 0x1
-#define FCOE_WQE_SUPER_IO_SHIFT 9
-#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
-#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 10
-#define FCOE_WQE_RESERVED0_MASK 0x1F
-#define FCOE_WQE_RESERVED0_SHIFT 11
+#define FCOE_WQE_REQ_TYPE_MASK 0xF
+#define FCOE_WQE_REQ_TYPE_SHIFT 0
+#define FCOE_WQE_SGL_MODE_MASK 0x1
+#define FCOE_WQE_SGL_MODE_SHIFT 4
+#define FCOE_WQE_CONTINUATION_MASK 0x1
+#define FCOE_WQE_CONTINUATION_SHIFT 5
+#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
+#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
+#define FCOE_WQE_RESERVED_MASK 0x1
+#define FCOE_WQE_RESERVED_SHIFT 7
+#define FCOE_WQE_NUM_SGES_MASK 0xF
+#define FCOE_WQE_NUM_SGES_SHIFT 8
+#define FCOE_WQE_RESERVED1_MASK 0xF
+#define FCOE_WQE_RESERVED1_SHIFT 12
union fcoe_additional_info_union additional_info_union;
};
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 4c5747babcf6..69949f8e354b 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -39,17 +39,9 @@
/* iSCSI HSI constants */
#define ISCSI_DEFAULT_MTU (1500)
-/* Current iSCSI HSI version number composed of two fields (16 bit) */
-#define ISCSI_HSI_MAJOR_VERSION (0)
-#define ISCSI_HSI_MINOR_VERSION (0)
-
/* KWQ (kernel work queue) layer codes */
#define ISCSI_SLOW_PATH_LAYER_CODE (6)
-/* CQE completion status */
-#define ISCSI_EQE_COMPLETION_SUCCESS (0x0)
-#define ISCSI_EQE_RST_CONN_RCVD (0x1)
-
/* iSCSI parameter defaults */
#define ISCSI_DEFAULT_HEADER_DIGEST (0)
#define ISCSI_DEFAULT_DATA_DIGEST (0)
@@ -68,6 +60,10 @@
#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1)
#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff)
+#define ISCSI_AHS_CNTL_SIZE 4
+
+#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf)
+
/* iSCSI reserved params */
#define ISCSI_ITT_ALL_ONES (0xffffffff)
#define ISCSI_TTT_ALL_ONES (0xffffffff)
@@ -173,19 +169,6 @@ struct iscsi_async_msg_hdr {
__le32 reserved7;
};
-struct iscsi_sge {
- struct regpair sge_addr;
- __le16 sge_len;
- __le16 reserved0;
- __le32 reserved1;
-};
-
-struct iscsi_cached_sge_ctx {
- struct iscsi_sge sge;
- struct regpair reserved;
- __le32 dsgl_curr_offset[2];
-};
-
struct iscsi_cmd_hdr {
__le16 reserved1;
u8 flags_attr;
@@ -229,8 +212,13 @@ struct iscsi_common_hdr {
#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
- __le32 lun_reserved[4];
- __le32 data[6];
+ struct regpair lun_reserved;
+ __le32 itt;
+ __le32 ttt;
+ __le32 cmdstat_sn;
+ __le32 exp_statcmd_sn;
+ __le32 max_cmd_sn;
+ __le32 data[3];
};
struct iscsi_conn_offload_params {
@@ -246,8 +234,10 @@ struct iscsi_conn_offload_params {
#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x3F
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
u8 pbl_page_size_log;
u8 pbe_page_size_log;
u8 default_cq;
@@ -278,8 +268,12 @@ struct iscsi_conn_update_ramrod_params {
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0xF
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6
u8 reserved0[3];
__le32 max_seq_size;
__le32 max_send_pdu_length;
@@ -312,7 +306,7 @@ struct iscsi_ext_cdb_cmd_hdr {
__le32 expected_transfer_length;
__le32 cmd_sn;
__le32 exp_stat_sn;
- struct iscsi_sge cdb_sge;
+ struct scsi_sge cdb_sge;
};
struct iscsi_login_req_hdr {
@@ -519,8 +513,8 @@ struct iscsi_logout_response_hdr {
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
__le32 reserved4;
- __le16 time2retain;
- __le16 time2wait;
+ __le16 time_2_retain;
+ __le16 time_2_wait;
__le32 reserved5[1];
};
@@ -602,7 +596,7 @@ struct iscsi_tmf_response_hdr {
#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair reserved0;
__le32 itt;
- __le32 rtt;
+ __le32 reserved1;
__le32 stat_sn;
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
@@ -641,7 +635,7 @@ struct iscsi_reject_hdr {
#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair reserved0;
- __le32 reserved1;
+ __le32 all_ones;
__le32 reserved2;
__le32 stat_sn;
__le32 exp_cmd_sn;
@@ -688,7 +682,9 @@ struct iscsi_cqe_solicited {
__le16 itid;
u8 task_type;
u8 fw_dbg_field;
- __le32 reserved1[2];
+ u8 caused_conn_err;
+ u8 reserved0[3];
+ __le32 reserved1[1];
union iscsi_task_hdr iscsi_hdr;
};
@@ -727,35 +723,6 @@ enum iscsi_cqe_unsolicited_type {
MAX_ISCSI_CQE_UNSOLICITED_TYPE
};
-struct iscsi_virt_sgl_ctx {
- struct regpair sgl_base;
- struct regpair dsgl_base;
- __le32 sgl_initial_offset;
- __le32 dsgl_initial_offset;
- __le32 dsgl_curr_offset[2];
-};
-
-struct iscsi_sgl_var_params {
- u8 sgl_ptr;
- u8 dsgl_ptr;
- __le16 sge_offset;
- __le16 dsge_offset;
-};
-
-struct iscsi_phys_sgl_ctx {
- struct regpair sgl_base;
- struct regpair dsgl_base;
- u8 sgl_size;
- u8 dsgl_size;
- __le16 reserved;
- struct iscsi_sgl_var_params var_params[2];
-};
-
-union iscsi_data_desc_ctx {
- struct iscsi_virt_sgl_ctx virt_sgl;
- struct iscsi_phys_sgl_ctx phys_sgl;
- struct iscsi_cached_sge_ctx cached_sge;
-};
struct iscsi_debug_modes {
u8 flags;
@@ -771,8 +738,10 @@ struct iscsi_debug_modes {
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
-#define ISCSI_DEBUG_MODES_RESERVED0_MASK 0x3
-#define ISCSI_DEBUG_MODES_RESERVED0_SHIFT 6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7
};
struct iscsi_dif_flags {
@@ -806,7 +775,6 @@ enum iscsi_eqe_opcode {
ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2,
ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR,
ISCSI_EVENT_TYPE_TCP_CONN_ERROR,
- ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES,
MAX_ISCSI_EQE_OPCODE
};
@@ -856,31 +824,11 @@ enum iscsi_error_types {
ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX,
ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR,
+ ISCSI_CONN_ERROR_INVALID_ITT,
ISCSI_ERROR_UNKNOWN,
MAX_ISCSI_ERROR_TYPES
};
-struct iscsi_mflags {
- u8 mflags;
-#define ISCSI_MFLAGS_SLOW_IO_MASK 0x1
-#define ISCSI_MFLAGS_SLOW_IO_SHIFT 0
-#define ISCSI_MFLAGS_SINGLE_SGE_MASK 0x1
-#define ISCSI_MFLAGS_SINGLE_SGE_SHIFT 1
-#define ISCSI_MFLAGS_RESERVED_MASK 0x3F
-#define ISCSI_MFLAGS_RESERVED_SHIFT 2
-};
-
-struct iscsi_sgl {
- struct regpair sgl_addr;
- __le16 updated_sge_size;
- __le16 updated_sge_offset;
- __le32 byte_offset;
-};
-
-union iscsi_mstorm_sgl {
- struct iscsi_sgl sgl_struct;
- struct iscsi_sge single_sge;
-};
enum iscsi_ramrod_cmd_id {
ISCSI_RAMROD_CMD_ID_UNUSED = 0,
@@ -896,10 +844,10 @@ enum iscsi_ramrod_cmd_id {
struct iscsi_reg1 {
__le32 reg1_map;
-#define ISCSI_REG1_NUM_FAST_SGES_MASK 0x7
-#define ISCSI_REG1_NUM_FAST_SGES_SHIFT 0
-#define ISCSI_REG1_RESERVED1_MASK 0x1FFFFFFF
-#define ISCSI_REG1_RESERVED1_SHIFT 3
+#define ISCSI_REG1_NUM_SGES_MASK 0xF
+#define ISCSI_REG1_NUM_SGES_SHIFT 0
+#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF
+#define ISCSI_REG1_RESERVED1_SHIFT 4
};
union iscsi_seq_num {
@@ -967,22 +915,33 @@ struct iscsi_spe_func_init {
};
struct ystorm_iscsi_task_state {
- union iscsi_data_desc_ctx sgl_ctx_union;
- __le32 buffer_offset[2];
- __le16 bytes_nxt_dif;
- __le16 rxmit_bytes_nxt_dif;
- union iscsi_seq_num seq_num_union;
- u8 dif_bytes_leftover;
- u8 rxmit_dif_bytes_leftover;
- __le16 reuse_count;
- struct iscsi_dif_flags dif_flags;
- u8 local_comp;
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
__le32 exp_r2t_sn;
- __le32 sgl_offset[2];
+ __le32 buffer_offset;
+ union iscsi_seq_num seq_num;
+ struct iscsi_dif_flags dif_flags;
+ u8 flags;
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2
+};
+
+struct ystorm_iscsi_task_rxmit_opt {
+ __le32 fast_rxmit_sge_offset;
+ __le32 scan_start_buffer_offset;
+ __le32 fast_rxmit_buffer_offset;
+ u8 scan_start_sgl_index;
+ u8 fast_rxmit_sgl_index;
+ __le16 reserved;
};
struct ystorm_iscsi_task_st_ctx {
struct ystorm_iscsi_task_state state;
+ struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
union iscsi_task_hdr pdu_hdr;
};
@@ -1152,25 +1111,16 @@ struct ustorm_iscsi_task_ag_ctx {
};
struct mstorm_iscsi_task_st_ctx {
- union iscsi_mstorm_sgl sgl_union;
- struct iscsi_dif_flags dif_flags;
- struct iscsi_mflags flags;
- u8 sgl_size;
- u8 host_sge_index;
- __le16 dix_cur_sge_offset;
- __le16 dix_cur_sge_size;
- __le32 data_offset_rtid;
- u8 dif_offset;
- u8 dix_sgl_size;
- u8 dix_sge_index;
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 rem_task_size;
+ __le32 data_buffer_offset;
u8 task_type;
+ struct iscsi_dif_flags dif_flags;
+ u8 reserved0[2];
struct regpair sense_db;
- struct regpair dix_sgl_cur_sge;
- __le32 rem_task_size;
- __le16 reuse_count;
- __le16 dif_data_residue;
- u8 reserved0[4];
- __le32 reserved1[1];
+ __le32 expected_itt;
+ __le32 reserved1;
};
struct ustorm_iscsi_task_st_ctx {
@@ -1184,7 +1134,7 @@ struct ustorm_iscsi_task_st_ctx {
#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
- u8 reserved2;
+ struct iscsi_dif_flags dif_flags;
__le16 reserved3;
__le32 reserved4;
__le32 reserved5;
@@ -1207,10 +1157,10 @@ struct ustorm_iscsi_task_st_ctx {
#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
-#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_SHIFT 4
-#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_SHIFT 5
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1
@@ -1220,7 +1170,6 @@ struct ustorm_iscsi_task_st_ctx {
struct iscsi_task_context {
struct ystorm_iscsi_task_st_ctx ystorm_st_context;
- struct regpair ystorm_st_padding[2];
struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
struct regpair ystorm_ag_padding[2];
struct tdif_task_context tdif_context;
@@ -1272,32 +1221,22 @@ struct iscsi_uhqe {
#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
};
-struct iscsi_wqe_field {
- __le32 contlen_cdbsize_field;
-#define ISCSI_WQE_FIELD_CONT_LEN_MASK 0xFFFFFF
-#define ISCSI_WQE_FIELD_CONT_LEN_SHIFT 0
-#define ISCSI_WQE_FIELD_CDB_SIZE_MASK 0xFF
-#define ISCSI_WQE_FIELD_CDB_SIZE_SHIFT 24
-};
-
-union iscsi_wqe_field_union {
- struct iscsi_wqe_field cont_field;
- __le32 prev_tid;
-};
struct iscsi_wqe {
__le16 task_id;
u8 flags;
#define ISCSI_WQE_WQE_TYPE_MASK 0x7
#define ISCSI_WQE_WQE_TYPE_SHIFT 0
-#define ISCSI_WQE_NUM_FAST_SGES_MASK 0x7
-#define ISCSI_WQE_NUM_FAST_SGES_SHIFT 3
-#define ISCSI_WQE_PTU_INVALIDATE_MASK 0x1
-#define ISCSI_WQE_PTU_INVALIDATE_SHIFT 6
+#define ISCSI_WQE_NUM_SGES_MASK 0xF
+#define ISCSI_WQE_NUM_SGES_SHIFT 3
#define ISCSI_WQE_RESPONSE_MASK 0x1
#define ISCSI_WQE_RESPONSE_SHIFT 7
struct iscsi_dif_flags prot_flags;
- union iscsi_wqe_field_union cont_prevtid_union;
+ __le32 contlen_cdbsize;
+#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF
+#define ISCSI_WQE_CONT_LEN_SHIFT 0
+#define ISCSI_WQE_CDB_SIZE_MASK 0xFF
+#define ISCSI_WQE_CDB_SIZE_SHIFT 24
};
enum iscsi_wqe_type {
@@ -1318,17 +1257,15 @@ struct iscsi_xhqe {
u8 total_ahs_length;
u8 opcode;
u8 flags;
-#define ISCSI_XHQE_NUM_FAST_SGES_MASK 0x7
-#define ISCSI_XHQE_NUM_FAST_SGES_SHIFT 0
-#define ISCSI_XHQE_FINAL_MASK 0x1
-#define ISCSI_XHQE_FINAL_SHIFT 3
-#define ISCSI_XHQE_SUPER_IO_MASK 0x1
-#define ISCSI_XHQE_SUPER_IO_SHIFT 4
-#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
-#define ISCSI_XHQE_STATUS_BIT_SHIFT 5
-#define ISCSI_XHQE_RESERVED_MASK 0x3
-#define ISCSI_XHQE_RESERVED_SHIFT 6
- union iscsi_seq_num seq_num_union;
+#define ISCSI_XHQE_FINAL_MASK 0x1
+#define ISCSI_XHQE_FINAL_SHIFT 0
+#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
+#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
+#define ISCSI_XHQE_NUM_SGES_MASK 0xF
+#define ISCSI_XHQE_NUM_SGES_SHIFT 2
+#define ISCSI_XHQE_RESERVED0_MASK 0x3
+#define ISCSI_XHQE_RESERVED0_SHIFT 6
+ union iscsi_seq_num seq_num;
__le16 reserved1;
};
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 4cd1f0ccfa36..d66d16a559e1 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -158,15 +158,27 @@ struct qed_tunn_params {
struct qed_eth_cb_ops {
struct qed_common_cb_ops common;
void (*force_mac) (void *dev, u8 *mac, bool forced);
+ void (*ports_update)(void *dev, u16 vxlan_port, u16 geneve_port);
};
#define QED_MAX_PHC_DRIFT_PPB 291666666
enum qed_ptp_filter_type {
- QED_PTP_FILTER_L2,
- QED_PTP_FILTER_IPV4,
- QED_PTP_FILTER_IPV4_IPV6,
- QED_PTP_FILTER_L2_IPV4_IPV6
+ QED_PTP_FILTER_NONE,
+ QED_PTP_FILTER_ALL,
+ QED_PTP_FILTER_V1_L4_EVENT,
+ QED_PTP_FILTER_V1_L4_GEN,
+ QED_PTP_FILTER_V2_L4_EVENT,
+ QED_PTP_FILTER_V2_L4_GEN,
+ QED_PTP_FILTER_V2_L2_EVENT,
+ QED_PTP_FILTER_V2_L2_GEN,
+ QED_PTP_FILTER_V2_EVENT,
+ QED_PTP_FILTER_V2_GEN
+};
+
+enum qed_ptp_hwtstamp_tx_type {
+ QED_PTP_HWTSTAMP_TX_OFF,
+ QED_PTP_HWTSTAMP_TX_ON,
};
#ifdef CONFIG_DCB
@@ -229,8 +241,8 @@ struct qed_eth_dcbnl_ops {
#endif
struct qed_eth_ptp_ops {
- int (*hwtstamp_tx_on)(struct qed_dev *);
- int (*cfg_rx_filters)(struct qed_dev *, enum qed_ptp_filter_type);
+ int (*cfg_filters)(struct qed_dev *, enum qed_ptp_filter_type,
+ enum qed_ptp_hwtstamp_tx_type);
int (*read_rx_ts)(struct qed_dev *, u64 *);
int (*read_tx_ts)(struct qed_dev *, u64 *);
int (*read_cc)(struct qed_dev *, u64 *);
@@ -301,6 +313,14 @@ struct qed_eth_ops {
int (*tunn_config)(struct qed_dev *cdev,
struct qed_tunn_params *params);
+
+ int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie,
+ dma_addr_t mapping, u16 length,
+ u16 vport_id, u16 rx_queue_id,
+ bool add_filter);
+
+ int (*configure_arfs_searcher)(struct qed_dev *cdev,
+ bool en_searcher);
};
const struct qed_eth_ops *qed_get_eth_ops(void);
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index fde56c436f71..c70ac13a97e6 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -144,6 +144,7 @@ struct qed_dcbx_operational_params {
bool enabled;
bool ieee;
bool cee;
+ bool local;
u32 err;
};
@@ -178,6 +179,12 @@ struct qed_eth_pf_params {
* to update_pf_params routine invoked before slowpath start
*/
u16 num_cons;
+
+ /* To enable arfs, a positive number needs to be set prior to HW-init
+ * [as filters require allocated searcher ILT memory].
+ * This sets the maximal number of configured steering-filters.
+ */
+ u32 num_arfs_filters;
};
struct qed_fcoe_pf_params {
@@ -263,7 +270,6 @@ struct qed_rdma_pf_params {
* the doorbell BAR).
*/
u32 min_dpis; /* number of requested DPIs */
- u32 num_mrs; /* number of requested memory regions */
u32 num_qps; /* number of requested Queue Pairs */
u32 num_srqs; /* number of requested SRQ */
u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */
@@ -300,6 +306,11 @@ struct qed_sb_info {
struct qed_dev *cdev;
};
+enum qed_dev_type {
+ QED_DEV_TYPE_BB,
+ QED_DEV_TYPE_AH,
+};
+
struct qed_dev_info {
unsigned long pci_mem_start;
unsigned long pci_mem_end;
@@ -325,6 +336,13 @@ struct qed_dev_info {
u16 mtu;
bool wol_support;
+
+ enum qed_dev_type dev_type;
+
+ /* Output parameters for qede */
+ bool vxlan_enable;
+ bool gre_enable;
+ bool geneve_enable;
};
enum qed_sb_type {
@@ -421,6 +439,7 @@ struct qed_int_info {
};
struct qed_common_cb_ops {
+ void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
void (*link_update)(void *dev,
struct qed_link_output *link);
void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
@@ -616,7 +635,7 @@ struct qed_common_ops {
* @return 0 on success, error otherwise.
*/
int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
- u8 qid, u16 sb_id);
+ u16 qid, u16 sb_id);
/**
* @brief set_led - Configure LED mode
@@ -752,7 +771,7 @@ enum qed_mf_mode {
QED_MF_NPAR,
};
-struct qed_eth_stats {
+struct qed_eth_stats_common {
u64 no_buff_discards;
u64 packet_too_big_discard;
u64 ttl0_discard;
@@ -784,11 +803,6 @@ struct qed_eth_stats {
u64 rx_256_to_511_byte_packets;
u64 rx_512_to_1023_byte_packets;
u64 rx_1024_to_1518_byte_packets;
- u64 rx_1519_to_1522_byte_packets;
- u64 rx_1519_to_2047_byte_packets;
- u64 rx_2048_to_4095_byte_packets;
- u64 rx_4096_to_9216_byte_packets;
- u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
@@ -805,14 +819,8 @@ struct qed_eth_stats {
u64 tx_256_to_511_byte_packets;
u64 tx_512_to_1023_byte_packets;
u64 tx_1024_to_1518_byte_packets;
- u64 tx_1519_to_2047_byte_packets;
- u64 tx_2048_to_4095_byte_packets;
- u64 tx_4096_to_9216_byte_packets;
- u64 tx_9217_to_16383_byte_packets;
u64 tx_pause_frames;
u64 tx_pfc_frames;
- u64 tx_lpi_entry_count;
- u64 tx_total_collisions;
u64 brb_truncates;
u64 brb_discards;
u64 rx_mac_bytes;
@@ -827,6 +835,34 @@ struct qed_eth_stats {
u64 tx_mac_ctrl_frames;
};
+struct qed_eth_stats_bb {
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
+ u64 tx_1519_to_2047_byte_packets;
+ u64 tx_2048_to_4095_byte_packets;
+ u64 tx_4096_to_9216_byte_packets;
+ u64 tx_9217_to_16383_byte_packets;
+ u64 tx_lpi_entry_count;
+ u64 tx_total_collisions;
+};
+
+struct qed_eth_stats_ah {
+ u64 rx_1519_to_max_byte_packets;
+ u64 tx_1519_to_max_byte_packets;
+};
+
+struct qed_eth_stats {
+ struct qed_eth_stats_common common;
+
+ union {
+ struct qed_eth_stats_bb bb;
+ struct qed_eth_stats_ah ah;
+ };
+};
+
#define QED_SB_IDX 0x0002
#define RX_PI 0
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index f70bb81b8b6a..3414649133d2 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -67,6 +67,8 @@ struct qed_dev_iscsi_info {
void __iomem *primary_dbq_rq_addr;
void __iomem *secondary_bdq_rq_addr;
+
+ u8 num_cqs;
};
struct qed_iscsi_id_params {
diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h
index f742d4312c9d..cbb2ff0ce4bc 100644
--- a/include/linux/qed/qed_roce_if.h
+++ b/include/linux/qed/qed_roce_if.h
@@ -240,6 +240,7 @@ struct qed_rdma_add_user_out_params {
u64 dpi_addr;
u64 dpi_phys_addr;
u32 dpi_size;
+ u16 wid_count;
};
enum roce_mode {
@@ -533,6 +534,7 @@ enum qed_rdma_type {
struct qed_dev_rdma_info {
struct qed_dev_info common;
enum qed_rdma_type rdma_type;
+ u8 user_dpm_enabled;
};
struct qed_rdma_ops {
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index f773aa5e746f..72c770f9f666 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -52,7 +52,8 @@
#define RDMA_MAX_PDS (64 * 1024)
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
-#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
+#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
+#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index bad02df213df..866f063026de 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -38,4 +38,21 @@
#define ROCE_MAX_QPS (32 * 1024)
+enum roce_async_events_type {
+ ROCE_ASYNC_EVENT_NONE = 0,
+ ROCE_ASYNC_EVENT_COMM_EST = 1,
+ ROCE_ASYNC_EVENT_SQ_DRAINED,
+ ROCE_ASYNC_EVENT_SRQ_LIMIT,
+ ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
+ ROCE_ASYNC_EVENT_CQ_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
+ ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
+ ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
+ ROCE_ASYNC_EVENT_SRQ_EMPTY,
+ ROCE_ASYNC_EVENT_DESTROY_QP_DONE,
+ MAX_ROCE_ASYNC_EVENTS_TYPE
+};
+
#endif /* __ROCE_COMMON__ */
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 03f3e37ab059..08df82a096b6 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -40,6 +40,8 @@
#define BDQ_ID_IMM_DATA (1)
#define BDQ_NUM_IDS (2)
+#define SCSI_NUM_SGES_SLOW_SGL_THR 8
+
#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
struct scsi_bd {
@@ -52,6 +54,16 @@ struct scsi_bdq_ram_drv_data {
__le16 reserved0[3];
};
+struct scsi_sge {
+ struct regpair sge_addr;
+ __le32 sge_len;
+ __le32 reserved;
+};
+
+struct scsi_cached_sges {
+ struct scsi_sge sge[4];
+};
+
struct scsi_drv_cmdq {
__le16 cmdq_cons;
__le16 reserved0;
@@ -99,11 +111,19 @@ struct scsi_ram_per_bdq_resource_drv_data {
struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
};
-struct scsi_sge {
- struct regpair sge_addr;
- __le16 sge_len;
- __le16 reserved0;
- __le32 reserved1;
+enum scsi_sgl_mode {
+ SCSI_TX_SLOW_SGL,
+ SCSI_FAST_SGL,
+ MAX_SCSI_SGL_MODE
+};
+
+struct scsi_sgl_params {
+ struct regpair sgl_addr;
+ __le32 sgl_total_length;
+ __le32 sge_offset;
+ __le16 sgl_num_sges;
+ u8 sgl_index;
+ u8 reserved;
};
struct scsi_terminate_extra_params {
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index 46fe7856f1b2..a5e843268f0e 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -173,6 +173,7 @@ enum tcp_seg_placement_event {
TCP_EVENT_ADD_ISLE_RIGHT,
TCP_EVENT_ADD_ISLE_LEFT,
TCP_EVENT_JOIN,
+ TCP_EVENT_DELETE_ISLES,
TCP_EVENT_NOP,
MAX_TCP_SEG_PLACEMENT_EVENT
};
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 799a63d0e1a8..9c6f768b7d32 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -162,7 +162,6 @@ static inline bool sb_has_quota_active(struct super_block *sb, int type)
* Operations supported for diskquotas.
*/
extern const struct dquot_operations dquot_operations;
-extern const struct quotactl_ops dquot_quotactl_ops;
extern const struct quotactl_ops dquot_quotactl_sysfile_ops;
#else
diff --git a/include/linux/ras.h b/include/linux/ras.h
index 2aceeafd6fe5..ffb147185e8d 100644
--- a/include/linux/ras.h
+++ b/include/linux/ras.h
@@ -1,14 +1,25 @@
#ifndef __RAS_H__
#define __RAS_H__
+#include <asm/errno.h>
+
#ifdef CONFIG_DEBUG_FS
int ras_userspace_consumers(void);
void ras_debugfs_init(void);
int ras_add_daemon_trace(void);
#else
static inline int ras_userspace_consumers(void) { return 0; }
-static inline void ras_debugfs_init(void) { return; }
+static inline void ras_debugfs_init(void) { }
static inline int ras_add_daemon_trace(void) { return 0; }
#endif
+#ifdef CONFIG_RAS_CEC
+void __init cec_init(void);
+int __init parse_cec_param(char *str);
+int cec_add_elem(u64 pfn);
+#else
+static inline void __init cec_init(void) { }
+static inline int cec_add_elem(u64 pfn) { return -ENODEV; }
#endif
+
+#endif /* __RAS_H__ */
diff --git a/include/linux/rcu_node_tree.h b/include/linux/rcu_node_tree.h
new file mode 100644
index 000000000000..4b766b61e1a0
--- /dev/null
+++ b/include/linux/rcu_node_tree.h
@@ -0,0 +1,99 @@
+/*
+ * RCU node combining tree definitions. These are used to compute
+ * global attributes while avoiding common-case global contention. A key
+ * property that these computations rely on is a tournament-style approach
+ * where only one of the tasks contending a lower level in the tree need
+ * advance to the next higher level. If properly configured, this allows
+ * unlimited scalability while maintaining a constant level of contention
+ * on the root node.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright IBM Corporation, 2017
+ *
+ * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+#ifndef __LINUX_RCU_NODE_TREE_H
+#define __LINUX_RCU_NODE_TREE_H
+
+/*
+ * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
+ * CONFIG_RCU_FANOUT_LEAF.
+ * In theory, it should be possible to add more levels straightforwardly.
+ * In practice, this did work well going from three levels to four.
+ * Of course, your mileage may vary.
+ */
+
+#ifdef CONFIG_RCU_FANOUT
+#define RCU_FANOUT CONFIG_RCU_FANOUT
+#else /* #ifdef CONFIG_RCU_FANOUT */
+# ifdef CONFIG_64BIT
+# define RCU_FANOUT 64
+# else
+# define RCU_FANOUT 32
+# endif
+#endif /* #else #ifdef CONFIG_RCU_FANOUT */
+
+#ifdef CONFIG_RCU_FANOUT_LEAF
+#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
+#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */
+#define RCU_FANOUT_LEAF 16
+#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */
+
+#define RCU_FANOUT_1 (RCU_FANOUT_LEAF)
+#define RCU_FANOUT_2 (RCU_FANOUT_1 * RCU_FANOUT)
+#define RCU_FANOUT_3 (RCU_FANOUT_2 * RCU_FANOUT)
+#define RCU_FANOUT_4 (RCU_FANOUT_3 * RCU_FANOUT)
+
+#if NR_CPUS <= RCU_FANOUT_1
+# define RCU_NUM_LVLS 1
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_NODES NUM_RCU_LVL_0
+# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0 }
+# define RCU_NODE_NAME_INIT { "rcu_node_0" }
+# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0" }
+#elif NR_CPUS <= RCU_FANOUT_2
+# define RCU_NUM_LVLS 2
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
+# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
+# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1" }
+# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1" }
+#elif NR_CPUS <= RCU_FANOUT_3
+# define RCU_NUM_LVLS 3
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
+# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
+# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
+# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
+# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
+#elif NR_CPUS <= RCU_FANOUT_4
+# define RCU_NUM_LVLS 4
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
+# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
+# define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
+# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
+# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
+# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
+#else
+# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
+#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */
+
+#endif /* __LINUX_RCU_NODE_TREE_H */
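
As a worked example of the shape selection above (an illustration, not part of the patch): with the 64-bit defaults RCU_FANOUT = 64 and RCU_FANOUT_LEAF = 16, RCU_FANOUT_1 = 16 and RCU_FANOUT_2 = 16 * 64 = 1024, so an NR_CPUS = 1000 build takes the two-level branch: RCU_NUM_LVLS = 2, NUM_RCU_LVL_1 = DIV_ROUND_UP(1000, 16) = 63 leaf nodes, and NUM_RCU_NODES = 1 + 63 = 64, leaving at most 16 CPUs contending on any leaf and 63 leaves contending on the single root.
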
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
new file mode 100644
index 000000000000..ba4d2621d9ca
--- /dev/null
+++ b/include/linux/rcu_segcblist.h
@@ -0,0 +1,90 @@
+/*
+ * RCU segmented callback lists
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright IBM Corporation, 2017
+ *
+ * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+#ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H
+#define __INCLUDE_LINUX_RCU_SEGCBLIST_H
+
+/* Simple unsegmented callback lists. */
+struct rcu_cblist {
+ struct rcu_head *head;
+ struct rcu_head **tail;
+ long len;
+ long len_lazy;
+};
+
+#define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head }
+
+/* Complicated segmented callback lists. ;-) */
+
+/*
+ * Index values for segments in rcu_segcblist structure.
+ *
+ * The segments are as follows:
+ *
+ * [head, *tails[RCU_DONE_TAIL]):
+ * Callbacks whose grace period has elapsed, and thus can be invoked.
+ * [*tails[RCU_DONE_TAIL], *tails[RCU_WAIT_TAIL]):
+ * Callbacks waiting for the current GP from the current CPU's viewpoint.
+ * [*tails[RCU_WAIT_TAIL], *tails[RCU_NEXT_READY_TAIL]):
+ * Callbacks that arrived before the next GP started, again from
+ * the current CPU's viewpoint. These can be handled by the next GP.
+ * [*tails[RCU_NEXT_READY_TAIL], *tails[RCU_NEXT_TAIL]):
+ * Callbacks that might have arrived after the next GP started.
+ * There is some uncertainty as to when a given GP starts and
+ * ends, but a CPU knows the exact times if it is the one starting
+ * or ending the GP. Other CPUs know that the previous GP ends
+ * before the next one starts.
+ *
+ * Note that RCU_WAIT_TAIL cannot be empty unless RCU_NEXT_READY_TAIL is also
+ * empty.
+ *
+ * The ->gp_seq[] array contains the grace-period number at which the
+ * corresponding segment of callbacks will be ready to invoke. A given
+ * element of this array is meaningful only when the corresponding segment
+ * is non-empty, and it is never valid for RCU_DONE_TAIL (whose callbacks
+ * are already ready to invoke) or for RCU_NEXT_TAIL (whose callbacks have
+ * not yet been assigned a grace-period number).
+ */
+#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
+#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
+#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */
+#define RCU_NEXT_TAIL 3
+#define RCU_CBLIST_NSEGS 4
+
+struct rcu_segcblist {
+ struct rcu_head *head;
+ struct rcu_head **tails[RCU_CBLIST_NSEGS];
+ unsigned long gp_seq[RCU_CBLIST_NSEGS];
+ long len;
+ long len_lazy;
+};
+
+#define RCU_SEGCBLIST_INITIALIZER(n) \
+{ \
+ .head = NULL, \
+ .tails[RCU_DONE_TAIL] = &n.head, \
+ .tails[RCU_WAIT_TAIL] = &n.head, \
+ .tails[RCU_NEXT_READY_TAIL] = &n.head, \
+ .tails[RCU_NEXT_TAIL] = &n.head, \
+}
+
+#endif /* __INCLUDE_LINUX_RCU_SEGCBLIST_H */
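
A minimal sketch (not from the patch) of how the initializers above are intended to be used; the helpers that actually move callbacks between segments live in the RCU implementation proper and are not shown here.

#include <linux/rcu_segcblist.h>

/* An empty segmented list: ->head is NULL and all four tail pointers
 * reference ->head, so every segment is empty and shares the same edge.
 */
static struct rcu_segcblist demo_seg_cbs = RCU_SEGCBLIST_INITIALIZER(demo_seg_cbs);

/* The simple unsegmented variant, initialized the same way. */
static struct rcu_cblist demo_cbs = RCU_CBLIST_INITIALIZER(demo_cbs);
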
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4f7a9561b8c4..b1fd8bf85fdc 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -509,7 +509,8 @@ static inline void hlist_add_tail_rcu(struct hlist_node *n,
{
struct hlist_node *i, *last = NULL;
- for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i))
+ /* Note: write side code, so rcu accessors are not needed. */
+ for (i = h->first; i; i = i->next)
last = i;
if (last) {
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index de88b33c0974..e1e5d002fdb9 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -97,6 +97,7 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
unsigned long secs,
unsigned long c_old,
unsigned long c);
+bool rcu_irq_enter_disabled(void);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
int *flags,
@@ -113,6 +114,10 @@ static inline void rcutorture_record_test_transition(void)
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
+static inline bool rcu_irq_enter_disabled(void)
+{
+ return false;
+}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
struct rcu_head *rhp,
@@ -363,15 +368,20 @@ static inline void rcu_init_nohz(void)
#ifdef CONFIG_TASKS_RCU
#define TASKS_RCU(x) x
extern struct srcu_struct tasks_rcu_exit_srcu;
-#define rcu_note_voluntary_context_switch(t) \
+#define rcu_note_voluntary_context_switch_lite(t) \
do { \
- rcu_all_qs(); \
if (READ_ONCE((t)->rcu_tasks_holdout)) \
WRITE_ONCE((t)->rcu_tasks_holdout, false); \
} while (0)
+#define rcu_note_voluntary_context_switch(t) \
+ do { \
+ rcu_all_qs(); \
+ rcu_note_voluntary_context_switch_lite(t); \
+ } while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
-#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
+#define rcu_note_voluntary_context_switch_lite(t) do { } while (0)
+#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
#endif /* #else #ifdef CONFIG_TASKS_RCU */
/**
@@ -1127,11 +1137,11 @@ do { \
* if the UNLOCK and LOCK are executed by the same CPU or if the
* UNLOCK and LOCK operate on the same lock variable.
*/
-#ifdef CONFIG_PPC
+#ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE
#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */
-#else /* #ifdef CONFIG_PPC */
+#else /* #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
#define smp_mb__after_unlock_lock() do { } while (0)
-#endif /* #else #ifdef CONFIG_PPC */
+#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index b452953e21c8..74d9c3a1feee 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -33,6 +33,11 @@ static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
return 0;
}
+static inline bool rcu_eqs_special_set(int cpu)
+{
+ return false; /* Never flag non-existent other CPUs! */
+}
+
static inline unsigned long get_state_synchronize_rcu(void)
{
return 0;
@@ -87,10 +92,11 @@ static inline void kfree_call_rcu(struct rcu_head *head,
call_rcu(head, func);
}
-static inline void rcu_note_context_switch(void)
-{
- rcu_sched_qs();
-}
+#define rcu_note_context_switch(preempt) \
+ do { \
+ rcu_sched_qs(); \
+ rcu_note_voluntary_context_switch_lite(current); \
+ } while (0)
/*
* Take advantage of the fact that there is only one CPU, which
@@ -212,14 +218,14 @@ static inline void exit_rcu(void)
{
}
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
extern int rcu_scheduler_active __read_mostly;
void rcu_scheduler_starting(void);
-#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#else /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
static inline void rcu_scheduler_starting(void)
{
}
-#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* #else #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
@@ -237,6 +243,10 @@ static inline bool rcu_is_watching(void)
#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+static inline void rcu_request_urgent_qs_task(struct task_struct *t)
+{
+}
+
static inline void rcu_all_qs(void)
{
barrier(); /* Avoid RCU read-side critical sections leaking across. */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 63a4e4cf40a5..0bacb6b2af69 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,7 +30,7 @@
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H
-void rcu_note_context_switch(void);
+void rcu_note_context_switch(bool preempt);
int rcu_needs_cpu(u64 basem, u64 *nextevt);
void rcu_cpu_stall_reset(void);
@@ -41,7 +41,7 @@ void rcu_cpu_stall_reset(void);
*/
static inline void rcu_virt_note_context_switch(int cpu)
{
- rcu_note_context_switch();
+ rcu_note_context_switch(false);
}
void synchronize_rcu_bh(void);
@@ -108,6 +108,7 @@ void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
bool rcu_is_watching(void);
+void rcu_request_urgent_qs_task(struct task_struct *t);
void rcu_all_qs(void);
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 0023fee4bbbc..b34aa649d204 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -6,17 +6,36 @@
#include <linux/spinlock.h>
#include <linux/kernel.h>
+/**
+ * refcount_t - variant of atomic_t specialized for reference counts
+ * @refs: atomic_t counter field
+ *
+ * The counter saturates at UINT_MAX and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free bugs.
+ */
typedef struct refcount_struct {
atomic_t refs;
} refcount_t;
#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
+/**
+ * refcount_set - set a refcount's value
+ * @r: the refcount
+ * @n: value to which the refcount will be set
+ */
static inline void refcount_set(refcount_t *r, unsigned int n)
{
atomic_set(&r->refs, n);
}
+/**
+ * refcount_read - get a refcount's value
+ * @r: the refcount
+ *
+ * Return: the refcount's value
+ */
static inline unsigned int refcount_read(const refcount_t *r)
{
return atomic_read(&r->refs);
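A minimal sketch of the usual refcount_t pattern the kernel-doc above describes, paired with refcount_inc()/refcount_dec_and_test() from the same header; the foo object and helpers are hypothetical names, not part of this diff:

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t ref;
	/* ... payload ... */
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->ref, 1);	/* caller holds the first reference */
	return f;
}

static void foo_get(struct foo *f)
{
	refcount_inc(&f->ref);
}

static void foo_put(struct foo *f)
{
	/* refcount_dec_and_test() returns true once the last reference is dropped */
	if (refcount_dec_and_test(&f->ref))
		kfree(f);
}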
diff --git a/include/linux/regulator/arizona-ldo1.h b/include/linux/regulator/arizona-ldo1.h
new file mode 100644
index 000000000000..c685f1277c63
--- /dev/null
+++ b/include/linux/regulator/arizona-ldo1.h
@@ -0,0 +1,24 @@
+/*
+ * Platform data for Arizona LDO1 regulator
+ *
+ * Copyright 2017 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ARIZONA_LDO1_H
+#define ARIZONA_LDO1_H
+
+struct regulator_init_data;
+
+struct arizona_ldo1_pdata {
+ /** GPIO controlling LDOENA, if any */
+ int ldoena;
+
+ /** Regulator configuration for LDO1 */
+ const struct regulator_init_data *init_data;
+};
+
+#endif
diff --git a/include/linux/regulator/arizona-micsupp.h b/include/linux/regulator/arizona-micsupp.h
new file mode 100644
index 000000000000..616842619c00
--- /dev/null
+++ b/include/linux/regulator/arizona-micsupp.h
@@ -0,0 +1,21 @@
+/*
+ * Platform data for Arizona micsupp regulator
+ *
+ * Copyright 2017 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ARIZONA_MICSUPP_H
+#define ARIZONA_MICSUPP_H
+
+struct regulator_init_data;
+
+struct arizona_micsupp_pdata {
+ /** Regulator configuration for micsupp */
+ const struct regulator_init_data *init_data;
+};
+
+#endif
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index ea0fffa5faeb..df176d7c2b87 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -119,6 +119,7 @@ struct regmap;
#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
#define REGULATOR_EVENT_PRE_DISABLE 0x400
#define REGULATOR_EVENT_ABORT_DISABLE 0x800
+#define REGULATOR_EVENT_ENABLE 0x1000
/*
* Regulator errors that can be queried using regulator_get_error_flags
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index dac8e7b16bc6..94417b4226bd 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -292,6 +292,14 @@ enum regulator_type {
* set_active_discharge
* @active_discharge_reg: Register for control when using regmap
* set_active_discharge
+ * @soft_start_reg: Register for control when using regmap set_soft_start
+ * @soft_start_mask: Mask for control when using regmap set_soft_start
+ * @soft_start_val_on: Enabling value for control when using regmap
+ * set_soft_start
+ * @pull_down_reg: Register for control when using regmap set_pull_down
+ * @pull_down_mask: Mask for control when using regmap set_pull_down
+ * @pull_down_val_on: Enabling value for control when using regmap
+ * set_pull_down
*
* @enable_time: Time taken for initial enable of regulator (in uS).
* @off_on_delay: guard time (in uS), before re-enabling a regulator
@@ -345,6 +353,12 @@ struct regulator_desc {
unsigned int active_discharge_off;
unsigned int active_discharge_mask;
unsigned int active_discharge_reg;
+ unsigned int soft_start_reg;
+ unsigned int soft_start_mask;
+ unsigned int soft_start_val_on;
+ unsigned int pull_down_reg;
+ unsigned int pull_down_mask;
+ unsigned int pull_down_val_on;
unsigned int enable_time;
@@ -429,6 +443,8 @@ struct regulator_dev {
struct regulator_enable_gpio *ena_pin;
unsigned int ena_gpio_state:1;
+ unsigned int is_switch:1;
+
/* time when this regulator was disabled last time */
unsigned long last_off_jiffy;
};
@@ -476,6 +492,8 @@ int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int new_selector);
int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable);
int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable);
+int regulator_set_soft_start_regmap(struct regulator_dev *rdev);
+int regulator_set_pull_down_regmap(struct regulator_dev *rdev);
int regulator_set_active_discharge_regmap(struct regulator_dev *rdev,
bool enable);
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index c9f795e9a2ee..117699d1f7df 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -108,6 +108,8 @@ struct regulator_state {
* @initial_state: Suspend state to set by default.
* @initial_mode: Mode to set at startup.
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
+ * @settling_time: Time to settle down after voltage change when voltage
+ * change is non-linear (unit: microseconds).
* @active_discharge: Enable/disable active discharge. The enum
* regulator_active_discharge values are used for
* initialisation.
@@ -149,6 +151,7 @@ struct regulation_constraints {
unsigned int initial_mode;
unsigned int ramp_delay;
+ unsigned int settling_time;
unsigned int enable_time;
unsigned int active_discharge;
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index 70c6c66c5bcf..e0ccf46f66cf 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -48,6 +48,7 @@
#define PFUZE200_VGEN4 10
#define PFUZE200_VGEN5 11
#define PFUZE200_VGEN6 12
+#define PFUZE200_COIN 13
#define PFUZE3000_SW1A 0
#define PFUZE3000_SW1B 1
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 2b5a4679daea..156cfd330b66 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -167,6 +167,26 @@ reservation_object_lock(struct reservation_object *obj,
}
/**
+ * reservation_object_trylock - trylock the reservation object
+ * @obj: the reservation object
+ *
+ * Tries to lock the reservation object for exclusive access and modification.
+ * Note, that the lock is only against other writers, readers will run
+ * concurrently with a writer under RCU. The seqlock is used to notify readers
+ * if they overlap with a writer.
+ *
+ * Also note that since no context is provided, no deadlock protection is
+ * possible.
+ *
+ * Returns true if the lock was acquired, false otherwise.
+ */
+static inline bool __must_check
+reservation_object_trylock(struct reservation_object *obj)
+{
+ return ww_mutex_trylock(&obj->lock);
+}
+
+/**
* reservation_object_unlock - unlock the reservation object
* @obj: the reservation object
*
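A rough sketch of the trylock semantics documented for reservation_object_trylock(), using the existing reservation_object_unlock() helper; the try_update_fences() wrapper is hypothetical:

#include <linux/reservation.h>

static bool try_update_fences(struct reservation_object *resv)
{
	if (!reservation_object_trylock(resv))
		return false;		/* another writer holds the lock; retry later */

	/* ... add or replace fences; readers may still run under RCU ... */

	reservation_object_unlock(resv);
	return true;
}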
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 96fb139bdd08..13d8681210d5 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -15,6 +15,9 @@ int reset_control_status(struct reset_control *rstc);
struct reset_control *__of_reset_control_get(struct device_node *node,
const char *id, int index, bool shared,
bool optional);
+struct reset_control *__reset_control_get(struct device *dev, const char *id,
+ int index, bool shared,
+ bool optional);
void reset_control_put(struct reset_control *rstc);
struct reset_control *__devm_reset_control_get(struct device *dev,
const char *id, int index, bool shared,
@@ -72,6 +75,13 @@ static inline struct reset_control *__of_reset_control_get(
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
+static inline struct reset_control *__reset_control_get(
+ struct device *dev, const char *id,
+ int index, bool shared, bool optional)
+{
+ return optional ? NULL : ERR_PTR(-ENOTSUPP);
+}
+
static inline struct reset_control *__devm_reset_control_get(
struct device *dev, const char *id,
int index, bool shared, bool optional)
@@ -102,8 +112,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
#ifndef CONFIG_RESET_CONTROLLER
WARN_ON(1);
#endif
- return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
- false);
+ return __reset_control_get(dev, id, 0, false, false);
}
/**
@@ -131,22 +140,19 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
static inline struct reset_control *reset_control_get_shared(
struct device *dev, const char *id)
{
- return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
- false);
+ return __reset_control_get(dev, id, 0, true, false);
}
static inline struct reset_control *reset_control_get_optional_exclusive(
struct device *dev, const char *id)
{
- return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
- true);
+ return __reset_control_get(dev, id, 0, false, true);
}
static inline struct reset_control *reset_control_get_optional_shared(
struct device *dev, const char *id)
{
- return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
- true);
+ return __reset_control_get(dev, id, 0, true, true);
}
/**
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 092292b6675e..7d56a7ea2b2e 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -49,6 +49,21 @@
/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
+/* Maximum chain length before rehash
+ *
+ * The maximum (not average) chain length grows with the size of the hash
+ * table, at a rate of (log N)/(log log N).
+ *
+ * The value of 16 is selected so that even if the hash table grew to
+ * 2^32 you would not expect the maximum chain length to exceed it
+ * unless we are under attack (or extremely unlucky).
+ *
+ * As this limit is only to detect attacks, we don't need to set it to a
+ * lower value as you'd need the chain length to vastly exceed 16 to have
+ * any real effect on the system.
+ */
+#define RHT_ELASTICITY 16u
+
struct rhash_head {
struct rhash_head __rcu *next;
};
@@ -110,29 +125,25 @@ struct rhashtable;
* @key_len: Length of key
* @key_offset: Offset of key in struct to be hashed
* @head_offset: Offset of rhash_head in struct to be hashed
- * @insecure_max_entries: Maximum number of entries (may be exceeded)
* @max_size: Maximum size while expanding
* @min_size: Minimum size while shrinking
- * @nulls_base: Base value to generate nulls marker
- * @insecure_elasticity: Set to true to disable chain length checks
- * @automatic_shrinking: Enable automatic shrinking of tables
* @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
+ * @automatic_shrinking: Enable automatic shrinking of tables
+ * @nulls_base: Base value to generate nulls marker
* @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
* @obj_hashfn: Function to hash object
* @obj_cmpfn: Function to compare key with object
*/
struct rhashtable_params {
- size_t nelem_hint;
- size_t key_len;
- size_t key_offset;
- size_t head_offset;
- unsigned int insecure_max_entries;
+ u16 nelem_hint;
+ u16 key_len;
+ u16 key_offset;
+ u16 head_offset;
unsigned int max_size;
- unsigned int min_size;
- u32 nulls_base;
- bool insecure_elasticity;
+ u16 min_size;
bool automatic_shrinking;
- size_t locks_mul;
+ u8 locks_mul;
+ u32 nulls_base;
rht_hashfn_t hashfn;
rht_obj_hashfn_t obj_hashfn;
rht_obj_cmpfn_t obj_cmpfn;
@@ -143,8 +154,8 @@ struct rhashtable_params {
* @tbl: Bucket table
* @nelems: Number of elements in table
* @key_len: Key length for hashfn
- * @elasticity: Maximum chain length before rehash
* @p: Configuration parameters
+ * @max_elems: Maximum number of elements in table
* @rhlist: True if this is an rhltable
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
@@ -154,8 +165,8 @@ struct rhashtable {
struct bucket_table __rcu *tbl;
atomic_t nelems;
unsigned int key_len;
- unsigned int elasticity;
struct rhashtable_params p;
+ unsigned int max_elems;
bool rhlist;
struct work_struct run_work;
struct mutex mutex;
@@ -318,8 +329,7 @@ static inline bool rht_grow_above_100(const struct rhashtable *ht,
static inline bool rht_grow_above_max(const struct rhashtable *ht,
const struct bucket_table *tbl)
{
- return ht->p.insecure_max_entries &&
- atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
+ return atomic_read(&ht->nelems) >= ht->max_elems;
}
/* The bucket lock is selected based on the hash and protects mutations
@@ -726,7 +736,7 @@ slow_path:
return rhashtable_insert_slow(ht, key, obj);
}
- elasticity = ht->elasticity;
+ elasticity = RHT_ELASTICITY;
pprev = rht_bucket_insert(ht, tbl, hash);
data = ERR_PTR(-ENOMEM);
if (!pprev)
@@ -916,6 +926,28 @@ static inline int rhashtable_lookup_insert_fast(
}
/**
+ * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Just like rhashtable_lookup_insert_fast(), but this function returns the
+ * object if it exists, NULL if it did not and the insertion was successful,
+ * and an ERR_PTR otherwise.
+ */
+static inline void *rhashtable_lookup_get_insert_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ const char *key = rht_obj(ht, obj);
+
+ BUG_ON(ht->p.obj_hashfn);
+
+ return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
+ false);
+}
+
+/**
* rhashtable_lookup_insert_key - search and insert object to hash table
* with explicit key
* @ht: hash table
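A hedged sketch of the return convention of the new rhashtable_lookup_get_insert_fast(), assuming a hypothetical entry type keyed by a u32 and the default jhash-based hashing (no obj_hashfn, as required by the helper):

#include <linux/rhashtable.h>

struct entry {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params entry_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct entry, key),
	.head_offset	= offsetof(struct entry, node),
	.automatic_shrinking = true,
};

static void *entry_add(struct rhashtable *ht, struct entry *new)
{
	/* Existing entry, NULL if @new was inserted, or ERR_PTR() on failure. */
	return rhashtable_lookup_get_insert_fast(ht, &new->node, entry_params);
}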
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b6d4568795a7..ee9b461af095 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -185,7 +185,7 @@ size_t ring_buffer_page_len(void *page);
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
-void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
+void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
size_t len, int cpu, int full);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 8c89e902df3e..43ef2c30cb0f 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -83,19 +83,17 @@ struct anon_vma_chain {
};
enum ttu_flags {
- TTU_UNMAP = 1, /* unmap mode */
- TTU_MIGRATION = 2, /* migration mode */
- TTU_MUNLOCK = 4, /* munlock mode */
- TTU_LZFREE = 8, /* lazy free mode */
- TTU_SPLIT_HUGE_PMD = 16, /* split huge PMD if any */
-
- TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
- TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
- TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
- TTU_BATCH_FLUSH = (1 << 11), /* Batch TLB flushes where possible
+ TTU_MIGRATION = 0x1, /* migration mode */
+ TTU_MUNLOCK = 0x2, /* munlock mode */
+
+ TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
+ TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
+ TTU_IGNORE_ACCESS = 0x10, /* don't age */
+ TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */
+ TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible
* and caller guarantees they will
* do a final flush if necessary */
- TTU_RMAP_LOCKED = (1 << 12) /* do not grab rmap lock:
+ TTU_RMAP_LOCKED = 0x80 /* do not grab rmap lock:
* caller holds it */
};
@@ -193,9 +191,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
int page_referenced(struct page *, int is_locked,
struct mem_cgroup *memcg, unsigned long *vm_flags);
-#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
-
-int try_to_unmap(struct page *, enum ttu_flags flags);
+bool try_to_unmap(struct page *, enum ttu_flags flags);
/* Avoid racy checks */
#define PVMW_SYNC (1 << 0)
@@ -239,7 +235,7 @@ int page_mkclean(struct page *);
* called in munlock()/munmap() path to check for other vmas holding
* the page mlocked.
*/
-int try_to_munlock(struct page *);
+void try_to_munlock(struct page *);
void remove_migration_ptes(struct page *old, struct page *new, bool locked);
@@ -261,15 +257,19 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
*/
struct rmap_walk_control {
void *arg;
- int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
+ /*
+ * Return false if page table scanning in rmap_walk should be stopped.
+ * Otherwise, return true.
+ */
+ bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *arg);
int (*done)(struct page *page);
struct anon_vma *(*anon_lock)(struct page *page);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
-int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
-int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
#else /* !CONFIG_MMU */
@@ -285,7 +285,7 @@ static inline int page_referenced(struct page *page, int is_locked,
return 0;
}
-#define try_to_unmap(page, refs) SWAP_FAIL
+#define try_to_unmap(page, refs) false
static inline int page_mkclean(struct page *page)
{
@@ -295,13 +295,4 @@ static inline int page_mkclean(struct page *page)
#endif /* CONFIG_MMU */
-/*
- * Return values of try_to_unmap
- */
-#define SWAP_SUCCESS 0
-#define SWAP_AGAIN 1
-#define SWAP_FAIL 2
-#define SWAP_MLOCK 3
-#define SWAP_LZFREE 4
-
#endif /* _LINUX_RMAP_H */
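A speculative sketch of the new bool-returning rmap_one callback contract; the counting callback and wrapper below are hypothetical and only illustrate the continue/stop convention:

#include <linux/rmap.h>

static bool count_mapping(struct page *page, struct vm_area_struct *vma,
			  unsigned long addr, void *arg)
{
	int *count = arg;

	(*count)++;
	return true;	/* keep walking; returning false stops the rmap walk */
}

static int count_mappings_of(struct page *page)
{
	int count = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = count_mapping,
		.arg = &count,
	};

	rmap_walk(page, &rwc);
	return count;
}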
diff --git a/include/linux/rodata_test.h b/include/linux/rodata_test.h
index ea05f6c51413..84766bcdd01f 100644
--- a/include/linux/rodata_test.h
+++ b/include/linux/rodata_test.h
@@ -14,7 +14,6 @@
#define _RODATA_TEST_H
#ifdef CONFIG_DEBUG_RODATA_TEST
-extern const int rodata_test_data;
void rodata_test(void);
#else
static inline void rodata_test(void) {}
diff --git a/include/linux/rpmsg/qcom_smd.h b/include/linux/rpmsg/qcom_smd.h
index 8ec8b6439b25..f27917e0a101 100644
--- a/include/linux/rpmsg/qcom_smd.h
+++ b/include/linux/rpmsg/qcom_smd.h
@@ -6,7 +6,7 @@
struct qcom_smd_edge;
-#if IS_ENABLED(CONFIG_RPMSG_QCOM_SMD) || IS_ENABLED(CONFIG_QCOM_SMD)
+#if IS_ENABLED(CONFIG_RPMSG_QCOM_SMD)
struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
struct device_node *node);
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index d4e0a204c118..a1904aadbc45 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -176,6 +176,25 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
/**
+ * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
+ * limiting the depth used from each word.
+ * @sb: Bitmap to allocate from.
+ * @alloc_hint: Hint for where to start searching for a free bit.
+ * @shallow_depth: The maximum number of bits to allocate from a single word.
+ *
+ * This rather specific operation allows for having multiple users with
+ * different allocation limits. E.g., there can be a high-priority class that
+ * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
+ * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
+ * class can only allocate half of the total bits in the bitmap, preventing it
+ * from starving out the high-priority class.
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
+ unsigned long shallow_depth);
+
+/**
* sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
* @sb: Bitmap to check.
*
@@ -326,6 +345,19 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
int __sbitmap_queue_get(struct sbitmap_queue *sbq);
/**
+ * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
+ * sbitmap_queue, limiting the depth used from each word, with preemption
+ * already disabled.
+ * @sbq: Bitmap queue to allocate from.
+ * @shallow_depth: The maximum number of bits to allocate from a single word.
+ * See sbitmap_get_shallow().
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+ unsigned int shallow_depth);
+
+/**
* sbitmap_queue_get() - Try to allocate a free bit from a &struct
* sbitmap_queue.
* @sbq: Bitmap queue to allocate from.
@@ -346,6 +378,29 @@ static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
}
/**
+ * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
+ * sbitmap_queue, limiting the depth used from each word.
+ * @sbq: Bitmap queue to allocate from.
+ * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
+ * sbitmap_queue_clear()).
+ * @shallow_depth: The maximum number of bits to allocate from a single word.
+ * See sbitmap_get_shallow().
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+ unsigned int *cpu,
+ unsigned int shallow_depth)
+{
+ int nr;
+
+ *cpu = get_cpu();
+ nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
+ put_cpu();
+ return nr;
+}
+
+/**
* sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
* &struct sbitmap_queue.
* @sbq: Bitmap to free from.
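A small sketch of the shallow-depth allocation described in the sbitmap_get_shallow() kernel-doc, using the suggested half-word limit; the wrapper names are hypothetical:

#include <linux/sbitmap.h>

/* Low-priority allocation limited to half of each word, per the kernel-doc above. */
static int low_prio_get_tag(struct sbitmap_queue *sbq, unsigned int *cpu)
{
	return sbitmap_queue_get_shallow(sbq, cpu,
					 1U << (sbq->sb.shift - 1));
}

/* Release the tag on the CPU it was allocated on. */
static void put_tag(struct sbitmap_queue *sbq, unsigned int tag, unsigned int cpu)
{
	sbitmap_queue_clear(sbq, tag, cpu);
}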
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d67eee84fd43..2b69fc650201 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -186,7 +186,7 @@ extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
/**
- * struct prev_cputime - snaphsot of system and user cputime
+ * struct prev_cputime - snapshot of system and user cputime
* @utime: time spent in user mode
* @stime: time spent in system mode
* @lock: protects the above two fields
@@ -604,6 +604,10 @@ struct task_struct {
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
+#ifdef CONFIG_CGROUPS
+ /* disallow userland-initiated cgroup migration */
+ unsigned no_cgroup_migration:1;
+#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */
@@ -775,6 +779,8 @@ struct task_struct {
/* PI waiters blocked on a rt_mutex held by this task: */
struct rb_root pi_waiters;
struct rb_node *pi_waiters_leftmost;
+ /* Updated under owner's pi_lock and rq lock */
+ struct task_struct *pi_top_task;
/* Deadlock detection and priority inheritance handling: */
struct rt_mutex_waiter *pi_blocked_on;
#endif
@@ -1038,6 +1044,13 @@ struct task_struct {
/* A live task holds one reference: */
atomic_t stack_refcount;
#endif
+#ifdef CONFIG_LIVEPATCH
+ int patch_state;
+#endif
+#ifdef CONFIG_SECURITY
+ /* Used by LSM modules for access restriction: */
+ void *security;
+#endif
/* CPU-specific state of this task: */
struct thread_struct thread;
@@ -1211,9 +1224,9 @@ extern struct pid *cad_pid;
#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */
#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
#define PF_FROZEN 0x00010000 /* Frozen for system suspend */
-#define PF_FSTRANS 0x00020000 /* Inside a filesystem transaction */
-#define PF_KSWAPD 0x00040000 /* I am kswapd */
-#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */
+#define PF_KSWAPD 0x00020000 /* I am kswapd */
+#define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */
+#define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */
#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
@@ -1256,7 +1269,6 @@ extern struct pid *cad_pid;
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
-#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
#define TASK_PFA_TEST(name, func) \
@@ -1282,14 +1294,11 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
-TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
-TASK_PFA_SET(LMK_WAITING, lmk_waiting)
-
static inline void
-tsk_restore_flags(struct task_struct *task, unsigned long orig_flags, unsigned long flags)
+current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
- task->flags &= ~flags;
- task->flags |= orig_flags & flags;
+ current->flags &= ~flags;
+ current->flags |= orig_flags & flags;
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 830953ebb391..2b24a6974847 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -149,13 +149,21 @@ static inline bool in_vfork(struct task_struct *tsk)
return ret;
}
-/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
- * __GFP_FS is also cleared as it implies __GFP_IO.
+/*
+ * Applies per-task gfp context to the given allocation flags.
+ * PF_MEMALLOC_NOIO implies GFP_NOIO
+ * PF_MEMALLOC_NOFS implies GFP_NOFS
*/
-static inline gfp_t memalloc_noio_flags(gfp_t flags)
+static inline gfp_t current_gfp_context(gfp_t flags)
{
+ /*
+ * NOIO implies both NOIO and NOFS and it is a weaker context
+ * so always make sure it takes precedence
+ */
if (unlikely(current->flags & PF_MEMALLOC_NOIO))
flags &= ~(__GFP_IO | __GFP_FS);
+ else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
+ flags &= ~__GFP_FS;
return flags;
}
@@ -171,4 +179,28 @@ static inline void memalloc_noio_restore(unsigned int flags)
current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
+static inline unsigned int memalloc_nofs_save(void)
+{
+ unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
+ current->flags |= PF_MEMALLOC_NOFS;
+ return flags;
+}
+
+static inline void memalloc_nofs_restore(unsigned int flags)
+{
+ current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
+}
+
+static inline unsigned int memalloc_noreclaim_save(void)
+{
+ unsigned int flags = current->flags & PF_MEMALLOC;
+ current->flags |= PF_MEMALLOC;
+ return flags;
+}
+
+static inline void memalloc_noreclaim_restore(unsigned int flags)
+{
+ current->flags = (current->flags & ~PF_MEMALLOC) | flags;
+}
+
#endif /* _LINUX_SCHED_MM_H */
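A minimal sketch of the NOFS scope pattern that memalloc_nofs_save()/memalloc_nofs_restore() enable; the fs_alloc_in_transaction() helper is hypothetical:

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *fs_alloc_in_transaction(size_t size)
{
	unsigned int nofs_flags;
	void *p;

	/* All allocations in this scope implicitly drop __GFP_FS. */
	nofs_flags = memalloc_nofs_save();
	p = kmalloc(size, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flags);

	return p;
}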
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index 3bd668414f61..f93329aba31a 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -18,27 +18,20 @@ static inline int rt_task(struct task_struct *p)
}
#ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(struct task_struct *p);
-extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
-extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
+/*
+ * Must hold either p->pi_lock or task_rq(p)->lock.
+ */
+static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p)
+{
+ return p->pi_top_task;
+}
+extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
return tsk->pi_blocked_on != NULL;
}
#else
-static inline int rt_mutex_getprio(struct task_struct *p)
-{
- return p->normal_prio;
-}
-
-static inline int rt_mutex_get_effective_prio(struct task_struct *task,
- int newprio)
-{
- return newprio;
-}
-
static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
return NULL;
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 2cf446704cd4..c06d63b3a583 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -293,7 +293,6 @@ extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
-extern int kill_proc_info(int, struct siginfo *, pid_t);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
diff --git a/include/linux/security.h b/include/linux/security.h
index 96899fad7016..af675b576645 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -133,6 +133,10 @@ extern unsigned long dac_mmap_min_addr;
/* setfsuid or setfsgid, id0 == fsuid or fsgid */
#define LSM_SETID_FS 8
+/* Flags for security_task_prlimit(). */
+#define LSM_PRLIMIT_READ 1
+#define LSM_PRLIMIT_WRITE 2
+
/* forward declares to avoid warnings */
struct sched_param;
struct request_sock;
@@ -304,6 +308,7 @@ int security_file_send_sigiotask(struct task_struct *tsk,
int security_file_receive(struct file *file);
int security_file_open(struct file *file, const struct cred *cred);
int security_task_create(unsigned long clone_flags);
+int security_task_alloc(struct task_struct *task, unsigned long clone_flags);
void security_task_free(struct task_struct *task);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
void security_cred_free(struct cred *cred);
@@ -324,6 +329,8 @@ void security_task_getsecid(struct task_struct *p, u32 *secid);
int security_task_setnice(struct task_struct *p, int nice);
int security_task_setioprio(struct task_struct *p, int ioprio);
int security_task_getioprio(struct task_struct *p);
+int security_task_prlimit(const struct cred *cred, const struct cred *tcred,
+ unsigned int flags);
int security_task_setrlimit(struct task_struct *p, unsigned int resource,
struct rlimit *new_rlim);
int security_task_setscheduler(struct task_struct *p);
@@ -855,6 +862,12 @@ static inline int security_task_create(unsigned long clone_flags)
return 0;
}
+static inline int security_task_alloc(struct task_struct *task,
+ unsigned long clone_flags)
+{
+ return 0;
+}
+
static inline void security_task_free(struct task_struct *task)
{ }
@@ -949,6 +962,13 @@ static inline int security_task_getioprio(struct task_struct *p)
return 0;
}
+static inline int security_task_prlimit(const struct cred *cred,
+ const struct cred *tcred,
+ unsigned int flags)
+{
+ return 0;
+}
+
static inline int security_task_setrlimit(struct task_struct *p,
unsigned int resource,
struct rlimit *new_rlim)
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 4fc222f8755d..9edec926e9d9 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -10,8 +10,7 @@ struct task_struct;
/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
- struct kern_ipc_perm ____cacheline_aligned_in_smp
- sem_perm; /* permissions .. see ipc.h */
+ struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */
time_t sem_ctime; /* last change time */
struct sem *sem_base; /* ptr to first semaphore in array */
struct list_head pending_alter; /* pending operations */
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index 9519da6253a8..e69402d4a8ae 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -15,6 +15,8 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/termios.h>
+#include <linux/delay.h>
struct serdev_controller;
struct serdev_device;
@@ -39,12 +41,16 @@ struct serdev_device_ops {
* @nr: Device number on serdev bus.
* @ctrl: serdev controller managing this device.
* @ops: Device operations.
+ * @write_comp: Completion used by serdev_device_write() internally
+ * @write_lock: Lock to serialize access when writing data
*/
struct serdev_device {
struct device dev;
int nr;
struct serdev_controller *ctrl;
const struct serdev_device_ops *ops;
+ struct completion write_comp;
+ struct mutex write_lock;
};
static inline struct serdev_device *to_serdev_device(struct device *d)
@@ -81,6 +87,9 @@ struct serdev_controller_ops {
void (*close)(struct serdev_controller *);
void (*set_flow_control)(struct serdev_controller *, bool);
unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int);
+ void (*wait_until_sent)(struct serdev_controller *, long);
+ int (*get_tiocm)(struct serdev_controller *);
+ int (*set_tiocm)(struct serdev_controller *, unsigned int, unsigned int);
};
/**
@@ -165,7 +174,7 @@ static inline void serdev_controller_write_wakeup(struct serdev_controller *ctrl
if (!serdev || !serdev->ops->write_wakeup)
return;
- serdev->ops->write_wakeup(ctrl->serdev);
+ serdev->ops->write_wakeup(serdev);
}
static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
@@ -177,7 +186,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
if (!serdev || !serdev->ops->receive_buf)
return -EINVAL;
- return serdev->ops->receive_buf(ctrl->serdev, data, count);
+ return serdev->ops->receive_buf(serdev, data, count);
}
#if IS_ENABLED(CONFIG_SERIAL_DEV_BUS)
@@ -187,6 +196,11 @@ void serdev_device_close(struct serdev_device *);
unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
void serdev_device_set_flow_control(struct serdev_device *, bool);
int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
+void serdev_device_wait_until_sent(struct serdev_device *, long);
+int serdev_device_get_tiocm(struct serdev_device *);
+int serdev_device_set_tiocm(struct serdev_device *, int, int);
+void serdev_device_write_wakeup(struct serdev_device *);
+int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, unsigned long);
void serdev_device_write_flush(struct serdev_device *);
int serdev_device_write_room(struct serdev_device *);
@@ -223,7 +237,23 @@ static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev
return 0;
}
static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {}
-static inline int serdev_device_write_buf(struct serdev_device *sdev, const unsigned char *buf, size_t count)
+static inline int serdev_device_write_buf(struct serdev_device *serdev,
+ const unsigned char *buf,
+ size_t count)
+{
+ return -ENODEV;
+}
+static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {}
+static inline int serdev_device_get_tiocm(struct serdev_device *serdev)
+{
+ return -ENOTSUPP;
+}
+static inline int serdev_device_set_tiocm(struct serdev_device *serdev, int set, int clear)
+{
+ return -ENOTSUPP;
+}
+static inline int serdev_device_write(struct serdev_device *sdev, const unsigned char *buf,
+ size_t count, unsigned long timeout)
{
return -ENODEV;
}
@@ -238,6 +268,36 @@ static inline int serdev_device_write_room(struct serdev_device *sdev)
#endif /* CONFIG_SERIAL_DEV_BUS */
+static inline bool serdev_device_get_cts(struct serdev_device *serdev)
+{
+ int status = serdev_device_get_tiocm(serdev);
+ return !!(status & TIOCM_CTS);
+}
+
+static inline int serdev_device_wait_for_cts(struct serdev_device *serdev, bool state, int timeout_ms)
+{
+ unsigned long timeout;
+ bool signal;
+
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ while (time_is_after_jiffies(timeout)) {
+ signal = serdev_device_get_cts(serdev);
+ if (signal == state)
+ return 0;
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enable)
+{
+ if (enable)
+ return serdev_device_set_tiocm(serdev, TIOCM_RTS, 0);
+ else
+ return serdev_device_set_tiocm(serdev, 0, TIOCM_RTS);
+}
+
/*
* serdev hooks into TTY core
*/
@@ -248,7 +308,7 @@ struct tty_driver;
struct device *serdev_tty_port_register(struct tty_port *port,
struct device *parent,
struct tty_driver *drv, int idx);
-void serdev_tty_port_unregister(struct tty_port *port);
+int serdev_tty_port_unregister(struct tty_port *port);
#else
static inline struct device *serdev_tty_port_register(struct tty_port *port,
struct device *parent,
@@ -256,7 +316,10 @@ static inline struct device *serdev_tty_port_register(struct tty_port *port,
{
return ERR_PTR(-ENODEV);
}
-static inline void serdev_tty_port_unregister(struct tty_port *port) {}
+static inline int serdev_tty_port_unregister(struct tty_port *port)
+{
+ return -ENODEV;
+}
#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */
#endif /*_LINUX_SERDEV_H */
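A short sketch of a serdev client using the modem-control helpers defined above; the attach function name is hypothetical:

#include <linux/serdev.h>

static int hypothetical_attach(struct serdev_device *serdev)
{
	int ret;

	ret = serdev_device_set_rts(serdev, true);
	if (ret)
		return ret;

	/* Wait up to 100 ms for the remote side to assert CTS. */
	return serdev_device_wait_for_cts(serdev, true, 100);
}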
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 58484fb35cc8..64d892f1e5cd 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -247,6 +247,7 @@ struct uart_port {
unsigned char suspended;
unsigned char irq_wake;
unsigned char unused[2];
+ const char *name; /* port name */
struct attribute_group *attr_group; /* port specific attributes */
const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
struct serial_rs485 rs485;
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 94ad6eea9550..1f5a16620693 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -390,10 +390,6 @@ int unhandled_signal(struct task_struct *tsk, int sig);
#define sig_kernel_ignore(sig) siginmask(sig, SIG_KERNEL_IGNORE_MASK)
#define sig_kernel_stop(sig) siginmask(sig, SIG_KERNEL_STOP_MASK)
-#define sig_user_defined(t, signr) \
- (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
- ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
-
#define sig_fatal(t, signr) \
(!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
(t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c776abd86937..a098d95b3d84 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -413,14 +413,15 @@ struct ubuf_info {
* the end of the header data, ie. at skb->end.
*/
struct skb_shared_info {
+ unsigned short _unused;
unsigned char nr_frags;
__u8 tx_flags;
unsigned short gso_size;
/* Warning: this field is not always filled in (UFO)! */
unsigned short gso_segs;
- unsigned short gso_type;
struct sk_buff *frag_list;
struct skb_shared_hwtstamps hwtstamps;
+ unsigned int gso_type;
u32 tskey;
__be32 ip6_frag_id;
@@ -491,6 +492,8 @@ enum {
SKB_GSO_TUNNEL_REMCSUM = 1 << 14,
SKB_GSO_SCTP = 1 << 15,
+
+ SKB_GSO_ESP = 1 << 16,
};
#if BITS_PER_LONG > 32
@@ -3113,7 +3116,7 @@ struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
- return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
+ return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
}
static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3c37a8c51921..04a7f7993e67 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -28,7 +28,7 @@
#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
/*
- * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
+ * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
*
* This delays freeing the SLAB page by a grace period, it does _NOT_
* delay object freeing. This means that if you do kmem_cache_free()
@@ -61,8 +61,10 @@
*
* rcu_read_lock before reading the address, then rcu_read_unlock after
* taking the spinlock within the structure expected at that address.
+ *
+ * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
*/
-#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
+#define SLAB_TYPESAFE_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
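A hedged sketch of creating a type-stable cache with the renamed flag, assuming the usual kmem_cache_create() API; the cache and struct names are hypothetical:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct conn {
	spinlock_t lock;
	/* ... */
};

static struct kmem_cache *conn_cache;

static int __init conn_cache_init(void)
{
	/* Objects may be reused within a grace period, but the memory stays
	 * type-stable, so RCU readers must re-validate after taking the lock. */
	conn_cache = kmem_cache_create("hypothetical_conn",
				       sizeof(struct conn), 0,
				       SLAB_TYPESAFE_BY_RCU, NULL);
	return conn_cache ? 0 : -ENOMEM;
}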
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 8e0cb7a0f836..68123c1fe549 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -120,6 +120,13 @@ extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);
+extern int __boot_cpu_id;
+
+static inline int get_boot_cpu_id(void)
+{
+ return __boot_cpu_id;
+}
+
#else /* !SMP */
static inline void smp_send_stop(void) { }
@@ -158,6 +165,11 @@ static inline void smp_init(void) { up_late_init(); }
static inline void smp_init(void) { }
#endif
+static inline int get_boot_cpu_id(void)
+{
+ return 0;
+}
+
#endif /* !SMP */
/*
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h
deleted file mode 100644
index f148e0ffbec7..000000000000
--- a/include/linux/soc/qcom/smd.h
+++ /dev/null
@@ -1,139 +0,0 @@
-#ifndef __QCOM_SMD_H__
-#define __QCOM_SMD_H__
-
-#include <linux/device.h>
-#include <linux/mod_devicetable.h>
-
-struct qcom_smd;
-struct qcom_smd_channel;
-struct qcom_smd_lookup;
-
-/**
- * struct qcom_smd_id - struct used for matching a smd device
- * @name: name of the channel
- */
-struct qcom_smd_id {
- char name[20];
-};
-
-/**
- * struct qcom_smd_device - smd device struct
- * @dev: the device struct
- * @channel: handle to the smd channel for this device
- */
-struct qcom_smd_device {
- struct device dev;
- struct qcom_smd_channel *channel;
-};
-
-typedef int (*qcom_smd_cb_t)(struct qcom_smd_channel *, const void *, size_t);
-
-/**
- * struct qcom_smd_driver - smd driver struct
- * @driver: underlying device driver
- * @smd_match_table: static channel match table
- * @probe: invoked when the smd channel is found
- * @remove: invoked when the smd channel is closed
- * @callback: invoked when an inbound message is received on the channel,
- * should return 0 on success or -EBUSY if the data cannot be
- * consumed at this time
- */
-struct qcom_smd_driver {
- struct device_driver driver;
- const struct qcom_smd_id *smd_match_table;
-
- int (*probe)(struct qcom_smd_device *dev);
- void (*remove)(struct qcom_smd_device *dev);
- qcom_smd_cb_t callback;
-};
-
-#if IS_ENABLED(CONFIG_QCOM_SMD)
-
-int qcom_smd_driver_register(struct qcom_smd_driver *drv);
-void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
-
-struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *channel,
- const char *name,
- qcom_smd_cb_t cb);
-void qcom_smd_close_channel(struct qcom_smd_channel *channel);
-void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel);
-void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data);
-int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
-
-
-struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
- struct device_node *node);
-int qcom_smd_unregister_edge(struct qcom_smd_edge *edge);
-
-#else
-
-static inline int qcom_smd_driver_register(struct qcom_smd_driver *drv)
-{
- return -ENXIO;
-}
-
-static inline void qcom_smd_driver_unregister(struct qcom_smd_driver *drv)
-{
- /* This shouldn't be possible */
- WARN_ON(1);
-}
-
-static inline struct qcom_smd_channel *
-qcom_smd_open_channel(struct qcom_smd_channel *channel,
- const char *name,
- qcom_smd_cb_t cb)
-{
- /* This shouldn't be possible */
- WARN_ON(1);
- return NULL;
-}
-
-static inline void qcom_smd_close_channel(struct qcom_smd_channel *channel)
-{
- /* This shouldn't be possible */
- WARN_ON(1);
-}
-
-static inline void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel)
-{
- /* This shouldn't be possible */
- WARN_ON(1);
- return NULL;
-}
-
-static inline void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data)
-{
- /* This shouldn't be possible */
- WARN_ON(1);
-}
-
-static inline int qcom_smd_send(struct qcom_smd_channel *channel,
- const void *data, int len)
-{
- /* This shouldn't be possible */
- WARN_ON(1);
- return -ENXIO;
-}
-
-static inline struct qcom_smd_edge *
-qcom_smd_register_edge(struct device *parent,
- struct device_node *node)
-{
- return ERR_PTR(-ENXIO);
-}
-
-static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
-{
- /* This shouldn't be possible */
- WARN_ON(1);
- return -ENXIO;
-}
-
-#endif
-
-#define module_qcom_smd_driver(__smd_driver) \
- module_driver(__smd_driver, qcom_smd_driver_register, \
- qcom_smd_driver_unregister)
-
-
-#endif
diff --git a/include/linux/soc/qcom/wcnss_ctrl.h b/include/linux/soc/qcom/wcnss_ctrl.h
index eab64976a73b..a4dd4d7c711d 100644
--- a/include/linux/soc/qcom/wcnss_ctrl.h
+++ b/include/linux/soc/qcom/wcnss_ctrl.h
@@ -1,16 +1,19 @@
#ifndef __WCNSS_CTRL_H__
#define __WCNSS_CTRL_H__
-#include <linux/soc/qcom/smd.h>
+#include <linux/rpmsg.h>
#if IS_ENABLED(CONFIG_QCOM_WCNSS_CTRL)
-struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb);
+struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name,
+ rpmsg_rx_cb_t cb, void *priv);
#else
-static inline struct qcom_smd_channel*
-qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb)
+static inline struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss,
+ const char *name,
+ rpmsg_rx_cb_t cb,
+ void *priv)
{
WARN_ON(1);
return ERR_PTR(-ENXIO);
diff --git a/include/linux/soc/renesas/rcar-rst.h b/include/linux/soc/renesas/rcar-rst.h
index a18e0783946b..787e7ad53d45 100644
--- a/include/linux/soc/renesas/rcar-rst.h
+++ b/include/linux/soc/renesas/rcar-rst.h
@@ -1,6 +1,11 @@
#ifndef __LINUX_SOC_RENESAS_RCAR_RST_H__
#define __LINUX_SOC_RENESAS_RCAR_RST_H__
+#if defined(CONFIG_ARCH_RCAR_GEN1) || defined(CONFIG_ARCH_RCAR_GEN2) || \
+ defined(CONFIG_ARCH_R8A7795) || defined(CONFIG_ARCH_R8A7796)
int rcar_rst_read_mode_pins(u32 *mode);
+#else
+static inline int rcar_rst_read_mode_pins(u32 *mode) { return -ENODEV; }
+#endif
#endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index 49df0a01a2cc..bebdde5dccd6 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* EXYNOS - Power management unit definition
@@ -50,6 +50,14 @@
#define S5P_WAKEUP_MASK 0x0608
#define S5P_WAKEUP_MASK2 0x0614
+/* MIPI_PHYn_CONTROL, valid for Exynos3250, Exynos4, Exynos5250 and Exynos5433 */
+#define EXYNOS4_MIPI_PHY_CONTROL(n) (0x0710 + (n) * 4)
+/* Phy enable bit, common for all phy registers, not only MIPI */
+#define EXYNOS4_PHY_ENABLE (1 << 0)
+#define EXYNOS4_MIPI_PHY_SRESETN (1 << 1)
+#define EXYNOS4_MIPI_PHY_MRESETN (1 << 2)
+#define EXYNOS4_MIPI_PHY_RESET_MASK (3 << 1)
+
#define S5P_INFORM0 0x0800
#define S5P_INFORM1 0x0804
#define S5P_INFORM5 0x0814
@@ -342,6 +350,8 @@
#define EXYNOS5_AUTO_WDTRESET_DISABLE 0x0408
#define EXYNOS5_MASK_WDTRESET_REQUEST 0x040C
+#define EXYNOS5_USBDRD_PHY_CONTROL 0x0704
+#define EXYNOS5_DPTX_PHY_CONTROL 0x0720
#define EXYNOS5_USE_RETENTION BIT(4)
#define EXYNOS5_SYS_WDTRESET (1 << 20)
@@ -495,6 +505,9 @@
#define EXYNOS5420_KFC_CORE_RESET(_nr) \
((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
+#define EXYNOS5420_USBDRD1_PHY_CONTROL 0x0708
+#define EXYNOS5420_MIPI_PHY_CONTROL(n) (0x0714 + (n) * 4)
+#define EXYNOS5420_DPTX_PHY_CONTROL 0x0728
#define EXYNOS5420_ARM_CORE2_SYS_PWR_REG 0x1020
#define EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG 0x1024
#define EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG 0x1028
@@ -632,6 +645,7 @@
| EXYNOS5420_KFC_USE_STANDBY_WFI3)
/* For EXYNOS5433 */
+#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x0728)
#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028)
#define EXYNOS5433_PAD_RETENTION_MMC2_OPTION (0x30C8)
#define EXYNOS5433_PAD_RETENTION_TOP_OPTION (0x3108)
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index a0596ca0e80a..a2f8109bb215 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -24,6 +24,7 @@ void sock_diag_unregister(const struct sock_diag_handler *h);
void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
+u64 sock_gen_cookie(struct sock *sk);
int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 75c6bd0ac605..935bd2854ff1 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -23,6 +23,7 @@
#include <linux/scatterlist.h>
struct dma_chan;
+struct property_entry;
struct spi_master;
struct spi_transfer;
struct spi_flash_read_message;
@@ -375,6 +376,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @unprepare_message: undo any work done by prepare_message().
* @spi_flash_read: to support spi-controller hardwares that provide
* accelerated interface to read from flash devices.
+ * @spi_flash_can_dma: analogous to can_dma() interface, but for
+ * controllers implementing spi_flash_read.
* @flash_read_supported: spi device supports flash read
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
@@ -538,6 +541,8 @@ struct spi_master {
struct spi_message *message);
int (*spi_flash_read)(struct spi_device *spi,
struct spi_flash_read_message *msg);
+ bool (*spi_flash_can_dma)(struct spi_device *spi,
+ struct spi_flash_read_message *msg);
bool (*flash_read_supported)(struct spi_device *spi);
/*
@@ -891,7 +896,7 @@ static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags
unsigned i;
struct spi_transfer *t = (struct spi_transfer *)(m + 1);
- INIT_LIST_HEAD(&m->transfers);
+ spi_message_init_no_memset(m);
for (i = 0; i < ntrans; i++, t++)
spi_message_add_tail(t, m);
}
@@ -1209,6 +1214,7 @@ int spi_flash_read(struct spi_device *spi,
* @modalias: Initializes spi_device.modalias; identifies the driver.
* @platform_data: Initializes spi_device.platform_data; the particular
* data stored there is driver-specific.
+ * @properties: Additional device properties for the device.
* @controller_data: Initializes spi_device.controller_data; some
* controllers need hints about hardware setup, e.g. for DMA.
* @irq: Initializes spi_device.irq; depends on how the board is wired.
@@ -1241,10 +1247,12 @@ struct spi_board_info {
*
* platform_data goes to spi_device.dev.platform_data,
* controller_data goes to spi_device.controller_data,
+ * device properties are copied and attached to spi_device,
* irq is copied too
*/
char modalias[SPI_NAME_SIZE];
const void *platform_data;
+ const struct property_entry *properties;
void *controller_data;
int irq;
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 00a21166e268..db42746bdfea 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -20,6 +20,8 @@
#define SPLICE_F_MORE (0x04) /* expect more data */
#define SPLICE_F_GIFT (0x08) /* pages passed in are a gift */
+#define SPLICE_F_ALL (SPLICE_F_MOVE|SPLICE_F_NONBLOCK|SPLICE_F_MORE|SPLICE_F_GIFT)
+
/*
* Passed to the actors
*/
@@ -55,7 +57,6 @@ struct splice_pipe_desc {
struct partial_page *partial; /* pages[] may not be contig */
int nr_pages; /* number of populated pages in map */
unsigned int nr_pages_max; /* pages[] & partial[] arrays size */
- unsigned int flags; /* splice flags */
const struct pipe_buf_operations *ops;/* ops associated with output pipe */
void (*spd_release)(struct splice_pipe_desc *, unsigned int);
};
@@ -82,7 +83,6 @@ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
*/
extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
extern void splice_shrink_spd(struct splice_pipe_desc *);
-extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
extern const struct pipe_buf_operations default_pipe_buf_ops;
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index a598cf3ac70c..167ad8831aaf 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -22,7 +22,7 @@
* Lai Jiangshan <laijs@cn.fujitsu.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
- * Documentation/RCU/ *.txt
+ * Documentation/RCU/ *.txt
*
*/
@@ -32,35 +32,9 @@
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
+#include <linux/rcu_segcblist.h>
-struct srcu_array {
- unsigned long lock_count[2];
- unsigned long unlock_count[2];
-};
-
-struct rcu_batch {
- struct rcu_head *head, **tail;
-};
-
-#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
-
-struct srcu_struct {
- unsigned long completed;
- struct srcu_array __percpu *per_cpu_ref;
- spinlock_t queue_lock; /* protect ->batch_queue, ->running */
- bool running;
- /* callbacks just queued */
- struct rcu_batch batch_queue;
- /* callbacks try to do the first check_zero */
- struct rcu_batch batch_check0;
- /* callbacks done with the first check_zero and the flip */
- struct rcu_batch batch_check1;
- struct rcu_batch batch_done;
- struct delayed_work work;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-};
+struct srcu_struct;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -82,46 +56,15 @@ int init_srcu_struct(struct srcu_struct *sp);
#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-void process_srcu(struct work_struct *work);
-
-#define __SRCU_STRUCT_INIT(name) \
- { \
- .completed = -300, \
- .per_cpu_ref = &name##_srcu_array, \
- .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
- .running = false, \
- .batch_queue = RCU_BATCH_INIT(name.batch_queue), \
- .batch_check0 = RCU_BATCH_INIT(name.batch_check0), \
- .batch_check1 = RCU_BATCH_INIT(name.batch_check1), \
- .batch_done = RCU_BATCH_INIT(name.batch_done), \
- .work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\
- __SRCU_DEP_MAP_INIT(name) \
- }
-
-/*
- * Define and initialize a srcu struct at build time.
- * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
- *
- * Note that although DEFINE_STATIC_SRCU() hides the name from other
- * files, the per-CPU variable rules nevertheless require that the
- * chosen name be globally unique. These rules also prohibit use of
- * DEFINE_STATIC_SRCU() within a function. If these rules are too
- * restrictive, declare the srcu_struct manually. For example, in
- * each file:
- *
- * static struct srcu_struct my_srcu;
- *
- * Then, before the first use of each my_srcu, manually initialize it:
- *
- * init_srcu_struct(&my_srcu);
- *
- * See include/linux/percpu-defs.h for the rules on per-CPU variables.
- */
-#define __DEFINE_SRCU(name, is_static) \
- static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\
- is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
-#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
-#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
+#ifdef CONFIG_TINY_SRCU
+#include <linux/srcutiny.h>
+#elif defined(CONFIG_TREE_SRCU)
+#include <linux/srcutree.h>
+#elif defined(CONFIG_CLASSIC_SRCU)
+#include <linux/srcuclassic.h>
+#else
+#error "Unknown SRCU implementation specified to kernel configuration"
+#endif
/**
* call_srcu() - Queue a callback for invocation after an SRCU grace period
@@ -147,9 +90,6 @@ void cleanup_srcu_struct(struct srcu_struct *sp);
int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
void synchronize_srcu(struct srcu_struct *sp);
-void synchronize_srcu_expedited(struct srcu_struct *sp);
-unsigned long srcu_batches_completed(struct srcu_struct *sp);
-void srcu_barrier(struct srcu_struct *sp);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/srcuclassic.h b/include/linux/srcuclassic.h
new file mode 100644
index 000000000000..5753f7322262
--- /dev/null
+++ b/include/linux/srcuclassic.h
@@ -0,0 +1,115 @@
+/*
+ * Sleepable Read-Copy Update mechanism for mutual exclusion,
+ * classic v4.11 variant.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2017
+ *
+ * Author: Paul McKenney <paulmck@us.ibm.com>
+ */
+
+#ifndef _LINUX_SRCU_CLASSIC_H
+#define _LINUX_SRCU_CLASSIC_H
+
+struct srcu_array {
+ unsigned long lock_count[2];
+ unsigned long unlock_count[2];
+};
+
+struct rcu_batch {
+ struct rcu_head *head, **tail;
+};
+
+#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
+
+struct srcu_struct {
+ unsigned long completed;
+ struct srcu_array __percpu *per_cpu_ref;
+ spinlock_t queue_lock; /* protect ->batch_queue, ->running */
+ bool running;
+ /* callbacks just queued */
+ struct rcu_batch batch_queue;
+ /* callbacks try to do the first check_zero */
+ struct rcu_batch batch_check0;
+ /* callbacks done with the first check_zero and the flip */
+ struct rcu_batch batch_check1;
+ struct rcu_batch batch_done;
+ struct delayed_work work;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+};
+
+void process_srcu(struct work_struct *work);
+
+#define __SRCU_STRUCT_INIT(name) \
+ { \
+ .completed = -300, \
+ .per_cpu_ref = &name##_srcu_array, \
+ .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
+ .running = false, \
+ .batch_queue = RCU_BATCH_INIT(name.batch_queue), \
+ .batch_check0 = RCU_BATCH_INIT(name.batch_check0), \
+ .batch_check1 = RCU_BATCH_INIT(name.batch_check1), \
+ .batch_done = RCU_BATCH_INIT(name.batch_done), \
+ .work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\
+ __SRCU_DEP_MAP_INIT(name) \
+ }
+
+/*
+ * Define and initialize a srcu struct at build time.
+ * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ *
+ * Note that although DEFINE_STATIC_SRCU() hides the name from other
+ * files, the per-CPU variable rules nevertheless require that the
+ * chosen name be globally unique. These rules also prohibit use of
+ * DEFINE_STATIC_SRCU() within a function. If these rules are too
+ * restrictive, declare the srcu_struct manually. For example, in
+ * each file:
+ *
+ * static struct srcu_struct my_srcu;
+ *
+ * Then, before the first use of each my_srcu, manually initialize it:
+ *
+ * init_srcu_struct(&my_srcu);
+ *
+ * See include/linux/percpu-defs.h for the rules on per-CPU variables.
+ */
+#define __DEFINE_SRCU(name, is_static) \
+ static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\
+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
+#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
+
+void synchronize_srcu_expedited(struct srcu_struct *sp);
+void srcu_barrier(struct srcu_struct *sp);
+unsigned long srcu_batches_completed(struct srcu_struct *sp);
+
+static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
+ struct srcu_struct *sp, int *flags,
+ unsigned long *gpnum,
+ unsigned long *completed)
+{
+ if (test_type != SRCU_FLAVOR)
+ return;
+ *flags = 0;
+ *completed = sp->completed;
+ *gpnum = *completed;
+ if (sp->batch_queue.head || sp->batch_check0.head || sp->batch_check1.head)
+ (*gpnum)++;
+}
+
+#endif
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
new file mode 100644
index 000000000000..42311ee0334f
--- /dev/null
+++ b/include/linux/srcutiny.h
@@ -0,0 +1,93 @@
+/*
+ * Sleepable Read-Copy Update mechanism for mutual exclusion,
+ * tiny variant.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2017
+ *
+ * Author: Paul McKenney <paulmck@us.ibm.com>
+ */
+
+#ifndef _LINUX_SRCU_TINY_H
+#define _LINUX_SRCU_TINY_H
+
+#include <linux/swait.h>
+
+struct srcu_struct {
+ int srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */
+ struct swait_queue_head srcu_wq;
+ /* Last srcu_read_unlock() wakes GP. */
+ unsigned long srcu_gp_seq; /* GP seq # for callback tagging. */
+ struct rcu_segcblist srcu_cblist;
+ /* Pending SRCU callbacks. */
+ int srcu_idx; /* Current reader array element. */
+ bool srcu_gp_running; /* GP workqueue running? */
+ bool srcu_gp_waiting; /* GP waiting for readers? */
+ struct work_struct srcu_work; /* For driving grace periods. */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+};
+
+void srcu_drive_gp(struct work_struct *wp);
+
+#define __SRCU_STRUCT_INIT(name) \
+{ \
+ .srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq), \
+ .srcu_cblist = RCU_SEGCBLIST_INITIALIZER(name.srcu_cblist), \
+ .srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp), \
+ __SRCU_DEP_MAP_INIT(name) \
+}
+
+/*
+ * This odd _STATIC_ arrangement is needed for API compatibility with
+ * Tree SRCU, which needs some per-CPU data.
+ */
+#define DEFINE_SRCU(name) \
+ struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+#define DEFINE_STATIC_SRCU(name) \
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+
+void synchronize_srcu(struct srcu_struct *sp);
+
+static inline void synchronize_srcu_expedited(struct srcu_struct *sp)
+{
+ synchronize_srcu(sp);
+}
+
+static inline void srcu_barrier(struct srcu_struct *sp)
+{
+ synchronize_srcu(sp);
+}
+
+static inline unsigned long srcu_batches_completed(struct srcu_struct *sp)
+{
+ return 0;
+}
+
+static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
+ struct srcu_struct *sp, int *flags,
+ unsigned long *gpnum,
+ unsigned long *completed)
+{
+ if (test_type != SRCU_FLAVOR)
+ return;
+ *flags = 0;
+ *completed = sp->srcu_gp_seq;
+ *gpnum = *completed;
+}
+
+#endif
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
new file mode 100644
index 000000000000..32e86d85fd11
--- /dev/null
+++ b/include/linux/srcutree.h
@@ -0,0 +1,150 @@
+/*
+ * Sleepable Read-Copy Update mechanism for mutual exclusion,
+ * tree variant.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2017
+ *
+ * Author: Paul McKenney <paulmck@us.ibm.com>
+ */
+
+#ifndef _LINUX_SRCU_TREE_H
+#define _LINUX_SRCU_TREE_H
+
+#include <linux/rcu_node_tree.h>
+#include <linux/completion.h>
+
+struct srcu_node;
+struct srcu_struct;
+
+/*
+ * Per-CPU structure feeding into leaf srcu_node, similar in function
+ * to rcu_node.
+ */
+struct srcu_data {
+ /* Read-side state. */
+ unsigned long srcu_lock_count[2]; /* Locks per CPU. */
+ unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */
+
+ /* Update-side state. */
+ spinlock_t lock ____cacheline_internodealigned_in_smp;
+ struct rcu_segcblist srcu_cblist; /* List of callbacks.*/
+ unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */
+ unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
+ bool srcu_cblist_invoking; /* Invoking these CBs? */
+ struct delayed_work work; /* Context for CB invoking. */
+ struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */
+ struct srcu_node *mynode; /* Leaf srcu_node. */
+ unsigned long grpmask; /* Mask for leaf srcu_node */
+ /* ->srcu_data_have_cbs[]. */
+ int cpu;
+ struct srcu_struct *sp;
+};
+
+/*
+ * Node in SRCU combining tree, similar in function to rcu_data.
+ */
+struct srcu_node {
+ spinlock_t lock;
+ unsigned long srcu_have_cbs[4]; /* GP seq for children */
+ /* having CBs, but only */
+ /* is > ->srcu_gp_seq. */
+ unsigned long srcu_data_have_cbs[4]; /* Which srcu_data structs */
+ /* have CBs for given GP? */
+ unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
+ struct srcu_node *srcu_parent; /* Next up in tree. */
+ int grplo; /* Least CPU for node. */
+ int grphi; /* Biggest CPU for node. */
+};
+
+/*
+ * Per-SRCU-domain structure, similar in function to rcu_state.
+ */
+struct srcu_struct {
+ struct srcu_node node[NUM_RCU_NODES]; /* Combining tree. */
+ struct srcu_node *level[RCU_NUM_LVLS + 1];
+ /* First node at each level. */
+ struct mutex srcu_cb_mutex; /* Serialize CB preparation. */
+ spinlock_t gp_lock; /* protect ->srcu_cblist */
+ struct mutex srcu_gp_mutex; /* Serialize GP work. */
+ unsigned int srcu_idx; /* Current rdr array element. */
+ unsigned long srcu_gp_seq; /* Grace-period seq #. */
+ unsigned long srcu_gp_seq_needed; /* Latest gp_seq needed. */
+ unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
+ unsigned long srcu_last_gp_end; /* Last GP end timestamp (ns) */
+ struct srcu_data __percpu *sda; /* Per-CPU srcu_data array. */
+ unsigned long srcu_barrier_seq; /* srcu_barrier seq #. */
+ struct mutex srcu_barrier_mutex; /* Serialize barrier ops. */
+ struct completion srcu_barrier_completion;
+ /* Awaken barrier rq at end. */
+ atomic_t srcu_barrier_cpu_cnt; /* # CPUs not yet posting a */
+ /* callback for the barrier */
+ /* operation. */
+ struct delayed_work work;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+};
+
+/* Values for state variable (bottom bits of ->srcu_gp_seq). */
+#define SRCU_STATE_IDLE 0
+#define SRCU_STATE_SCAN1 1
+#define SRCU_STATE_SCAN2 2
+
+void process_srcu(struct work_struct *work);
+
+#define __SRCU_STRUCT_INIT(name) \
+ { \
+ .sda = &name##_srcu_data, \
+ .gp_lock = __SPIN_LOCK_UNLOCKED(name.gp_lock), \
+ .srcu_gp_seq_needed = 0 - 1, \
+ __SRCU_DEP_MAP_INIT(name) \
+ }
+
+/*
+ * Define and initialize a srcu struct at build time.
+ * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ *
+ * Note that although DEFINE_STATIC_SRCU() hides the name from other
+ * files, the per-CPU variable rules nevertheless require that the
+ * chosen name be globally unique. These rules also prohibit use of
+ * DEFINE_STATIC_SRCU() within a function. If these rules are too
+ * restrictive, declare the srcu_struct manually. For example, in
+ * each file:
+ *
+ * static struct srcu_struct my_srcu;
+ *
+ * Then, before the first use of each my_srcu, manually initialize it:
+ *
+ * init_srcu_struct(&my_srcu);
+ *
+ * See include/linux/percpu-defs.h for the rules on per-CPU variables.
+ */
+#define __DEFINE_SRCU(name, is_static) \
+ static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data);\
+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
+#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
+
+void synchronize_srcu_expedited(struct srcu_struct *sp);
+void srcu_barrier(struct srcu_struct *sp);
+unsigned long srcu_batches_completed(struct srcu_struct *sp);
+
+void srcutorture_get_gp_data(enum rcutorture_type test_type,
+ struct srcu_struct *sp, int *flags,
+ unsigned long *gpnum, unsigned long *completed);
+
+#endif
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 0a34489a46b6..4205f71a5f0e 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -18,6 +18,8 @@ extern void save_stack_trace_regs(struct pt_regs *regs,
struct stack_trace *trace);
extern void save_stack_trace_tsk(struct task_struct *tsk,
struct stack_trace *trace);
+extern int save_stack_trace_tsk_reliable(struct task_struct *tsk,
+ struct stack_trace *trace);
extern void print_stack_trace(struct stack_trace *trace, int spaces);
extern int snprint_stack_trace(char *buf, size_t size,
@@ -29,12 +31,13 @@ extern void save_stack_trace_user(struct stack_trace *trace);
# define save_stack_trace_user(trace) do { } while (0)
#endif
-#else
+#else /* !CONFIG_STACKTRACE */
# define save_stack_trace(trace) do { } while (0)
# define save_stack_trace_tsk(tsk, trace) do { } while (0)
# define save_stack_trace_user(trace) do { } while (0)
# define print_stack_trace(trace, spaces) do { } while (0)
# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0)
-#endif
+# define save_stack_trace_tsk_reliable(tsk, trace) ({ -ENOSYS; })
+#endif /* CONFIG_STACKTRACE */
-#endif
+#endif /* __LINUX_STACKTRACE_H */
diff --git a/include/linux/stat.h b/include/linux/stat.h
index c76e524fb34b..64b6b3aece21 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -26,6 +26,7 @@ struct kstat {
unsigned int nlink;
uint32_t blksize; /* Preferred I/O size */
u64 attributes;
+ u64 attributes_mask;
#define KSTAT_ATTR_FS_IOC_FLAGS \
(STATX_ATTR_COMPRESSED | \
STATX_ATTR_IMMUTABLE | \
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index fc273e9d5f67..3921cb9dfadb 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -28,6 +28,9 @@
#include <linux/platform_device.h>
+#define MTL_MAX_RX_QUEUES 8
+#define MTL_MAX_TX_QUEUES 8
+
#define STMMAC_RX_COE_NONE 0
#define STMMAC_RX_COE_TYPE1 1
#define STMMAC_RX_COE_TYPE2 2
@@ -44,6 +47,18 @@
#define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */
#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */
+/* MTL algorithms identifiers */
+#define MTL_TX_ALGORITHM_WRR 0x0
+#define MTL_TX_ALGORITHM_WFQ 0x1
+#define MTL_TX_ALGORITHM_DWRR 0x2
+#define MTL_TX_ALGORITHM_SP 0x3
+#define MTL_RX_ALGORITHM_SP 0x4
+#define MTL_RX_ALGORITHM_WSP 0x5
+
+/* RX/TX Queue Mode */
+#define MTL_QUEUE_AVB 0x0
+#define MTL_QUEUE_DCB 0x1
+
/* The MDC clock could be set higher than the IEEE 802.3
 * specified frequency limit of 2.5 MHz, by programming a clock divider
* of value different than the above defined values. The resultant MDIO
@@ -109,6 +124,26 @@ struct stmmac_axi {
bool axi_rb;
};
+struct stmmac_rxq_cfg {
+ u8 mode_to_use;
+ u8 chan;
+ u8 pkt_route;
+ bool use_prio;
+ u32 prio;
+};
+
+struct stmmac_txq_cfg {
+ u8 weight;
+ u8 mode_to_use;
+ /* Credit Base Shaper parameters */
+ u32 send_slope;
+ u32 idle_slope;
+ u32 high_credit;
+ u32 low_credit;
+ bool use_prio;
+ u32 prio;
+};
+
struct plat_stmmacenet_data {
int bus_id;
int phy_addr;
@@ -133,6 +168,12 @@ struct plat_stmmacenet_data {
int unicast_filter_entries;
int tx_fifo_size;
int rx_fifo_size;
+ u8 rx_queues_to_use;
+ u8 tx_queues_to_use;
+ u8 rx_sched_algorithm;
+ u8 tx_sched_algorithm;
+ struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
+ struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES];
void (*fix_mac_speed)(void *priv, unsigned int speed);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
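
A glue driver wanting to exercise the new multi-queue fields could fill its platform data along these lines; the queue counts, algorithms and shaper values below are purely illustrative and not taken from this patch:

	static struct plat_stmmacenet_data plat_dat = {
		.rx_queues_to_use   = 2,
		.tx_queues_to_use   = 2,
		.rx_sched_algorithm = MTL_RX_ALGORITHM_SP,
		.tx_sched_algorithm = MTL_TX_ALGORITHM_WRR,
		.rx_queues_cfg[0]   = { .mode_to_use = MTL_QUEUE_DCB, .chan = 0 },
		.rx_queues_cfg[1]   = { .mode_to_use = MTL_QUEUE_AVB, .chan = 1 },
		.tx_queues_cfg[0]   = { .mode_to_use = MTL_QUEUE_DCB, .weight = 10 },
		.tx_queues_cfg[1]   = {
			.mode_to_use = MTL_QUEUE_AVB,	/* credit-based shaper queue */
			.weight      = 1,
			.send_slope  = 0x1000,
			.idle_slope  = 0x1000,
			.high_credit = 0x10000,
			.low_credit  = 0x10000,
		},
	};
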
diff --git a/include/linux/string.h b/include/linux/string.h
index 26b6f6a66f83..537918f8a98e 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -114,6 +114,14 @@ extern int memcmp(const void *,const void *,__kernel_size_t);
#ifndef __HAVE_ARCH_MEMCHR
extern void * memchr(const void *,int,__kernel_size_t);
#endif
+#ifndef __HAVE_ARCH_MEMCPY_MCSAFE
+static inline __must_check int memcpy_mcsafe(void *dst, const void *src,
+ size_t cnt)
+{
+ memcpy(dst, src, cnt);
+ return 0;
+}
+#endif
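
As a rough illustration (the function and its arguments are hypothetical), a caller copying from media that can raise machine checks treats a non-zero return as a recoverable error; with the generic fallback above the copy simply always reports success:

	/* Sketch only: dst, src and len come from the surrounding driver. */
	static int read_possibly_poisoned(void *dst, const void *src, size_t len)
	{
		if (memcpy_mcsafe(dst, src, len))
			return -EIO;	/* an uncorrectable error was consumed */
		return 0;
	}
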
void *memchr_inv(const void *s, int c, size_t n);
char *strreplace(char *s, char old, char new);
@@ -135,6 +143,16 @@ static inline int strtobool(const char *s, bool *res)
}
int match_string(const char * const *array, size_t n, const char *string);
+int __sysfs_match_string(const char * const *array, size_t n, const char *s);
+
+/**
+ * sysfs_match_string - matches given string in an array
+ * @_a: array of strings
+ * @_s: string to match with
+ *
+ * Helper for __sysfs_match_string(). Calculates the size of @_a automatically.
+ */
+#define sysfs_match_string(_a, _s) __sysfs_match_string(_a, ARRAY_SIZE(_a), _s)
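
A brief, hypothetical store() callback shows the intended use of the new helper; the string table and attribute below are made up for illustration:

	static const char * const mode_names[] = { "off", "auto", "manual" };

	static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
				  const char *buf, size_t count)
	{
		int idx = sysfs_match_string(mode_names, buf);

		if (idx < 0)
			return idx;		/* negative errno when nothing matched */
		/* ... switch the device to mode_names[idx] ... */
		return count;
	}
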
#ifdef CONFIG_BINARY_PRINTF
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index 245fc59b7324..b7e85b341a54 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -143,6 +143,9 @@ enum rpcrdma_proc {
#define rdma_done cpu_to_be32(RDMA_DONE)
#define rdma_error cpu_to_be32(RDMA_ERROR)
+#define err_vers cpu_to_be32(ERR_VERS)
+#define err_chunk cpu_to_be32(ERR_CHUNK)
+
/*
* Private extension to RPC-over-RDMA Version One.
* Message passed during RDMA-CM connection set-up.
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index e770abeed32d..94631026f79c 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -336,8 +336,7 @@ xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
{
char *cp = (char *)p;
struct kvec *vec = &rqstp->rq_arg.head[0];
- return cp >= (char*)vec->iov_base
- && cp <= (char*)vec->iov_base + vec->iov_len;
+ return cp == (char *)vec->iov_base + vec->iov_len;
}
static inline int
@@ -474,6 +473,7 @@ void svc_pool_map_put(void);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
struct svc_serv_ops *);
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
+int svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int);
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void svc_destroy(struct svc_serv *);
void svc_shutdown_net(struct svc_serv *, struct net *);
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index b105f73e3ca2..f3787d800ba4 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -48,6 +48,12 @@
#include <rdma/rdma_cm.h>
#define SVCRDMA_DEBUG
+/* Default and maximum inline threshold sizes */
+enum {
+ RPCRDMA_DEF_INLINE_THRESH = 4096,
+ RPCRDMA_MAX_INLINE_THRESH = 65536
+};
+
/* RPC/RDMA parameters and stats */
extern unsigned int svcrdma_ord;
extern unsigned int svcrdma_max_requests;
@@ -85,27 +91,11 @@ struct svc_rdma_op_ctxt {
enum dma_data_direction direction;
int count;
unsigned int mapped_sges;
- struct ib_sge sge[RPCSVC_MAXPAGES];
+ struct ib_send_wr send_wr;
+ struct ib_sge sge[1 + RPCRDMA_MAX_INLINE_THRESH / PAGE_SIZE];
struct page *pages[RPCSVC_MAXPAGES];
};
-/*
- * NFS_ requests are mapped on the client side by the chunk lists in
- * the RPCRDMA header. During the fetching of the RPC from the client
- * and the writing of the reply to the client, the memory in the
- * client and the memory in the server must be mapped as contiguous
- * vaddr/len for access by the hardware. These data strucures keep
- * these mappings.
- *
- * For an RDMA_WRITE, the 'sge' maps the RPC REPLY. For RDMA_READ, the
- * 'sge' in the svc_rdma_req_map maps the server side RPC reply and the
- * 'ch' field maps the read-list of the RPCRDMA header to the 'sge'
- * mapping of the reply.
- */
-struct svc_rdma_chunk_sge {
- int start; /* sge no for this chunk */
- int count; /* sge count for this chunk */
-};
struct svc_rdma_fastreg_mr {
struct ib_mr *mr;
struct scatterlist *sg;
@@ -114,15 +104,7 @@ struct svc_rdma_fastreg_mr {
enum dma_data_direction direction;
struct list_head frmr_list;
};
-struct svc_rdma_req_map {
- struct list_head free;
- unsigned long count;
- union {
- struct kvec sge[RPCSVC_MAXPAGES];
- struct svc_rdma_chunk_sge ch[RPCSVC_MAXPAGES];
- unsigned long lkey[RPCSVC_MAXPAGES];
- };
-};
+
#define RDMACTXT_F_LAST_CTXT 2
#define SVCRDMA_DEVCAP_FAST_REG 1 /* fast mr registration */
@@ -144,14 +126,15 @@ struct svcxprt_rdma {
u32 sc_max_requests; /* Max requests */
u32 sc_max_bc_requests;/* Backward credits */
int sc_max_req_size; /* Size of each RQ WR buf */
+ u8 sc_port_num;
struct ib_pd *sc_pd;
spinlock_t sc_ctxt_lock;
struct list_head sc_ctxts;
int sc_ctxt_used;
- spinlock_t sc_map_lock;
- struct list_head sc_maps;
+ spinlock_t sc_rw_ctxt_lock;
+ struct list_head sc_rw_ctxts;
struct list_head sc_rq_dto_q;
spinlock_t sc_rq_dto_lock;
@@ -181,9 +164,7 @@ struct svcxprt_rdma {
/* The default ORD value is based on two outstanding full-size writes with a
* page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */
#define RPCRDMA_ORD (64/4)
-#define RPCRDMA_SQ_DEPTH_MULT 8
#define RPCRDMA_MAX_REQUESTS 32
-#define RPCRDMA_MAX_REQ_SIZE 4096
/* Typical ULP usage of BC requests is NFSv4.1 backchannel. Our
* current NFSv4.1 implementation supports one backchannel slot.
@@ -201,19 +182,11 @@ static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma,
/* svc_rdma_backchannel.c */
extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
- struct rpcrdma_msg *rmsgp,
+ __be32 *rdma_resp,
struct xdr_buf *rcvbuf);
/* svc_rdma_marshal.c */
extern int svc_rdma_xdr_decode_req(struct xdr_buf *);
-extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
- struct rpcrdma_msg *,
- enum rpcrdma_errcode, __be32 *);
-extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int);
-extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int);
-extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int,
- __be32, __be64, u32);
-extern unsigned int svc_rdma_xdr_get_reply_hdr_len(__be32 *rdma_resp);
/* svc_rdma_recvfrom.c */
extern int svc_rdma_recvfrom(struct svc_rqst *);
@@ -224,16 +197,25 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
struct svc_rdma_op_ctxt *, int *, u32 *,
u32, u32, u64, bool);
+/* svc_rdma_rw.c */
+extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
+extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
+ __be32 *wr_ch, struct xdr_buf *xdr);
+extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
+ __be32 *rp_ch, bool writelist,
+ struct xdr_buf *xdr);
+
/* svc_rdma_sendto.c */
-extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
- struct svc_rdma_req_map *, bool);
+extern int svc_rdma_map_reply_hdr(struct svcxprt_rdma *rdma,
+ struct svc_rdma_op_ctxt *ctxt,
+ __be32 *rdma_resp, unsigned int len);
+extern int svc_rdma_post_send_wr(struct svcxprt_rdma *rdma,
+ struct svc_rdma_op_ctxt *ctxt,
+ int num_sge, u32 inv_rkey);
extern int svc_rdma_sendto(struct svc_rqst *);
-extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
- int);
/* svc_rdma_transport.c */
extern void svc_rdma_wc_send(struct ib_cq *, struct ib_wc *);
-extern void svc_rdma_wc_write(struct ib_cq *, struct ib_wc *);
extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *);
extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *);
extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *);
@@ -244,9 +226,6 @@ extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
-extern struct svc_rdma_req_map *svc_rdma_get_req_map(struct svcxprt_rdma *);
-extern void svc_rdma_put_req_map(struct svcxprt_rdma *,
- struct svc_rdma_req_map *);
extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *);
extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
struct svc_rdma_fastreg_mr *);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index d9718378a8be..0b1cf32edfd7 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -189,6 +189,8 @@ struct platform_suspend_ops {
struct platform_freeze_ops {
int (*begin)(void);
int (*prepare)(void);
+ void (*wake)(void);
+ void (*sync)(void);
void (*restore)(void);
void (*end)(void);
};
@@ -428,7 +430,8 @@ extern unsigned int pm_wakeup_irq;
extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
-extern void pm_wakeup_clear(void);
+extern void pm_system_cancel_wakeup(void);
+extern void pm_wakeup_clear(bool reset);
extern void pm_system_irq_wakeup(unsigned int irq_number);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
@@ -478,7 +481,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
static inline bool pm_wakeup_pending(void) { return false; }
static inline void pm_system_wakeup(void) {}
-static inline void pm_wakeup_clear(void) {}
+static inline void pm_wakeup_clear(bool reset) {}
static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
static inline void lock_system_sleep(void) {}
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 45e91dd6716d..ba5882419a7d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -279,7 +279,7 @@ extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
-extern void deactivate_page(struct page *page);
+extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);
extern void add_page_to_unevictable_list(struct page *page);
@@ -411,9 +411,6 @@ struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
-extern int get_swap_slots(int n, swp_entry_t *slots);
-extern void swapcache_free_batch(swp_entry_t *entries, int n);
-
#else /* CONFIG_SWAP */
#define swap_address_space(entry) (NULL)
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index b7e82049fec7..80d07816def0 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -180,7 +180,6 @@ extern void setup_sysctl_set(struct ctl_table_set *p,
int (*is_seen)(struct ctl_table_set *));
extern void retire_sysctl_set(struct ctl_table_set *set);
-void register_sysctl_root(struct ctl_table_root *root);
struct ctl_table_header *__register_sysctl_table(
struct ctl_table_set *set,
const char *path, struct ctl_table *table);
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index 9fba9dd33544..9375d23a24e7 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -34,9 +34,9 @@ struct t10_pi_tuple {
};
-extern struct blk_integrity_profile t10_pi_type1_crc;
-extern struct blk_integrity_profile t10_pi_type1_ip;
-extern struct blk_integrity_profile t10_pi_type3_crc;
-extern struct blk_integrity_profile t10_pi_type3_ip;
+extern const struct blk_integrity_profile t10_pi_type1_crc;
+extern const struct blk_integrity_profile t10_pi_type1_ip;
+extern const struct blk_integrity_profile t10_pi_type3_crc;
+extern const struct blk_integrity_profile t10_pi_type3_ip;
#endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index cfc2d9506ce8..b6d5adcee8fc 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -233,12 +233,14 @@ struct tcp_sock {
u8 syn_data:1, /* SYN includes data */
syn_fastopen:1, /* SYN includes Fast Open option */
syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
+ syn_fastopen_ch:1, /* Active TFO re-enabling probe */
syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
save_syn:1, /* Save headers of SYN packet */
is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
/* RTT measurement */
+ struct skb_mstamp tcp_mstamp; /* most recent packet received/sent */
u32 srtt_us; /* smoothed round trip time << 3 in usecs */
u32 mdev_us; /* medium deviation */
u32 mdev_max_us; /* maximal mdev for the last rtt period */
@@ -331,16 +333,16 @@ struct tcp_sock {
/* Receiver side RTT estimation */
struct {
- u32 rtt;
- u32 seq;
- u32 time;
+ u32 rtt_us;
+ u32 seq;
+ struct skb_mstamp time;
} rcv_rtt_est;
/* Receiver queue space */
struct {
- int space;
- u32 seq;
- u32 time;
+ int space;
+ u32 seq;
+ struct skb_mstamp time;
} rcvq_space;
/* TCP-specific MTU probe information. */
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
new file mode 100644
index 000000000000..0f175b8f6456
--- /dev/null
+++ b/include/linux/tee_drv.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __TEE_DRV_H
+#define __TEE_DRV_H
+
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/tee.h>
+
+/*
+ * The file describes the API provided by the generic TEE driver to the
+ * specific TEE driver.
+ */
+
+#define TEE_SHM_MAPPED 0x1 /* Memory mapped by the kernel */
+#define TEE_SHM_DMA_BUF 0x2 /* Memory with dma-buf handle */
+
+struct tee_device;
+struct tee_shm;
+struct tee_shm_pool;
+
+/**
+ * struct tee_context - driver specific context on file pointer data
+ * @teedev: pointer to this drivers struct tee_device
+ * @list_shm: List of shared memory object owned by this context
+ * @data: driver specific context data, managed by the driver
+ */
+struct tee_context {
+ struct tee_device *teedev;
+ struct list_head list_shm;
+ void *data;
+};
+
+struct tee_param_memref {
+ size_t shm_offs;
+ size_t size;
+ struct tee_shm *shm;
+};
+
+struct tee_param_value {
+ u64 a;
+ u64 b;
+ u64 c;
+};
+
+struct tee_param {
+ u64 attr;
+ union {
+ struct tee_param_memref memref;
+ struct tee_param_value value;
+ } u;
+};
+
+/**
+ * struct tee_driver_ops - driver operations vtable
+ * @get_version: returns version of driver
+ * @open: called when the device file is opened
+ * @release: release this open file
+ * @open_session: open a new session
+ * @close_session: close a session
+ * @invoke_func: invoke a trusted function
+ * @cancel_req: request cancel of an ongoing invoke or open
+ * @supp_recv: called for supplicant to get a command
+ * @supp_send: called for supplicant to send a response
+ */
+struct tee_driver_ops {
+ void (*get_version)(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers);
+ int (*open)(struct tee_context *ctx);
+ void (*release)(struct tee_context *ctx);
+ int (*open_session)(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param);
+ int (*close_session)(struct tee_context *ctx, u32 session);
+ int (*invoke_func)(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param);
+ int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session);
+ int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params,
+ struct tee_param *param);
+ int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params,
+ struct tee_param *param);
+};
+
+/**
+ * struct tee_desc - Describes the TEE driver to the subsystem
+ * @name: name of driver
+ * @ops: driver operations vtable
+ * @owner: module providing the driver
+ * @flags: Extra properties of driver, defined by TEE_DESC_* below
+ */
+#define TEE_DESC_PRIVILEGED 0x1
+struct tee_desc {
+ const char *name;
+ const struct tee_driver_ops *ops;
+ struct module *owner;
+ u32 flags;
+};
+
+/**
+ * tee_device_alloc() - Allocate a new struct tee_device instance
+ * @teedesc: Descriptor for this driver
+ * @dev: Parent device for this device
+ * @pool: Shared memory pool, NULL if not used
+ * @driver_data: Private driver data for this device
+ *
+ * Allocates a new struct tee_device instance. The device is
+ * removed by tee_device_unregister().
+ *
+ * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
+ */
+struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
+ struct device *dev,
+ struct tee_shm_pool *pool,
+ void *driver_data);
+
+/**
+ * tee_device_register() - Registers a TEE device
+ * @teedev: Device to register
+ *
+ * tee_device_unregister() needs to be called to remove the @teedev if
+ * this function fails.
+ *
+ * @returns < 0 on failure
+ */
+int tee_device_register(struct tee_device *teedev);
+
+/**
+ * tee_device_unregister() - Removes a TEE device
+ * @teedev: Device to unregister
+ *
+ * This function should be called to remove the @teedev even if
+ * tee_device_register() hasn't been called yet. Does nothing if
+ * @teedev is NULL.
+ */
+void tee_device_unregister(struct tee_device *teedev);
+
+/**
+ * struct tee_shm_pool_mem_info - holds information needed to create a shared
+ * memory pool
+ * @vaddr: Virtual address of start of pool
+ * @paddr: Physical address of start of pool
+ * @size: Size in bytes of the pool
+ */
+struct tee_shm_pool_mem_info {
+ unsigned long vaddr;
+ phys_addr_t paddr;
+ size_t size;
+};
+
+/**
+ * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
+ * memory range
+ * @priv_info: Information for driver private shared memory pool
+ * @dmabuf_info: Information for dma-buf shared memory pool
+ *
+ * The start and end of the pools must be page aligned.
+ *
+ * Allocations with the flag TEE_SHM_DMA_BUF set will use the range supplied
+ * in @dmabuf_info; all others will use the range provided by @priv_info.
+ *
+ * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
+ */
+struct tee_shm_pool *
+tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
+ struct tee_shm_pool_mem_info *dmabuf_info);
+
+/**
+ * tee_shm_pool_free() - Free a shared memory pool
+ * @pool: The shared memory pool to free
+ *
+ * There must be no remaining shared memory allocated from this pool when
+ * this function is called.
+ */
+void tee_shm_pool_free(struct tee_shm_pool *pool);
+
+/**
+ * tee_get_drvdata() - Return driver_data pointer
+ * @returns the driver_data pointer supplied to tee_device_alloc().
+ */
+void *tee_get_drvdata(struct tee_device *teedev);
+
+/**
+ * tee_shm_alloc() - Allocate shared memory
+ * @ctx: Context that allocates the shared memory
+ * @size: Requested size of shared memory
+ * @flags: Flags setting properties for the requested shared memory.
+ *
+ * Memory allocated as global shared memory is automatically freed when the
+ * TEE file pointer is closed. The @flags field uses the bits defined by
+ * TEE_SHM_* above. TEE_SHM_MAPPED must currently always be set. If
+ * TEE_SHM_DMA_BUF is set, global shared memory will be allocated and
+ * associated with a dma-buf handle, else driver private memory.
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
+
+/**
+ * tee_shm_free() - Free shared memory
+ * @shm: Handle to shared memory to free
+ */
+void tee_shm_free(struct tee_shm *shm);
+
+/**
+ * tee_shm_put() - Decrease reference count on a shared memory handle
+ * @shm: Shared memory handle
+ */
+void tee_shm_put(struct tee_shm *shm);
+
+/**
+ * tee_shm_va2pa() - Get physical address of a virtual address
+ * @shm: Shared memory handle
+ * @va: Virtual address to translate
+ * @pa: Returned physical address
+ * @returns 0 on success and < 0 on failure
+ */
+int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa);
+
+/**
+ * tee_shm_pa2va() - Get virtual address of a physical address
+ * @shm: Shared memory handle
+ * @pa: Physical address to translate
+ * @va: Returned virtual address
+ * @returns 0 on success and < 0 on failure
+ */
+int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va);
+
+/**
+ * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
+ * @shm: Shared memory handle
+ * @offs: Offset from start of this shared memory
+ * @returns virtual address of the shared memory + offs if offs is within
+ * the bounds of this shared memory, else an ERR_PTR
+ */
+void *tee_shm_get_va(struct tee_shm *shm, size_t offs);
+
+/**
+ * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
+ * @shm: Shared memory handle
+ * @offs: Offset from start of this shared memory
+ * @pa: Physical address to return
+ * @returns 0 if offs is within the bounds of this shared memory, else an
+ * error code.
+ */
+int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa);
+
+/**
+ * tee_shm_get_id() - Get id of a shared memory object
+ * @shm: Shared memory handle
+ * @returns id
+ */
+int tee_shm_get_id(struct tee_shm *shm);
+
+/**
+ * tee_shm_get_from_id() - Find shared memory object and increase reference
+ * count
+ * @ctx: Context owning the shared memory
+ * @id: Id of shared memory object
+ * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
+
+#endif /*__TEE_DRV_H*/
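
Tying the calls above together, a skeletal TEE driver probe might be structured roughly as follows; every my_* symbol and the shared-memory pool are hypothetical placeholders, not part of this header:

	static const struct tee_driver_ops my_tee_ops = {
		.get_version	= my_get_version,	/* hypothetical callbacks */
		.open		= my_open,
		.release	= my_release,
		.open_session	= my_open_session,
		.close_session	= my_close_session,
		.invoke_func	= my_invoke_func,
		.cancel_req	= my_cancel_req,
	};

	static const struct tee_desc my_tee_desc = {
		.name	= "my-tee",
		.ops	= &my_tee_ops,
		.owner	= THIS_MODULE,
	};

	static int my_tee_probe(struct platform_device *pdev)
	{
		struct tee_device *teedev;
		int rc;

		teedev = tee_device_alloc(&my_tee_desc, &pdev->dev, my_pool, NULL);
		if (IS_ERR(teedev))
			return PTR_ERR(teedev);

		rc = tee_device_register(teedev);
		if (rc)
			tee_device_unregister(teedev);	/* safe after a failed register */
		return rc;
	}
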
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 58373875e8ee..d7d3ea637dd0 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -22,6 +22,18 @@
#endif
#include <linux/bitops.h>
+
+/*
+ * For per-arch arch_within_stack_frames() implementations, defined in
+ * asm/thread_info.h.
+ */
+enum {
+ BAD_STACK = -1,
+ NOT_STACK = 0,
+ GOOD_FRAME,
+ GOOD_STACK,
+};
+
#include <asm/thread_info.h>
#ifdef __KERNEL__
@@ -101,6 +113,10 @@ static inline void check_object_size(const void *ptr, unsigned long n,
{ }
#endif /* CONFIG_HARDENED_USERCOPY */
+#ifndef arch_setup_new_exec
+static inline void arch_setup_new_exec(void) { }
+#endif
+
#endif /* __KERNEL__ */
#endif /* _LINUX_THREAD_INFO_H */
diff --git a/include/linux/tick.h b/include/linux/tick.h
index a04fea19676f..fe01e68bf520 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -117,6 +117,7 @@ extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern ktime_t tick_nohz_get_sleep_length(void);
+extern unsigned long tick_nohz_get_idle_calls(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
diff --git a/include/linux/time.h b/include/linux/time.h
index 23f0f5ce3090..c0543f5f25de 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -151,9 +151,6 @@ static inline bool timespec_inject_offset_valid(const struct timespec *ts)
return true;
}
-#define CURRENT_TIME (current_kernel_time())
-#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
-
/* Some architectures do not supply their own clocksource.
* This is mainly the case in architectures that get their
* inter-tick times by reading the counter on their interval
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index b598cbc7b576..ddc229ff6d1e 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -19,21 +19,6 @@ extern void do_gettimeofday(struct timeval *tv);
extern int do_settimeofday64(const struct timespec64 *ts);
extern int do_sys_settimeofday64(const struct timespec64 *tv,
const struct timezone *tz);
-static inline int do_sys_settimeofday(const struct timespec *tv,
- const struct timezone *tz)
-{
- struct timespec64 ts64;
-
- if (!tv)
- return do_sys_settimeofday64(NULL, tz);
-
- if (!timespec_valid(tv))
- return -EINVAL;
-
- ts64 = timespec_to_timespec64(*tv);
- return do_sys_settimeofday64(&ts64, tz);
-}
-
/*
* Kernel time accessors
*/
@@ -273,6 +258,11 @@ static inline void timekeeping_clocktai(struct timespec *ts)
*ts = ktime_to_timespec(ktime_get_clocktai());
}
+static inline void timekeeping_clocktai64(struct timespec64 *ts)
+{
+ *ts = ktime_to_timespec64(ktime_get_clocktai());
+}
+
/*
* RTC specific
*/
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index da158f06e0b2..5a090f5ab335 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -48,7 +48,8 @@ struct tpm_class_ops {
u8 (*status) (struct tpm_chip *chip);
bool (*update_timeouts)(struct tpm_chip *chip,
unsigned long *timeout_cap);
-
+ int (*request_locality)(struct tpm_chip *chip, int loc);
+ void (*relinquish_locality)(struct tpm_chip *chip, int loc);
};
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 0af63c4381b9..a556805eff8a 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -138,16 +138,7 @@ enum print_line_t {
TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
};
-/*
- * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
- * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
- * simplifies those functions and keeps them in sync.
- */
-static inline enum print_line_t trace_handle_return(struct trace_seq *s)
-{
- return trace_seq_has_overflowed(s) ?
- TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
-}
+enum print_line_t trace_handle_return(struct trace_seq *s);
void tracing_generic_entry_update(struct trace_entry *entry,
unsigned long flags,
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index f72fcfe0e66a..cc48cb2ce209 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -128,7 +128,7 @@ extern void syscall_unregfunc(void);
* as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
* "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
*/
-#define __DO_TRACE(tp, proto, args, cond, prercu, postrcu) \
+#define __DO_TRACE(tp, proto, args, cond, rcucheck) \
do { \
struct tracepoint_func *it_func_ptr; \
void *it_func; \
@@ -136,7 +136,11 @@ extern void syscall_unregfunc(void);
\
if (!(cond)) \
return; \
- prercu; \
+ if (rcucheck) { \
+ if (WARN_ON_ONCE(rcu_irq_enter_disabled())) \
+ return; \
+ rcu_irq_enter_irqson(); \
+ } \
rcu_read_lock_sched_notrace(); \
it_func_ptr = rcu_dereference_sched((tp)->funcs); \
if (it_func_ptr) { \
@@ -147,20 +151,19 @@ extern void syscall_unregfunc(void);
} while ((++it_func_ptr)->func); \
} \
rcu_read_unlock_sched_notrace(); \
- postrcu; \
+ if (rcucheck) \
+ rcu_irq_exit_irqson(); \
} while (0)
#ifndef MODULE
-#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \
static inline void trace_##name##_rcuidle(proto) \
{ \
if (static_key_false(&__tracepoint_##name.key)) \
__DO_TRACE(&__tracepoint_##name, \
TP_PROTO(data_proto), \
TP_ARGS(data_args), \
- TP_CONDITION(cond), \
- rcu_irq_enter_irqson(), \
- rcu_irq_exit_irqson()); \
+ TP_CONDITION(cond), 1); \
}
#else
#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
@@ -186,7 +189,7 @@ extern void syscall_unregfunc(void);
__DO_TRACE(&__tracepoint_##name, \
TP_PROTO(data_proto), \
TP_ARGS(data_args), \
- TP_CONDITION(cond),,); \
+ TP_CONDITION(cond), 0); \
if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
rcu_read_lock_sched_notrace(); \
rcu_dereference_sched(__tracepoint_##name.funcs);\
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 1017e904c0a3..eccb4ec30a8a 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -390,7 +390,6 @@ static inline bool tty_throttled(struct tty_struct *tty)
}
#ifdef CONFIG_TTY
-extern void console_init(void);
extern void tty_kref_put(struct tty_struct *tty);
extern struct pid *tty_get_pgrp(struct tty_struct *tty);
extern void tty_vhangup_self(void);
@@ -402,8 +401,6 @@ extern struct tty_struct *get_current_tty(void);
extern int __init tty_init(void);
extern const char *tty_name(const struct tty_struct *tty);
#else
-static inline void console_init(void)
-{ }
static inline void tty_kref_put(struct tty_struct *tty)
{ }
static inline struct pid *tty_get_pgrp(struct tty_struct *tty)
@@ -478,9 +475,13 @@ extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
extern int is_current_pgrp_orphaned(void);
extern void tty_hangup(struct tty_struct *tty);
extern void tty_vhangup(struct tty_struct *tty);
+extern void tty_vhangup_session(struct tty_struct *tty);
extern int tty_hung_up_p(struct file *filp);
extern void do_SAK(struct tty_struct *tty);
extern void __do_SAK(struct tty_struct *tty);
+extern void tty_open_proc_set_tty(struct file *filp, struct tty_struct *tty);
+extern int tty_signal_session_leader(struct tty_struct *tty, int exit_session);
+extern void session_clear_tty(struct pid *session);
extern void no_tty(void);
extern void tty_buffer_free_all(struct tty_port *port);
extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
@@ -528,6 +529,8 @@ extern void tty_ldisc_flush(struct tty_struct *tty);
extern long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
+extern long tty_jobctrl_ioctl(struct tty_struct *tty, struct tty_struct *real_tty,
+ struct file *file, unsigned int cmd, unsigned long arg);
extern int tty_perform_flush(struct tty_struct *tty, unsigned long arg);
extern void tty_default_fops(struct file_operations *fops);
extern struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx);
@@ -555,6 +558,15 @@ extern struct device *tty_port_register_device_attr(struct tty_port *port,
struct tty_driver *driver, unsigned index,
struct device *device, void *drvdata,
const struct attribute_group **attr_grp);
+extern struct device *tty_port_register_device_serdev(struct tty_port *port,
+ struct tty_driver *driver, unsigned index,
+ struct device *device);
+extern struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
+ struct tty_driver *driver, unsigned index,
+ struct device *device, void *drvdata,
+ const struct attribute_group **attr_grp);
+extern void tty_port_unregister_device(struct tty_port *port,
+ struct tty_driver *driver, unsigned index);
extern int tty_port_alloc_xmit_buf(struct tty_port *port);
extern void tty_port_free_xmit_buf(struct tty_port *port);
extern void tty_port_destroy(struct tty_port *port);
@@ -669,7 +681,11 @@ extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
/* n_tty.c */
extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
+#ifdef CONFIG_TTY
extern void __init n_tty_init(void);
+#else
+static inline void n_tty_init(void) { }
+#endif
/* tty_audit.c */
#ifdef CONFIG_AUDIT
diff --git a/include/linux/types.h b/include/linux/types.h
index 1e7bd24848fc..258099a4ed82 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -209,7 +209,7 @@ struct ustat {
 * naturally due to ABI requirements, but some architectures (like CRIS) have
* weird ABI and we need to ask it explicitly.
*
- * The alignment is required to guarantee that bits 0 and 1 of @next will be
+ * The alignment is required to guarantee that bit 0 of @next will be
* clear under normal conditions -- as long as we use call_rcu(),
* call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback.
*
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index f30c187ed785..201418d5e15c 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,8 +2,199 @@
#define __LINUX_UACCESS_H__
#include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <linux/kasan-checks.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
+
#include <asm/uaccess.h>
+/*
+ * Architectures should provide two primitives (raw_copy_{to,from}_user())
+ * and get rid of their private instances of copy_{to,from}_user() and
+ * __copy_{to,from}_user{,_inatomic}().
+ *
+ * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
+ * return the amount left to copy. They should assume that access_ok() has
+ * already been checked (and succeeded); they should *not* zero-pad anything.
+ * No KASAN or object size checks either - those belong here.
+ *
+ * Both of these functions should attempt to copy size bytes starting at from
+ * into the area starting at to. They must not fetch or store anything
+ * outside of those areas. Return value must be between 0 (everything
+ * copied successfully) and size (nothing copied).
+ *
+ * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
+ * at to must become equal to the bytes fetched from the corresponding area
+ * starting at from. All data past to + size - N must be left unmodified.
+ *
+ * If copying succeeds, the return value must be 0. If some data cannot be
+ * fetched, it is permitted to copy less than had been fetched; the only
+ * hard requirement is that not storing anything at all (i.e. returning size)
+ * should happen only when nothing could be copied. In other words, you don't
+ * have to squeeze as much as possible - it is allowed, but not necessary.
+ *
+ * For raw_copy_from_user(), the destination (to) always points to kernel
+ * memory and no faults on store should happen; interpretation of from is
+ * affected by set_fs(). For raw_copy_to_user() it's the other way round.
+ *
+ * Both can be inlined - it's up to architectures whether it wants to bother
+ * with that. They should not be used directly; they are used to implement
+ * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user_inatomic())
+ * that are used instead. Out of those, __... ones are inlined. Plain
+ * copy_{to,from}_user() might or might not be inlined. If you want them
+ * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
+ *
+ * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
+ * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
+ * at all; their callers absolutely must check the return value.
+ *
+ * Biarch ones should also provide raw_copy_in_user() - similar to the above,
+ * but both source and destination are __user pointers (affected by set_fs()
+ * as usual) and both source and destination can trigger faults.
+ */
+
+static __always_inline unsigned long
+__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+ kasan_check_write(to, n);
+ check_object_size(to, n, false);
+ return raw_copy_from_user(to, from, n);
+}
+
+static __always_inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ might_fault();
+ kasan_check_write(to, n);
+ check_object_size(to, n, false);
+ return raw_copy_from_user(to, from, n);
+}
+
+/**
+ * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
+ * @to: Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only.
+ *
+ * Copy data from kernel space to user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ * The caller should also make sure the user space address is pinned so
+ * that we do not take a page fault and sleep.
+ */
+static __always_inline unsigned long
+__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+{
+ kasan_check_read(from, n);
+ check_object_size(from, n, true);
+ return raw_copy_to_user(to, from, n);
+}
+
+static __always_inline unsigned long
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ might_fault();
+ kasan_check_read(from, n);
+ check_object_size(from, n, true);
+ return raw_copy_to_user(to, from, n);
+}
+
+#ifdef INLINE_COPY_FROM_USER
+static inline unsigned long
+_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long res = n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = raw_copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
+}
+#else
+extern unsigned long
+_copy_from_user(void *, const void __user *, unsigned long);
+#endif
+
+#ifdef INLINE_COPY_TO_USER
+static inline unsigned long
+_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = raw_copy_to_user(to, from, n);
+ return n;
+}
+#else
+extern unsigned long
+_copy_to_user(void __user *, const void *, unsigned long);
+#endif
+
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+ WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
+static __always_inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ int sz = __compiletime_object_size(to);
+
+ might_fault();
+ kasan_check_write(to, n);
+
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(to, n, false);
+ n = _copy_from_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
+ else
+ __bad_copy_user();
+
+ return n;
+}
+
+static __always_inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ int sz = __compiletime_object_size(from);
+
+ kasan_check_read(from, n);
+ might_fault();
+
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(from, n, true);
+ n = _copy_to_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
+ else
+ __bad_copy_user();
+
+ return n;
+}
+#ifdef CONFIG_COMPAT
+static __always_inline unsigned long __must_check
+__copy_in_user(void __user *to, const void *from, unsigned long n)
+{
+ might_fault();
+ return raw_copy_in_user(to, from, n);
+}
+static __always_inline unsigned long __must_check
+copy_in_user(void __user *to, const void *from, unsigned long n)
+{
+ might_fault();
+ if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
+ n = raw_copy_in_user(to, from, n);
+ return n;
+}
+#endif
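
Consumers of these helpers are unchanged by the refactoring; for reference, a minimal, hypothetical ioctl handler still follows the familiar pattern:

	struct my_args {			/* hypothetical user/kernel ABI structure */
		__u32 flags;
		__u64 addr;
	};

	static long my_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	{
		struct my_args args;

		if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
			return -EFAULT;
		/* ... validate and act on args ... */
		if (copy_to_user((void __user *)arg, &args, sizeof(args)))
			return -EFAULT;
		return 0;
	}
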
+
static __always_inline void pagefault_disabled_inc(void)
{
current->pagefault_disabled++;
@@ -12,7 +203,6 @@ static __always_inline void pagefault_disabled_inc(void)
static __always_inline void pagefault_disabled_dec(void)
{
current->pagefault_disabled--;
- WARN_ON(current->pagefault_disabled < 0);
}
/*
@@ -67,12 +257,6 @@ static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
return __copy_from_user_inatomic(to, from, n);
}
-static inline unsigned long __copy_from_user_nocache(void *to,
- const void __user *from, unsigned long n)
-{
- return __copy_from_user(to, from, n);
-}
-
#endif /* ARCH_HAS_NOCACHE_UACCESS */
/*
diff --git a/include/linux/udp.h b/include/linux/udp.h
index c0f530809d1f..6cb4061a720d 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -115,6 +115,6 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
#define udp_portaddr_for_each_entry_rcu(__sk, list) \
hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node)
-#define IS_UDPLITE(__sk) (udp_sk(__sk)->pcflag)
+#define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE)
#endif /* _LINUX_UDP_H */
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 804e34c6f981..f2d36a3d3005 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -39,7 +39,10 @@ struct iov_iter {
};
union {
unsigned long nr_segs;
- int idx;
+ struct {
+ int idx;
+ int start_idx;
+ };
};
};
@@ -81,6 +84,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
+void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 32c0e83d6239..3c85c81b0027 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -23,11 +23,13 @@ struct uio_map;
/**
* struct uio_mem - description of a UIO memory region
* @name: name of the memory region for identification
- * @addr: address of the device's memory (phys_addr is used since
- * addr can be logical, virtual, or physical & phys_addr_t
- * should always be large enough to handle any of the
- * address types)
- * @size: size of IO
+ * @addr: address of the device's memory rounded to page
+ * size (phys_addr is used since addr can be
+ * logical, virtual, or physical & phys_addr_t
+ * should always be large enough to handle any of
+ * the address types)
+ * @offs: offset of device memory within the page
+ * @size: size of IO (multiple of page size)
* @memtype: type of memory addr points to
* @internal_addr: ioremap-ped version of addr, for driver internal use
* @map: for use by the UIO core only.
@@ -35,6 +37,7 @@ struct uio_map;
struct uio_mem {
const char *name;
phys_addr_t addr;
+ unsigned long offs;
resource_size_t size;
int memtype;
void __iomem *internal_addr;
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 7e68259360de..cb9fbd54386e 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -99,6 +99,76 @@ enum usb_interface_condition {
USB_INTERFACE_UNBINDING,
};
+int __must_check
+usb_find_common_endpoints(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_in,
+ struct usb_endpoint_descriptor **bulk_out,
+ struct usb_endpoint_descriptor **int_in,
+ struct usb_endpoint_descriptor **int_out);
+
+int __must_check
+usb_find_common_endpoints_reverse(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_in,
+ struct usb_endpoint_descriptor **bulk_out,
+ struct usb_endpoint_descriptor **int_in,
+ struct usb_endpoint_descriptor **int_out);
+
+static inline int __must_check
+usb_find_bulk_in_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_in)
+{
+ return usb_find_common_endpoints(alt, bulk_in, NULL, NULL, NULL);
+}
+
+static inline int __must_check
+usb_find_bulk_out_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_out)
+{
+ return usb_find_common_endpoints(alt, NULL, bulk_out, NULL, NULL);
+}
+
+static inline int __must_check
+usb_find_int_in_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **int_in)
+{
+ return usb_find_common_endpoints(alt, NULL, NULL, int_in, NULL);
+}
+
+static inline int __must_check
+usb_find_int_out_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **int_out)
+{
+ return usb_find_common_endpoints(alt, NULL, NULL, NULL, int_out);
+}
+
+static inline int __must_check
+usb_find_last_bulk_in_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_in)
+{
+ return usb_find_common_endpoints_reverse(alt, bulk_in, NULL, NULL, NULL);
+}
+
+static inline int __must_check
+usb_find_last_bulk_out_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_out)
+{
+ return usb_find_common_endpoints_reverse(alt, NULL, bulk_out, NULL, NULL);
+}
+
+static inline int __must_check
+usb_find_last_int_in_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **int_in)
+{
+ return usb_find_common_endpoints_reverse(alt, NULL, NULL, int_in, NULL);
+}
+
+static inline int __must_check
+usb_find_last_int_out_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **int_out)
+{
+ return usb_find_common_endpoints_reverse(alt, NULL, NULL, NULL, int_out);
+}
+
/**
* struct usb_interface - what usb device drivers talk to
* @altsetting: array of interface structures, one for each alternate
@@ -248,7 +318,7 @@ void usb_put_intf(struct usb_interface *intf);
* struct usb_interface (which persists only as long as its configuration
* is installed). The altsetting arrays can be accessed through these
* structures at any time, permitting comparison of configurations and
- * providing support for the /proc/bus/usb/devices pseudo-file.
+ * providing support for the /sys/kernel/debug/usb/devices pseudo-file.
*/
struct usb_interface_cache {
unsigned num_altsetting; /* number of alternate settings */
@@ -354,6 +424,7 @@ struct usb_devmap {
*/
struct usb_bus {
struct device *controller; /* host/master side hardware */
+ struct device *sysdev; /* as seen from firmware or bus */
int busnum; /* Bus number (in order of reg) */
const char *bus_name; /* stable id (PCI slot_name etc) */
u8 uses_dma; /* Does the host controller use DMA? */
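
The usb_find_common_endpoints() helpers let a driver replace its hand-rolled
descriptor-walking loop with a single call in probe(). A hedged sketch of
typical use (the mydrv_* names are illustrative; the helper is expected to
return a negative error, typically -ENXIO, when a requested endpoint is not
found):

#include <linux/usb.h>

static int mydrv_probe(struct usb_interface *intf,
		       const struct usb_device_id *id)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in;
	int ret;

	/* require one bulk-in, one bulk-out and one interrupt-in endpoint */
	ret = usb_find_common_endpoints(alt, &bulk_in, &bulk_out, &int_in, NULL);
	if (ret)
		return ret;

	dev_dbg(&intf->dev, "bulk-in %02x, bulk-out %02x, int-in %02x\n",
		bulk_in->bEndpointAddress, bulk_out->bEndpointAddress,
		int_in->bEndpointAddress);
	return 0;
}
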
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 4616a49a1c2e..f665d2ceac20 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -451,6 +451,7 @@ static inline struct usb_composite_driver *to_cdriver(
* sure doing that won't hurt too much.
*
* One notion for how to handle Wireless USB devices involves:
+ *
* (a) a second gadget here, discovery mechanism TBD, but likely
* needing separate "register/unregister WUSB gadget" calls;
* (b) updates to usb_gadget to include flags "is it wireless",
@@ -503,8 +504,9 @@ struct usb_composite_dev {
/* protects deactivations and delayed_status counts*/
spinlock_t lock;
- unsigned setup_pending:1;
- unsigned os_desc_pending:1;
+ /* public: */
+ unsigned int setup_pending:1;
+ unsigned int os_desc_pending:1;
};
extern int usb_string_id(struct usb_composite_dev *c);
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index e4516e9ded0f..fbc22a39e7bc 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -188,7 +188,7 @@ struct usb_ep_caps {
* @caps:The structure describing types and directions supported by endpoint.
* @maxpacket:The maximum packet size used on this endpoint. The initial
* value can sometimes be reduced (hardware allowing), according to
- * the endpoint descriptor used to configure the endpoint.
+ * the endpoint descriptor used to configure the endpoint.
* @maxpacket_limit:The maximum packet size value which can be handled by this
* endpoint. It's set once by UDC driver when endpoint is initialized, and
* should not be changed. Should not be confused with maxpacket.
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 40edf6a8533e..50398b69ca44 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -148,6 +148,7 @@ struct usb_hcd {
unsigned rh_registered:1;/* is root hub registered? */
unsigned rh_pollable:1; /* may we poll the root hub? */
unsigned msix_enabled:1; /* driver has MSI-X enabled? */
+ unsigned msi_enabled:1; /* driver has MSI enabled? */
unsigned remove_phy:1; /* auto-remove USB phy */
/* The next flag is a stopgap, to be removed when all the HCDs
@@ -437,6 +438,9 @@ extern int usb_hcd_alloc_bandwidth(struct usb_device *udev,
struct usb_host_interface *new_alt);
extern int usb_hcd_get_frame_number(struct usb_device *udev);
+struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
+ struct device *sysdev, struct device *dev, const char *bus_name,
+ struct usb_hcd *primary_hcd);
extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
struct device *dev, const char *bus_name);
extern struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
@@ -453,7 +457,7 @@ extern int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1);
struct platform_device;
extern void usb_hcd_platform_shutdown(struct platform_device *dev);
-#ifdef CONFIG_PCI
+#ifdef CONFIG_USB_PCI
struct pci_dev;
struct pci_device_id;
extern int usb_hcd_pci_probe(struct pci_dev *dev,
@@ -466,7 +470,7 @@ extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev);
#ifdef CONFIG_PM
extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
#endif
-#endif /* CONFIG_PCI */
+#endif /* CONFIG_USB_PCI */
/* pci-ish (pdev null is ok) buffer alloc/mapping support */
void usb_init_pool_max(void);
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index 5ff9032ee1b4..4031f47629ec 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -18,6 +18,7 @@ int of_usb_update_otg_caps(struct device_node *np,
struct usb_otg_caps *otg_caps);
struct device_node *usb_of_get_child_node(struct device_node *parent,
int portnum);
+struct device *usb_of_get_companion_dev(struct device *dev);
#else
static inline enum usb_dr_mode
of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
@@ -38,6 +39,10 @@ static inline struct device_node *usb_of_get_child_node
{
return NULL;
}
+static inline struct device *usb_of_get_companion_dev(struct device *dev)
+{
+ return NULL;
+}
#endif
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT)
diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h
index 7a0350535cb1..a0a8f878503c 100644
--- a/include/linux/usb/otg-fsm.h
+++ b/include/linux/usb/otg-fsm.h
@@ -21,21 +21,6 @@
#include <linux/mutex.h>
#include <linux/errno.h>
-#undef VERBOSE
-
-#ifdef VERBOSE
-#define VDBG(fmt, args...) pr_debug("[%s] " fmt , \
- __func__, ## args)
-#else
-#define VDBG(stuff...) do {} while (0)
-#endif
-
-#ifdef VERBOSE
-#define MPC_LOC printk("Current Location [%s]:[%d]\n", __FILE__, __LINE__)
-#else
-#define MPC_LOC do {} while (0)
-#endif
-
#define PROTO_UNDEF (0)
#define PROTO_HOST (1)
#define PROTO_GADGET (2)
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 704a1ab8240c..e2f0ab07eea5 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -20,7 +20,7 @@
#include <linux/kfifo.h>
/* The maximum number of ports one device can grab at once */
-#define MAX_NUM_PORTS 8
+#define MAX_NUM_PORTS 16
/* parity check flag */
#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
@@ -159,10 +159,10 @@ struct usb_serial {
unsigned char minors_reserved:1;
unsigned char num_ports;
unsigned char num_port_pointers;
- char num_interrupt_in;
- char num_interrupt_out;
- char num_bulk_in;
- char num_bulk_out;
+ unsigned char num_interrupt_in;
+ unsigned char num_interrupt_out;
+ unsigned char num_bulk_in;
+ unsigned char num_bulk_out;
struct usb_serial_port *port[MAX_NUM_PORTS];
struct kref kref;
struct mutex disc_mutex;
@@ -181,6 +181,17 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
serial->private = data;
}
+struct usb_serial_endpoints {
+ unsigned char num_bulk_in;
+ unsigned char num_bulk_out;
+ unsigned char num_interrupt_in;
+ unsigned char num_interrupt_out;
+ struct usb_endpoint_descriptor *bulk_in[MAX_NUM_PORTS];
+ struct usb_endpoint_descriptor *bulk_out[MAX_NUM_PORTS];
+ struct usb_endpoint_descriptor *interrupt_in[MAX_NUM_PORTS];
+ struct usb_endpoint_descriptor *interrupt_out[MAX_NUM_PORTS];
+};
+
/**
* usb_serial_driver - describes a usb serial driver
* @description: pointer to a string that describes this driver. This string
@@ -188,12 +199,17 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
* @id_table: pointer to a list of usb_device_id structures that define all
* of the devices this structure can support.
* @num_ports: the number of different ports this device will have.
+ * @num_bulk_in: minimum number of bulk-in endpoints
+ * @num_bulk_out: minimum number of bulk-out endpoints
+ * @num_interrupt_in: minimum number of interrupt-in endpoints
+ * @num_interrupt_out: minimum number of interrupt-out endpoints
* @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer
* (0 = end-point size)
* @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size)
* @calc_num_ports: pointer to a function to determine how many ports this
- * device has dynamically. It will be called after the probe()
- * callback is called, but before attach()
+ * device has dynamically. It can also be used to verify the number of
+ * endpoints or to modify the port-endpoint mapping. It will be called
+ * after the probe() callback is called, but before attach().
* @probe: pointer to the driver's probe function.
* This will be called when the device is inserted into the system,
* but before the device has been fully initialized by the usb_serial
@@ -227,19 +243,26 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
struct usb_serial_driver {
const char *description;
const struct usb_device_id *id_table;
- char num_ports;
struct list_head driver_list;
struct device_driver driver;
struct usb_driver *usb_driver;
struct usb_dynids dynids;
+ unsigned char num_ports;
+
+ unsigned char num_bulk_in;
+ unsigned char num_bulk_out;
+ unsigned char num_interrupt_in;
+ unsigned char num_interrupt_out;
+
size_t bulk_in_size;
size_t bulk_out_size;
int (*probe)(struct usb_serial *serial, const struct usb_device_id *id);
int (*attach)(struct usb_serial *serial);
- int (*calc_num_ports) (struct usb_serial *serial);
+ int (*calc_num_ports)(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds);
void (*disconnect)(struct usb_serial *serial);
void (*release)(struct usb_serial *serial);
@@ -356,7 +379,6 @@ extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
extern int usb_serial_bus_register(struct usb_serial_driver *device);
extern void usb_serial_bus_deregister(struct usb_serial_driver *device);
-extern struct usb_serial_driver usb_serial_generic_device;
extern struct bus_type usb_serial_bus_type;
extern struct tty_driver *usb_serial_tty_driver;
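
With the reworked calc_num_ports() prototype, a usb-serial driver is handed
the endpoints the core discovered and can size the device, or reject it,
before attach() runs. A minimal sketch, assuming one port per
bulk-in/bulk-out pair (all mydrv_* names are illustrative):

#include <linux/kernel.h>
#include <linux/usb/serial.h>

static int mydrv_calc_num_ports(struct usb_serial *serial,
				struct usb_serial_endpoints *epds)
{
	unsigned char num_ports = min(epds->num_bulk_in, epds->num_bulk_out);

	if (!num_ports) {
		dev_err(&serial->interface->dev,
			"required bulk endpoints are missing\n");
		return -ENODEV;
	}

	/* the return value is the number of ports to create */
	return num_ports;
}
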
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
new file mode 100644
index 000000000000..ec78204964ab
--- /dev/null
+++ b/include/linux/usb/typec.h
@@ -0,0 +1,243 @@
+
+#ifndef __LINUX_USB_TYPEC_H
+#define __LINUX_USB_TYPEC_H
+
+#include <linux/types.h>
+
+/* XXX: Once we have a header for USB Power Delivery, this belongs there */
+#define ALTMODE_MAX_MODES 6
+
+/* USB Type-C Specification releases */
+#define USB_TYPEC_REV_1_0 0x100 /* 1.0 */
+#define USB_TYPEC_REV_1_1 0x110 /* 1.1 */
+#define USB_TYPEC_REV_1_2 0x120 /* 1.2 */
+
+struct typec_altmode;
+struct typec_partner;
+struct typec_cable;
+struct typec_plug;
+struct typec_port;
+
+struct fwnode_handle;
+
+enum typec_port_type {
+ TYPEC_PORT_DFP,
+ TYPEC_PORT_UFP,
+ TYPEC_PORT_DRP,
+};
+
+enum typec_plug_type {
+ USB_PLUG_NONE,
+ USB_PLUG_TYPE_A,
+ USB_PLUG_TYPE_B,
+ USB_PLUG_TYPE_C,
+ USB_PLUG_CAPTIVE,
+};
+
+enum typec_data_role {
+ TYPEC_DEVICE,
+ TYPEC_HOST,
+};
+
+enum typec_role {
+ TYPEC_SINK,
+ TYPEC_SOURCE,
+};
+
+enum typec_pwr_opmode {
+ TYPEC_PWR_MODE_USB,
+ TYPEC_PWR_MODE_1_5A,
+ TYPEC_PWR_MODE_3_0A,
+ TYPEC_PWR_MODE_PD,
+};
+
+enum typec_accessory {
+ TYPEC_ACCESSORY_NONE,
+ TYPEC_ACCESSORY_AUDIO,
+ TYPEC_ACCESSORY_DEBUG,
+};
+
+#define TYPEC_MAX_ACCESSORY 3
+
+/*
+ * struct usb_pd_identity - USB Power Delivery identity data
+ * @id_header: ID Header VDO
+ * @cert_stat: Cert Stat VDO
+ * @product: Product VDO
+ *
+ * USB power delivery Discover Identity command response data.
+ *
+ * REVISIT: This is USB Power Delivery specific information, so this structure
+ * probably belongs in a USB Power Delivery header file once we have one.
+ */
+struct usb_pd_identity {
+ u32 id_header;
+ u32 cert_stat;
+ u32 product;
+};
+
+int typec_partner_set_identity(struct typec_partner *partner);
+int typec_cable_set_identity(struct typec_cable *cable);
+
+/*
+ * struct typec_mode_desc - Individual Mode of an Alternate Mode
+ * @index: Index of the Mode within the SVID
+ * @vdo: VDO returned by Discover Modes USB PD command
+ * @desc: Optional human readable description of the mode
+ * @roles: Only for ports. DRP if the mode is available in both roles
+ *
+ * Description of a mode of an Alternate Mode which a connector, cable plug or
+ * partner supports. Every mode will have its own sysfs group. The details are
+ * the VDO returned by the Discover Modes command, a description of the mode and
+ * an active flag telling whether the mode has been entered or not.
+ */
+struct typec_mode_desc {
+ int index;
+ u32 vdo;
+ char *desc;
+ /* Only used with ports */
+ enum typec_port_type roles;
+};
+
+/*
+ * struct typec_altmode_desc - USB Type-C Alternate Mode Descriptor
+ * @svid: Standard or Vendor ID
+ * @n_modes: Number of modes
+ * @modes: Array of modes supported by the Alternate Mode
+ *
+ * Representation of an Alternate Mode that has an SVID assigned by USB-IF. The
+ * array of modes will list the modes of a particular SVID that are supported by
+ * a connector, partner or cable plug.
+ */
+struct typec_altmode_desc {
+ u16 svid;
+ int n_modes;
+ struct typec_mode_desc modes[ALTMODE_MAX_MODES];
+};
+
+struct typec_altmode
+*typec_partner_register_altmode(struct typec_partner *partner,
+ struct typec_altmode_desc *desc);
+struct typec_altmode
+*typec_plug_register_altmode(struct typec_plug *plug,
+ struct typec_altmode_desc *desc);
+struct typec_altmode
+*typec_port_register_altmode(struct typec_port *port,
+ struct typec_altmode_desc *desc);
+void typec_unregister_altmode(struct typec_altmode *altmode);
+
+struct typec_port *typec_altmode2port(struct typec_altmode *alt);
+
+void typec_altmode_update_active(struct typec_altmode *alt, int mode,
+ bool active);
+
+enum typec_plug_index {
+ TYPEC_PLUG_SOP_P,
+ TYPEC_PLUG_SOP_PP,
+};
+
+/*
+ * struct typec_plug_desc - USB Type-C Cable Plug Descriptor
+ * @index: SOP Prime for the plug connected to DFP and SOP Double Prime for the
+ * plug connected to UFP
+ *
+ * Represents USB Type-C Cable Plug.
+ */
+struct typec_plug_desc {
+ enum typec_plug_index index;
+};
+
+/*
+ * struct typec_cable_desc - USB Type-C Cable Descriptor
+ * @type: The plug type from USB PD Cable VDO
+ * @active: Is the cable active or passive
+ * @identity: Result of Discover Identity command
+ *
+ * Represents USB Type-C Cable attached to USB Type-C port.
+ */
+struct typec_cable_desc {
+ enum typec_plug_type type;
+ unsigned int active:1;
+ struct usb_pd_identity *identity;
+};
+
+/*
+ * struct typec_partner_desc - USB Type-C Partner Descriptor
+ * @usb_pd: USB Power Delivery support
+ * @accessory: Audio, Debug or none.
+ * @identity: Discover Identity command data
+ *
+ * Details about a partner that is attached to a USB Type-C port. If the
+ * @identity member is set when the partner is registered, a sysfs directory
+ * named "identity" is created for the partner device.
+ */
+struct typec_partner_desc {
+ unsigned int usb_pd:1;
+ enum typec_accessory accessory;
+ struct usb_pd_identity *identity;
+};
+
+/*
+ * struct typec_capability - USB Type-C Port Capabilities
+ * @type: DFP (Host-only), UFP (Device-only) or DRP (Dual Role)
+ * @revision: USB Type-C Specification release. Binary coded decimal
+ * @pd_revision: USB Power Delivery Specification revision if supported
+ * @prefer_role: Initial role preference
+ * @accessory: Supported Accessory Modes
+ * @fwnode: Optional fwnode of the port
+ * @try_role: Set data role preference for DRP port
+ * @dr_set: Set Data Role
+ * @pr_set: Set Power Role
+ * @vconn_set: Set VCONN Role
+ * @activate_mode: Enter/exit given Alternate Mode
+ *
+ * Static capabilities of a single USB Type-C port.
+ */
+struct typec_capability {
+ enum typec_port_type type;
+ u16 revision; /* 0120H = "1.2" */
+ u16 pd_revision; /* 0300H = "3.0" */
+ int prefer_role;
+ enum typec_accessory accessory[TYPEC_MAX_ACCESSORY];
+
+ struct fwnode_handle *fwnode;
+
+ int (*try_role)(const struct typec_capability *,
+ int role);
+
+ int (*dr_set)(const struct typec_capability *,
+ enum typec_data_role);
+ int (*pr_set)(const struct typec_capability *,
+ enum typec_role);
+ int (*vconn_set)(const struct typec_capability *,
+ enum typec_role);
+
+ int (*activate_mode)(const struct typec_capability *,
+ int mode, int activate);
+};
+
+/* Specific to try_role(). Indicates the user wants to clear the preference. */
+#define TYPEC_NO_PREFERRED_ROLE (-1)
+
+struct typec_port *typec_register_port(struct device *parent,
+ const struct typec_capability *cap);
+void typec_unregister_port(struct typec_port *port);
+
+struct typec_partner *typec_register_partner(struct typec_port *port,
+ struct typec_partner_desc *desc);
+void typec_unregister_partner(struct typec_partner *partner);
+
+struct typec_cable *typec_register_cable(struct typec_port *port,
+ struct typec_cable_desc *desc);
+void typec_unregister_cable(struct typec_cable *cable);
+
+struct typec_plug *typec_register_plug(struct typec_cable *cable,
+ struct typec_plug_desc *desc);
+void typec_unregister_plug(struct typec_plug *plug);
+
+void typec_set_data_role(struct typec_port *port, enum typec_data_role role);
+void typec_set_pwr_role(struct typec_port *port, enum typec_role role);
+void typec_set_vconn_role(struct typec_port *port, enum typec_role role);
+void typec_set_pwr_opmode(struct typec_port *port, enum typec_pwr_opmode mode);
+
+#endif /* __LINUX_USB_TYPEC_H */
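
Putting the new class interface together, a port controller driver describes
its port with a typec_capability, registers it, and then reports role changes
as they are negotiated. A hedged sketch, assuming typec_register_port()
follows the usual ERR_PTR convention on failure (all mydrv_* names are
illustrative):

#include <linux/err.h>
#include <linux/usb/typec.h>

static int mydrv_try_role(const struct typec_capability *cap, int role)
{
	/* role is TYPEC_DEVICE, TYPEC_HOST or TYPEC_NO_PREFERRED_ROLE */
	return 0;
}

static const struct typec_capability mydrv_caps = {
	.type		= TYPEC_PORT_DRP,
	.revision	= USB_TYPEC_REV_1_2,	/* 0x0120 == "1.2" */
	.prefer_role	= TYPEC_NO_PREFERRED_ROLE,
	.accessory	= { TYPEC_ACCESSORY_AUDIO, TYPEC_ACCESSORY_DEBUG },
	.try_role	= mydrv_try_role,
};

static struct typec_port *mydrv_port;

static int mydrv_register(struct device *parent)
{
	mydrv_port = typec_register_port(parent, &mydrv_caps);
	if (IS_ERR(mydrv_port))
		return PTR_ERR(mydrv_port);

	/* reflect the negotiated roles once a partner attaches */
	typec_set_data_role(mydrv_port, TYPEC_HOST);
	typec_set_pwr_role(mydrv_port, TYPEC_SOURCE);
	return 0;
}
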
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 6e0ce8c7b8cb..97116379db5f 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -64,6 +64,8 @@ struct usbnet {
struct usb_anchor deferred;
struct tasklet_struct bh;
+ struct pcpu_sw_netstats __percpu *stats64;
+
struct work_struct kevent;
unsigned long flags;
# define EVENT_TX_HALT 0
@@ -204,6 +206,7 @@ struct cdc_state {
};
extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *);
+extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf);
extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *);
extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *);
extern void usbnet_cdc_status(struct usbnet *, struct urb *);
@@ -261,10 +264,10 @@ extern void usbnet_pause_rx(struct usbnet *);
extern void usbnet_resume_rx(struct usbnet *);
extern void usbnet_purge_paused_rxq(struct usbnet *);
-extern int usbnet_get_settings(struct net_device *net,
- struct ethtool_cmd *cmd);
-extern int usbnet_set_settings(struct net_device *net,
- struct ethtool_cmd *cmd);
+extern int usbnet_get_link_ksettings(struct net_device *net,
+ struct ethtool_link_ksettings *cmd);
+extern int usbnet_set_link_ksettings(struct net_device *net,
+ const struct ethtool_link_ksettings *cmd);
extern u32 usbnet_get_link(struct net_device *net);
extern u32 usbnet_get_msglevel(struct net_device *);
extern void usbnet_set_msglevel(struct net_device *, u32);
@@ -278,5 +281,7 @@ extern int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags);
extern void usbnet_status_stop(struct usbnet *dev);
extern void usbnet_update_max_qlen(struct usbnet *dev);
+extern void usbnet_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
#endif /* __LINUX_USB_USBNET_H */
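
For usbnet minidrivers the conversion is mostly mechanical: the legacy
get/set_settings helpers are gone, the ksettings variants slot straight into
struct ethtool_ops, and the new usbnet_get_stats64() can presumably back
ndo_get_stats64 now that struct usbnet carries per-cpu stats64 counters. A
sketch of the resulting ethtool ops table (assumed typical wiring, not taken
from any particular driver):

#include <linux/ethtool.h>
#include <linux/usb/usbnet.h>

static const struct ethtool_ops mydrv_ethtool_ops = {
	.get_link		= usbnet_get_link,
	.get_msglevel		= usbnet_get_msglevel,
	.set_msglevel		= usbnet_set_msglevel,
	.get_link_ksettings	= usbnet_get_link_ksettings,
	.set_link_ksettings	= usbnet_set_link_ksettings,
};
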
diff --git a/include/linux/usb/xhci-dbgp.h b/include/linux/usb/xhci-dbgp.h
new file mode 100644
index 000000000000..80c1cca1f529
--- /dev/null
+++ b/include/linux/usb/xhci-dbgp.h
@@ -0,0 +1,29 @@
+/*
+ * Standalone xHCI debug capability driver
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_XHCI_DBGP_H
+#define __LINUX_XHCI_DBGP_H
+
+#ifdef CONFIG_EARLY_PRINTK_USB_XDBC
+int __init early_xdbc_parse_parameter(char *s);
+int __init early_xdbc_setup_hardware(void);
+void __init early_xdbc_register_console(void);
+#else
+static inline int __init early_xdbc_setup_hardware(void)
+{
+ return -ENODEV;
+}
+static inline void __init early_xdbc_register_console(void)
+{
+}
+#endif /* CONFIG_EARLY_PRINTK_USB_XDBC */
+#endif /* __LINUX_XHCI_DBGP_H */
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 04b0d3f95043..28b0e965360f 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -44,6 +44,12 @@ int virtqueue_add_inbuf(struct virtqueue *vq,
void *data,
gfp_t gfp);
+int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
+ struct scatterlist sg[], unsigned int num,
+ void *data,
+ void *ctx,
+ gfp_t gfp);
+
int virtqueue_add_sgs(struct virtqueue *vq,
struct scatterlist *sgs[],
unsigned int out_sgs,
@@ -59,6 +65,9 @@ bool virtqueue_notify(struct virtqueue *vq);
void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
+void *virtqueue_get_buf_ctx(struct virtqueue *vq, unsigned int *len,
+ void **ctx);
+
void virtqueue_disable_cb(struct virtqueue *vq);
bool virtqueue_enable_cb(struct virtqueue *vq);
@@ -156,9 +165,13 @@ int virtio_device_restore(struct virtio_device *dev);
* @feature_table_legacy: same as feature_table but when working in legacy mode.
* @feature_table_size_legacy: number of entries in feature table legacy array.
* @probe: the function to call when a device is found. Returns 0 or -errno.
+ * @scan: optional function to call after successful probe; intended
+ * for virtio-scsi to invoke a scan.
* @remove: the function to call when a device is removed.
* @config_changed: optional function to call when the device configuration
* changes; may be called in interrupt context.
+ * @freeze: optional function to call during suspend/hibernation.
+ * @restore: optional function to call on resume.
*/
struct virtio_driver {
struct device_driver driver;
@@ -167,6 +180,7 @@ struct virtio_driver {
unsigned int feature_table_size;
const unsigned int *feature_table_legacy;
unsigned int feature_table_size_legacy;
+ int (*validate)(struct virtio_device *dev);
int (*probe)(struct virtio_device *dev);
void (*scan)(struct virtio_device *dev);
void (*remove)(struct virtio_device *dev);
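
The new validate() hook gives a virtio driver a chance to inspect the device,
and if necessary bail out, before probe() is called. A minimal sketch of a
driver wiring it up (the id table and callbacks are placeholders):

#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>

static const struct virtio_device_id mydrv_id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static int mydrv_validate(struct virtio_device *vdev)
{
	/* e.g. sanity-check feature bits or config space here */
	return 0;	/* a negative return aborts the binding */
}

static int mydrv_probe(struct virtio_device *vdev)
{
	return 0;
}

static void mydrv_remove(struct virtio_device *vdev)
{
}

static struct virtio_driver mydrv_driver = {
	.driver.name	= KBUILD_MODNAME,
	.driver.owner	= THIS_MODULE,
	.id_table	= mydrv_id_table,
	.validate	= mydrv_validate,
	.probe		= mydrv_probe,
	.remove		= mydrv_remove,
};
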
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 8355bab175e1..0133d8a12ccd 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -72,7 +72,8 @@ struct virtio_config_ops {
void (*reset)(struct virtio_device *vdev);
int (*find_vqs)(struct virtio_device *, unsigned nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], struct irq_affinity *desc);
+ const char * const names[], const bool *ctx,
+ struct irq_affinity *desc);
void (*del_vqs)(struct virtio_device *);
u64 (*get_features)(struct virtio_device *vdev);
int (*finalize_features)(struct virtio_device *vdev);
@@ -173,12 +174,32 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
vq_callback_t *callbacks[] = { c };
const char *names[] = { n };
struct virtqueue *vq;
- int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL);
+ int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
+ NULL);
if (err < 0)
return ERR_PTR(err);
return vq;
}
+static inline
+int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[], vq_callback_t *callbacks[],
+ const char * const names[],
+ struct irq_affinity *desc)
+{
+ return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc);
+}
+
+static inline
+int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[], vq_callback_t *callbacks[],
+ const char * const names[], const bool *ctx,
+ struct irq_affinity *desc)
+{
+ return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx,
+ desc);
+}
+
/**
* virtio_device_ready - enable vq use in probe function
* @vdev: the device
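
Drivers that do not care about the new per-virtqueue context flag can use the
virtio_find_vqs() wrapper, which keeps the old calling convention and passes
NULL for ctx. A minimal sketch requesting two queues (callback and queue
names are illustrative):

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void mydrv_rx_done(struct virtqueue *vq)
{
	/* kick the receive path */
}

static void mydrv_tx_done(struct virtqueue *vq)
{
	/* reclaim transmitted buffers */
}

static int mydrv_init_vqs(struct virtio_device *vdev, struct virtqueue *vqs[2])
{
	vq_callback_t *callbacks[] = { mydrv_rx_done, mydrv_tx_done };
	static const char * const names[] = { "rx", "tx" };

	/* NULL ctx keeps today's behaviour; NULL desc means no IRQ affinity hint */
	return virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
}
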
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index e8d36938f09a..270cfa81830e 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -71,6 +71,7 @@ struct virtqueue *vring_create_virtqueue(unsigned int index,
struct virtio_device *vdev,
bool weak_barriers,
bool may_reduce_num,
+ bool ctx,
bool (*notify)(struct virtqueue *vq),
void (*callback)(struct virtqueue *vq),
const char *name);
@@ -80,6 +81,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
struct vring vring,
struct virtio_device *vdev,
bool weak_barriers,
+ bool ctx,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name);
@@ -93,6 +95,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int vring_align,
struct virtio_device *vdev,
bool weak_barriers,
+ bool ctx,
void *pages,
bool (*notify)(struct virtqueue *vq),
void (*callback)(struct virtqueue *vq),
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 584f9a647ad4..ab13f0743da8 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -153,5 +153,6 @@ void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
+void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt);
#endif /* _LINUX_VIRTIO_VSOCK_H */
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index a80b7b59cf33..d84ae90ccd5c 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -25,7 +25,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
FOR_ALL_ZONES(PGALLOC),
FOR_ALL_ZONES(ALLOCSTALL),
FOR_ALL_ZONES(PGSCAN_SKIP),
- PGFREE, PGACTIVATE, PGDEACTIVATE,
+ PGFREE, PGACTIVATE, PGDEACTIVATE, PGLAZYFREE,
PGFAULT, PGMAJFAULT,
PGLAZYFREED,
PGREFILL,
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index d68edffbf142..2d92dd002abd 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -80,6 +80,17 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
pgprot_t prot, unsigned long vm_flags, int node,
const void *caller);
+#ifndef CONFIG_MMU
+extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
+static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
+ gfp_t flags, void *caller)
+{
+ return __vmalloc_node_flags(size, node, flags);
+}
+#else
+extern void *__vmalloc_node_flags_caller(unsigned long size,
+ int node, gfp_t flags, void *caller);
+#endif
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
diff --git a/include/linux/vme.h b/include/linux/vme.h
index ec5e8bf6118e..25874da3f2e1 100644
--- a/include/linux/vme.h
+++ b/include/linux/vme.h
@@ -92,7 +92,7 @@ extern struct bus_type vme_bus_type;
#define VME_SLOT_ALL -2
/**
- * Structure representing a VME device
+ * struct vme_dev - Structure representing a VME device
* @num: The device number
* @bridge: Pointer to the bridge device this device is on
* @dev: Internal device structure
@@ -107,6 +107,16 @@ struct vme_dev {
struct list_head bridge_list;
};
+/**
+ * struct vme_driver - Structure representing a VME driver
+ * @name: Driver name, should be unique among VME drivers and usually the same
+ * as the module name.
+ * @match: Callback used to determine whether probe should be run.
+ * @probe: Callback for device binding, called when new device is detected.
+ * @remove: Callback, called on device removal.
+ * @driver: Underlying generic device driver structure.
+ * @devices: List of VME devices (struct vme_dev) associated with this driver.
+ */
struct vme_driver {
const char *name;
int (*match)(struct vme_dev *);
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index bde063cefd04..c102ef65cb64 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -608,8 +608,13 @@ static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
return fn(arg);
}
+static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
+{
+ return fn(arg);
+}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
+long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER
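
work_on_cpu_safe() has the same signature as work_on_cpu(); the _safe variant
is the one to use when CPU hotplug could race with the call (on UP builds the
stub above simply runs the function inline). A small sketch of a caller (the
per-CPU query itself is hypothetical):

#include <linux/workqueue.h>

static long mydrv_query(void *arg)
{
	/* runs in process context on the chosen CPU */
	return 0;
}

static long mydrv_query_on(int cpu)
{
	return work_on_cpu_safe(cpu, mydrv_query, NULL);
}
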
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a3c0cbd7c888..d5815794416c 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -237,6 +237,7 @@ static inline void inode_attach_wb(struct inode *inode, struct page *page)
static inline void inode_detach_wb(struct inode *inode)
{
if (inode->i_wb) {
+ WARN_ON_ONCE(!(inode->i_state & I_CLEAR));
wb_put(inode->i_wb);
inode->i_wb = NULL;
}