Diffstat (limited to 'include/linux')
 include/linux/atomic-arch-fallback.h | 2291
 include/linux/atomic-fallback.h | 346
 include/linux/atomic.h | 11
 include/linux/backlight.h | 1
 include/linux/bch.h | 11
 include/linux/binfmts.h | 48
 include/linux/bitops.h | 2
 include/linux/bsearch.h | 26
 include/linux/cache.h | 10
 include/linux/can/skb.h | 2
 include/linux/cb710.h | 2
 include/linux/ceph/libceph.h | 13
 include/linux/ceph/mon_client.h | 2
 include/linux/ceph/osd_client.h | 8
 include/linux/ceph/osdmap.h | 19
 include/linux/ceph/rados.h | 14
 include/linux/clk/tegra.h | 27
 include/linux/compaction.h | 9
 include/linux/compiler-clang.h | 11
 include/linux/compiler-gcc.h | 11
 include/linux/compiler.h | 157
 include/linux/compiler_types.h | 79
 include/linux/context_tracking.h | 6
 include/linux/context_tracking_state.h | 6
 include/linux/coresight.h | 32
 include/linux/cpu_cooling.h | 12
 include/linux/cpufreq.h | 2
 include/linux/cpuhotplug.h | 2
 include/linux/crash_dump.h | 3
 include/linux/crush/crush.h | 14
 include/linux/dax.h | 1
 include/linux/debug_locks.h | 2
 include/linux/dev_printk.h | 6
 include/linux/device-mapper.h | 9
 include/linux/dm-bufio.h | 12
 include/linux/dma-direct.h | 2
 include/linux/dma-mapping.h | 86
 include/linux/dma-noncoherent.h | 2
 include/linux/dmaengine.h | 4
 include/linux/dynamic_debug.h | 2
 include/linux/edac.h | 8
 include/linux/elfnote.h | 2
 include/linux/ethtool_netlink.h | 2
 include/linux/fiemap.h | 25
 include/linux/firmware.h | 1
 include/linux/firmware/imx/sci.h | 1
 include/linux/firmware/imx/types.h | 65
 include/linux/firmware/intel/stratix10-smc.h | 49
 include/linux/firmware/intel/stratix10-svc-client.h | 71
 include/linux/firmware/trusted_foundations.h | 1
 include/linux/firmware/xlnx-zynqmp.h | 237
 include/linux/fpga/adi-axi-common.h | 6
 include/linux/fs.h | 35
 include/linux/fscache-cache.h | 2
 include/linux/fsl/bestcomm/bestcomm.h | 2
 include/linux/ftrace.h | 5
 include/linux/fwnode.h | 2
 include/linux/gfp.h | 7
 include/linux/gpio/driver.h | 48
 include/linux/gpio/machine.h | 17
 include/linux/gpio/regmap.h | 86
 include/linux/greybus/greybus_protocols.h | 44
 include/linux/hardirq.h | 41
 include/linux/highmem.h | 81
 include/linux/hmm.h | 2
 include/linux/huge_mm.h | 2
 include/linux/hugetlb.h | 18
 include/linux/hw_breakpoint.h | 4
 include/linux/i2c-smbus.h | 8
 include/linux/i2c.h | 8
 include/linux/idle_inject.h | 4
 include/linux/iio/adc/ad_sigma_delta.h | 58
 include/linux/iio/adc/adi-axi-adc.h | 64
 include/linux/iio/buffer-dma.h | 2
 include/linux/iio/buffer-dmaengine.h | 3
 include/linux/iio/buffer_impl.h | 9
 include/linux/iio/consumer.h | 18
 include/linux/iio/hw-consumer.h | 1
 include/linux/iio/iio.h | 10
 include/linux/iio/imu/adis.h | 87
 include/linux/iio/kfifo_buf.h | 1
 include/linux/iio/trigger.h | 3
 include/linux/iio/triggered_buffer.h | 2
 include/linux/ima.h | 7
 include/linux/input/gp2ap002a00f.h | 23
 include/linux/input/mt.h | 5
 include/linux/instrumented.h | 109
 include/linux/intel-iommu.h | 82
 include/linux/intel-svm.h | 94
 include/linux/interconnect.h | 31
 include/linux/interrupt.h | 8
 include/linux/io-mapping.h | 2
 include/linux/iomap.h | 2
 include/linux/iommu.h | 79
 include/linux/ioport.h | 7
 include/linux/ipc_namespace.h | 2
 include/linux/irqflags.h | 4
 include/linux/jbd2.h | 6
 include/linux/kallsyms.h | 4
 include/linux/kasan.h | 2
 include/linux/kconfig.h | 2
 include/linux/kcsan-checks.h | 430
 include/linux/kcsan.h | 59
 include/linux/kernel.h | 9
 include/linux/kexec.h | 2
 include/linux/key.h | 33
 include/linux/kobject.h | 2
 include/linux/kprobes.h | 2
 include/linux/kthread.h | 6
 include/linux/kvm_host.h | 10
 include/linux/libata.h | 2
 include/linux/livepatch.h | 17
 include/linux/lsm_audit.h | 1
 include/linux/lsm_hook_defs.h | 14
 include/linux/lsm_hooks.h | 79
 include/linux/memblock.h | 15
 include/linux/memcontrol.h | 66
 include/linux/memory_hotplug.h | 10
 include/linux/mempolicy.h | 2
 include/linux/mfd/core.h | 4
 include/linux/mfd/mp2629.h | 26
 include/linux/mfd/mt6358/core.h | 158
 include/linux/mfd/mt6358/registers.h | 282
 include/linux/mfd/mt6360.h | 240
 include/linux/mfd/mt6397/core.h | 5
 include/linux/mfd/mt6397/rtc.h | 9
 include/linux/mfd/stmfx.h | 1
 include/linux/mhi.h | 23
 include/linux/mlx4/device.h | 22
 include/linux/mlx5/mlx5_ifc.h | 9
 include/linux/mlx5/qp.h | 68
 include/linux/mm.h | 85
 include/linux/mm_types.h | 4
 include/linux/mmap_lock.h | 90
 include/linux/mmu_context.h | 5
 include/linux/mmu_notifier.h | 13
 include/linux/mmzone.h | 60
 include/linux/mod_devicetable.h | 2
 include/linux/module.h | 8
 include/linux/mount.h | 2
 include/linux/mtd/bbm.h | 2
 include/linux/mtd/cfi.h | 6
 include/linux/mtd/mtd.h | 7
 include/linux/mtd/partitions.h | 2
 include/linux/mtd/qinfo.h | 2
 include/linux/mtd/rawnand.h | 131
 include/linux/mtd/spi-nor.h | 24
 include/linux/net.h | 3
 include/linux/netdevice.h | 20
 include/linux/nfs4.h | 4
 include/linux/nfs_fs.h | 1
 include/linux/nfs_xdr.h | 2
 include/linux/ntb.h | 6
 include/linux/of_reserved_mem.h | 12
 include/linux/padata.h | 43
 include/linux/page-flags.h | 10
 include/linux/pagemap.h | 2
 include/linux/parport.h | 16
 include/linux/pci-acpi.h | 18
 include/linux/pci-ats.h | 3
 include/linux/pci-ecam.h | 25
 include/linux/pci-epc.h | 38
 include/linux/pci.h | 51
 include/linux/pci_ids.h | 7
 include/linux/pgtable.h | 1438
 include/linux/phy/omap_usb.h | 69
 include/linux/pid.h | 1
 include/linux/pid_namespace.h | 12
 include/linux/pipe_fs_i.h | 27
 include/linux/platform_data/clk-integrator.h | 2
 include/linux/platform_data/gpio-dwapb.h | 1
 include/linux/platform_data/i2c-pxa.h | 48
 include/linux/platform_data/mtd-davinci.h | 2
 include/linux/platform_data/mtd-nand-s3c2410.h | 2
 include/linux/pm_opp.h | 18
 include/linux/power_supply.h | 13
 include/linux/printk.h | 9
 include/linux/proc_fs.h | 32
 include/linux/property.h | 1
 include/linux/qed/qed_if.h | 1
 include/linux/qed/qed_rdma_if.h | 1
 include/linux/ras.h | 5
 include/linux/rculist.h | 21
 include/linux/regset.h | 2
 include/linux/remoteproc.h | 19
 include/linux/rmap.h | 2
 include/linux/rpmsg/qcom_glink.h | 3
 include/linux/rtsx_pci.h | 25
 include/linux/scatterlist.h | 50
 include/linux/sched.h | 11
 include/linux/sched/debug.h | 3
 include/linux/sched/mm.h | 10
 include/linux/sched/signal.h | 11
 include/linux/sched/sysctl.h | 7
 include/linux/scmi_protocol.h | 6
 include/linux/scpi_protocol.h | 6
 include/linux/sctp.h | 36
 include/linux/security.h | 54
 include/linux/seq_file.h | 19
 include/linux/seqlock.h | 51
 include/linux/serial_core.h | 4
 include/linux/set_memory.h | 2
 include/linux/soc/mediatek/mtk-mmsys.h | 20
 include/linux/soundwire/sdw.h | 32
 include/linux/soundwire/sdw_type.h | 9
 include/linux/spi/l4f00242t03.h | 17
 include/linux/spi/mcp23s08.h | 18
 include/linux/stacktrace.h | 2
 include/linux/string.h | 60
 include/linux/sunrpc/auth.h | 5
 include/linux/sunrpc/gss_api.h | 1
 include/linux/sunrpc/svc.h | 2
 include/linux/sunrpc/svc_rdma.h | 6
 include/linux/sunrpc/svc_xprt.h | 6
 include/linux/sunrpc/svcauth_gss.h | 3
 include/linux/sunrpc/svcsock.h | 6
 include/linux/swap.h | 11
 include/linux/sysctl.h | 4
 include/linux/sysrq.h | 18
 include/linux/tee_drv.h | 17
 include/linux/thermal.h | 84
 include/linux/thunderbolt.h | 2
 include/linux/tifm.h | 2
 include/linux/u64_stats_sync.h | 43
 include/linux/uacce.h | 34
 include/linux/uaccess.h | 74
 include/linux/usb/chipidea.h | 1
 include/linux/usb/composite.h | 3
 include/linux/usb/gadget.h | 8
 include/linux/usb/hcd.h | 3
 include/linux/usb/typec.h | 1
 include/linux/vdpa.h | 16
 include/linux/vexpress.h | 30
 include/linux/vfio.h | 4
 include/linux/vm_event_item.h | 4
 include/linux/vmstat.h | 2
 include/linux/vringh.h | 6
 include/linux/watch_queue.h | 127
 include/linux/workqueue.h | 2
 include/linux/writeback.h | 1
 include/linux/xarray.h | 4
 241 files changed, 8042 insertions(+), 1894 deletions(-)
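
The two largest additions, atomic-arch-fallback.h (below) and the regenerated atomic-fallback.h, come from the rework that splits the atomic API into a raw arch_atomic_*() layer and an instrumented atomic_*() layer, so that sanitizers such as KASAN and KCSAN (see instrumented.h and kcsan-checks.h in the diffstat) can hook every atomic access. A minimal sketch of what the instrumented layer looks like, assuming the wrapper shape used by asm-generic/atomic-instrumented.h:

static __always_inline int atomic_add_return(int i, atomic_t *v)
{
	instrument_atomic_write(v, sizeof(*v));	/* sanitizer hook */
	return arch_atomic_add_return(i, v);	/* raw architecture op */
}
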
diff --git a/include/linux/atomic-arch-fallback.h b/include/linux/atomic-arch-fallback.h
new file mode 100644
index 000000000000..bcb6aa27cfa6
--- /dev/null
+++ b/include/linux/atomic-arch-fallback.h
@@ -0,0 +1,2291 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-fallback.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_FALLBACK_H
+#define _LINUX_ATOMIC_FALLBACK_H
+
+#include <linux/compiler.h>
+
+#ifndef arch_xchg_relaxed
+#define arch_xchg_relaxed arch_xchg
+#define arch_xchg_acquire arch_xchg
+#define arch_xchg_release arch_xchg
+#else /* arch_xchg_relaxed */
+
+#ifndef arch_xchg_acquire
+#define arch_xchg_acquire(...) \
+ __atomic_op_acquire(arch_xchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_xchg_release
+#define arch_xchg_release(...) \
+ __atomic_op_release(arch_xchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_xchg
+#define arch_xchg(...) \
+ __atomic_op_fence(arch_xchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_xchg_relaxed */
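
The #ifndef/#else block above is the template this generated file instantiates for every operation: an architecture that provides only a fully ordered arch_xchg() gets the _relaxed/_acquire/_release names as plain aliases, while an architecture that provides arch_xchg_relaxed() has the stronger orderings synthesized through the __atomic_op_*() helpers. Those helpers live in <linux/atomic.h> and are essentially:

#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__atomic_acquire_fence();					\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__atomic_release_fence();					\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	__atomic_pre_full_fence();					\
	__ret = op##_relaxed(args);					\
	__atomic_post_full_fence();					\
	__ret;								\
})
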
+
+#ifndef arch_cmpxchg_relaxed
+#define arch_cmpxchg_relaxed arch_cmpxchg
+#define arch_cmpxchg_acquire arch_cmpxchg
+#define arch_cmpxchg_release arch_cmpxchg
+#else /* arch_cmpxchg_relaxed */
+
+#ifndef arch_cmpxchg_acquire
+#define arch_cmpxchg_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg_release
+#define arch_cmpxchg_release(...) \
+ __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg
+#define arch_cmpxchg(...) \
+ __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_cmpxchg_relaxed */
+
+#ifndef arch_cmpxchg64_relaxed
+#define arch_cmpxchg64_relaxed arch_cmpxchg64
+#define arch_cmpxchg64_acquire arch_cmpxchg64
+#define arch_cmpxchg64_release arch_cmpxchg64
+#else /* arch_cmpxchg64_relaxed */
+
+#ifndef arch_cmpxchg64_acquire
+#define arch_cmpxchg64_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg64_release
+#define arch_cmpxchg64_release(...) \
+ __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg64
+#define arch_cmpxchg64(...) \
+ __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#endif /* arch_cmpxchg64_relaxed */
+
+#ifndef arch_atomic_read_acquire
+static __always_inline int
+arch_atomic_read_acquire(const atomic_t *v)
+{
+ return smp_load_acquire(&(v)->counter);
+}
+#define arch_atomic_read_acquire arch_atomic_read_acquire
+#endif
+
+#ifndef arch_atomic_set_release
+static __always_inline void
+arch_atomic_set_release(atomic_t *v, int i)
+{
+ smp_store_release(&(v)->counter, i);
+}
+#define arch_atomic_set_release arch_atomic_set_release
+#endif
+
+#ifndef arch_atomic_add_return_relaxed
+#define arch_atomic_add_return_acquire arch_atomic_add_return
+#define arch_atomic_add_return_release arch_atomic_add_return
+#define arch_atomic_add_return_relaxed arch_atomic_add_return
+#else /* arch_atomic_add_return_relaxed */
+
+#ifndef arch_atomic_add_return_acquire
+static __always_inline int
+arch_atomic_add_return_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
+#endif
+
+#ifndef arch_atomic_add_return_release
+static __always_inline int
+arch_atomic_add_return_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_add_return_relaxed(i, v);
+}
+#define arch_atomic_add_return_release arch_atomic_add_return_release
+#endif
+
+#ifndef arch_atomic_add_return
+static __always_inline int
+arch_atomic_add_return(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_add_return arch_atomic_add_return
+#endif
+
+#endif /* arch_atomic_add_return_relaxed */
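
The inline functions above spell out the same construction without macros: acquire is the relaxed op followed by an acquire fence, release is a release fence followed by the relaxed op, and the fully ordered form brackets the relaxed op with pre/post fences (the fence hooks default to smp_mb__before_atomic()/smp_mb__after_atomic() in <linux/atomic.h> when an architecture does not override them). To illustrate, a hypothetical port could supply nothing but the relaxed primitive and let this header generate the remaining variants; a sketch using a compiler builtin as a stand-in for hand-written asm:

/* Hypothetical arch/foo/include/asm/atomic.h fragment, illustration only. */
static __always_inline int arch_atomic_add_return_relaxed(int i, atomic_t *v)
{
	return __atomic_add_fetch(&v->counter, i, __ATOMIC_RELAXED);
}
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
/* arch_atomic_add_return{,_acquire,_release}() now come from this file. */
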
+
+#ifndef arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add
+#else /* arch_atomic_fetch_add_relaxed */
+
+#ifndef arch_atomic_fetch_add_acquire
+static __always_inline int
+arch_atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
+#endif
+
+#ifndef arch_atomic_fetch_add_release
+static __always_inline int
+arch_atomic_fetch_add_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_add_relaxed(i, v);
+}
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
+#endif
+
+#ifndef arch_atomic_fetch_add
+static __always_inline int
+arch_atomic_fetch_add(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#endif
+
+#endif /* arch_atomic_fetch_add_relaxed */
+
+#ifndef arch_atomic_sub_return_relaxed
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return
+#define arch_atomic_sub_return_release arch_atomic_sub_return
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return
+#else /* arch_atomic_sub_return_relaxed */
+
+#ifndef arch_atomic_sub_return_acquire
+static __always_inline int
+arch_atomic_sub_return_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
+#endif
+
+#ifndef arch_atomic_sub_return_release
+static __always_inline int
+arch_atomic_sub_return_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_sub_return_relaxed(i, v);
+}
+#define arch_atomic_sub_return_release arch_atomic_sub_return_release
+#endif
+
+#ifndef arch_atomic_sub_return
+static __always_inline int
+arch_atomic_sub_return(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_sub_return arch_atomic_sub_return
+#endif
+
+#endif /* arch_atomic_sub_return_relaxed */
+
+#ifndef arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub
+#else /* arch_atomic_fetch_sub_relaxed */
+
+#ifndef arch_atomic_fetch_sub_acquire
+static __always_inline int
+arch_atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
+#endif
+
+#ifndef arch_atomic_fetch_sub_release
+static __always_inline int
+arch_atomic_fetch_sub_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_sub_relaxed(i, v);
+}
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
+#endif
+
+#ifndef arch_atomic_fetch_sub
+static __always_inline int
+arch_atomic_fetch_sub(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#endif
+
+#endif /* arch_atomic_fetch_sub_relaxed */
+
+#ifndef arch_atomic_inc
+static __always_inline void
+arch_atomic_inc(atomic_t *v)
+{
+ arch_atomic_add(1, v);
+}
+#define arch_atomic_inc arch_atomic_inc
+#endif
+
+#ifndef arch_atomic_inc_return_relaxed
+#ifdef arch_atomic_inc_return
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return
+#define arch_atomic_inc_return_release arch_atomic_inc_return
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return
+#endif /* arch_atomic_inc_return */
+
+#ifndef arch_atomic_inc_return
+static __always_inline int
+arch_atomic_inc_return(atomic_t *v)
+{
+ return arch_atomic_add_return(1, v);
+}
+#define arch_atomic_inc_return arch_atomic_inc_return
+#endif
+
+#ifndef arch_atomic_inc_return_acquire
+static __always_inline int
+arch_atomic_inc_return_acquire(atomic_t *v)
+{
+ return arch_atomic_add_return_acquire(1, v);
+}
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
+#endif
+
+#ifndef arch_atomic_inc_return_release
+static __always_inline int
+arch_atomic_inc_return_release(atomic_t *v)
+{
+ return arch_atomic_add_return_release(1, v);
+}
+#define arch_atomic_inc_return_release arch_atomic_inc_return_release
+#endif
+
+#ifndef arch_atomic_inc_return_relaxed
+static __always_inline int
+arch_atomic_inc_return_relaxed(atomic_t *v)
+{
+ return arch_atomic_add_return_relaxed(1, v);
+}
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
+#endif
+
+#else /* arch_atomic_inc_return_relaxed */
+
+#ifndef arch_atomic_inc_return_acquire
+static __always_inline int
+arch_atomic_inc_return_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
+#endif
+
+#ifndef arch_atomic_inc_return_release
+static __always_inline int
+arch_atomic_inc_return_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_inc_return_relaxed(v);
+}
+#define arch_atomic_inc_return_release arch_atomic_inc_return_release
+#endif
+
+#ifndef arch_atomic_inc_return
+static __always_inline int
+arch_atomic_inc_return(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_inc_return arch_atomic_inc_return
+#endif
+
+#endif /* arch_atomic_inc_return_relaxed */
+
+#ifndef arch_atomic_fetch_inc_relaxed
+#ifdef arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc
+#endif /* arch_atomic_fetch_inc */
+
+#ifndef arch_atomic_fetch_inc
+static __always_inline int
+arch_atomic_fetch_inc(atomic_t *v)
+{
+ return arch_atomic_fetch_add(1, v);
+}
+#define arch_atomic_fetch_inc arch_atomic_fetch_inc
+#endif
+
+#ifndef arch_atomic_fetch_inc_acquire
+static __always_inline int
+arch_atomic_fetch_inc_acquire(atomic_t *v)
+{
+ return arch_atomic_fetch_add_acquire(1, v);
+}
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic_fetch_inc_release
+static __always_inline int
+arch_atomic_fetch_inc_release(atomic_t *v)
+{
+ return arch_atomic_fetch_add_release(1, v);
+}
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
+#endif
+
+#ifndef arch_atomic_fetch_inc_relaxed
+static __always_inline int
+arch_atomic_fetch_inc_relaxed(atomic_t *v)
+{
+ return arch_atomic_fetch_add_relaxed(1, v);
+}
+#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed
+#endif
+
+#else /* arch_atomic_fetch_inc_relaxed */
+
+#ifndef arch_atomic_fetch_inc_acquire
+static __always_inline int
+arch_atomic_fetch_inc_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic_fetch_inc_release
+static __always_inline int
+arch_atomic_fetch_inc_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_inc_relaxed(v);
+}
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
+#endif
+
+#ifndef arch_atomic_fetch_inc
+static __always_inline int
+arch_atomic_fetch_inc(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_inc arch_atomic_fetch_inc
+#endif
+
+#endif /* arch_atomic_fetch_inc_relaxed */
+
+#ifndef arch_atomic_dec
+static __always_inline void
+arch_atomic_dec(atomic_t *v)
+{
+ arch_atomic_sub(1, v);
+}
+#define arch_atomic_dec arch_atomic_dec
+#endif
+
+#ifndef arch_atomic_dec_return_relaxed
+#ifdef arch_atomic_dec_return
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return
+#define arch_atomic_dec_return_release arch_atomic_dec_return
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return
+#endif /* arch_atomic_dec_return */
+
+#ifndef arch_atomic_dec_return
+static __always_inline int
+arch_atomic_dec_return(atomic_t *v)
+{
+ return arch_atomic_sub_return(1, v);
+}
+#define arch_atomic_dec_return arch_atomic_dec_return
+#endif
+
+#ifndef arch_atomic_dec_return_acquire
+static __always_inline int
+arch_atomic_dec_return_acquire(atomic_t *v)
+{
+ return arch_atomic_sub_return_acquire(1, v);
+}
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
+#endif
+
+#ifndef arch_atomic_dec_return_release
+static __always_inline int
+arch_atomic_dec_return_release(atomic_t *v)
+{
+ return arch_atomic_sub_return_release(1, v);
+}
+#define arch_atomic_dec_return_release arch_atomic_dec_return_release
+#endif
+
+#ifndef arch_atomic_dec_return_relaxed
+static __always_inline int
+arch_atomic_dec_return_relaxed(atomic_t *v)
+{
+ return arch_atomic_sub_return_relaxed(1, v);
+}
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
+#endif
+
+#else /* arch_atomic_dec_return_relaxed */
+
+#ifndef arch_atomic_dec_return_acquire
+static __always_inline int
+arch_atomic_dec_return_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
+#endif
+
+#ifndef arch_atomic_dec_return_release
+static __always_inline int
+arch_atomic_dec_return_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_dec_return_relaxed(v);
+}
+#define arch_atomic_dec_return_release arch_atomic_dec_return_release
+#endif
+
+#ifndef arch_atomic_dec_return
+static __always_inline int
+arch_atomic_dec_return(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_dec_return arch_atomic_dec_return
+#endif
+
+#endif /* arch_atomic_dec_return_relaxed */
+
+#ifndef arch_atomic_fetch_dec_relaxed
+#ifdef arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec
+#endif /* arch_atomic_fetch_dec */
+
+#ifndef arch_atomic_fetch_dec
+static __always_inline int
+arch_atomic_fetch_dec(atomic_t *v)
+{
+ return arch_atomic_fetch_sub(1, v);
+}
+#define arch_atomic_fetch_dec arch_atomic_fetch_dec
+#endif
+
+#ifndef arch_atomic_fetch_dec_acquire
+static __always_inline int
+arch_atomic_fetch_dec_acquire(atomic_t *v)
+{
+ return arch_atomic_fetch_sub_acquire(1, v);
+}
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic_fetch_dec_release
+static __always_inline int
+arch_atomic_fetch_dec_release(atomic_t *v)
+{
+ return arch_atomic_fetch_sub_release(1, v);
+}
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
+#endif
+
+#ifndef arch_atomic_fetch_dec_relaxed
+static __always_inline int
+arch_atomic_fetch_dec_relaxed(atomic_t *v)
+{
+ return arch_atomic_fetch_sub_relaxed(1, v);
+}
+#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed
+#endif
+
+#else /* arch_atomic_fetch_dec_relaxed */
+
+#ifndef arch_atomic_fetch_dec_acquire
+static __always_inline int
+arch_atomic_fetch_dec_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic_fetch_dec_release
+static __always_inline int
+arch_atomic_fetch_dec_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_dec_relaxed(v);
+}
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
+#endif
+
+#ifndef arch_atomic_fetch_dec
+static __always_inline int
+arch_atomic_fetch_dec(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_dec arch_atomic_fetch_dec
+#endif
+
+#endif /* arch_atomic_fetch_dec_relaxed */
+
+#ifndef arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and
+#else /* arch_atomic_fetch_and_relaxed */
+
+#ifndef arch_atomic_fetch_and_acquire
+static __always_inline int
+arch_atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
+#endif
+
+#ifndef arch_atomic_fetch_and_release
+static __always_inline int
+arch_atomic_fetch_and_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_and_relaxed(i, v);
+}
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
+#endif
+
+#ifndef arch_atomic_fetch_and
+static __always_inline int
+arch_atomic_fetch_and(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#endif
+
+#endif /* arch_atomic_fetch_and_relaxed */
+
+#ifndef arch_atomic_andnot
+static __always_inline void
+arch_atomic_andnot(int i, atomic_t *v)
+{
+ arch_atomic_and(~i, v);
+}
+#define arch_atomic_andnot arch_atomic_andnot
+#endif
+
+#ifndef arch_atomic_fetch_andnot_relaxed
+#ifdef arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
+#endif /* arch_atomic_fetch_andnot */
+
+#ifndef arch_atomic_fetch_andnot
+static __always_inline int
+arch_atomic_fetch_andnot(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and(~i, v);
+}
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#endif
+
+#ifndef arch_atomic_fetch_andnot_acquire
+static __always_inline int
+arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and_acquire(~i, v);
+}
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic_fetch_andnot_release
+static __always_inline int
+arch_atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and_release(~i, v);
+}
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic_fetch_andnot_relaxed
+static __always_inline int
+arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and_relaxed(~i, v);
+}
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#endif
+
+#else /* arch_atomic_fetch_andnot_relaxed */
+
+#ifndef arch_atomic_fetch_andnot_acquire
+static __always_inline int
+arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic_fetch_andnot_release
+static __always_inline int
+arch_atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic_fetch_andnot
+static __always_inline int
+arch_atomic_fetch_andnot(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#endif
+
+#endif /* arch_atomic_fetch_andnot_relaxed */
+
+#ifndef arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or
+#else /* arch_atomic_fetch_or_relaxed */
+
+#ifndef arch_atomic_fetch_or_acquire
+static __always_inline int
+arch_atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
+#endif
+
+#ifndef arch_atomic_fetch_or_release
+static __always_inline int
+arch_atomic_fetch_or_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_or_relaxed(i, v);
+}
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
+#endif
+
+#ifndef arch_atomic_fetch_or
+static __always_inline int
+arch_atomic_fetch_or(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#endif
+
+#endif /* arch_atomic_fetch_or_relaxed */
+
+#ifndef arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor
+#else /* arch_atomic_fetch_xor_relaxed */
+
+#ifndef arch_atomic_fetch_xor_acquire
+static __always_inline int
+arch_atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
+#endif
+
+#ifndef arch_atomic_fetch_xor_release
+static __always_inline int
+arch_atomic_fetch_xor_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_xor_relaxed(i, v);
+}
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
+#endif
+
+#ifndef arch_atomic_fetch_xor
+static __always_inline int
+arch_atomic_fetch_xor(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#endif
+
+#endif /* arch_atomic_fetch_xor_relaxed */
+
+#ifndef arch_atomic_xchg_relaxed
+#define arch_atomic_xchg_acquire arch_atomic_xchg
+#define arch_atomic_xchg_release arch_atomic_xchg
+#define arch_atomic_xchg_relaxed arch_atomic_xchg
+#else /* arch_atomic_xchg_relaxed */
+
+#ifndef arch_atomic_xchg_acquire
+static __always_inline int
+arch_atomic_xchg_acquire(atomic_t *v, int i)
+{
+ int ret = arch_atomic_xchg_relaxed(v, i);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
+#endif
+
+#ifndef arch_atomic_xchg_release
+static __always_inline int
+arch_atomic_xchg_release(atomic_t *v, int i)
+{
+ __atomic_release_fence();
+ return arch_atomic_xchg_relaxed(v, i);
+}
+#define arch_atomic_xchg_release arch_atomic_xchg_release
+#endif
+
+#ifndef arch_atomic_xchg
+static __always_inline int
+arch_atomic_xchg(atomic_t *v, int i)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_xchg_relaxed(v, i);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_xchg arch_atomic_xchg
+#endif
+
+#endif /* arch_atomic_xchg_relaxed */
+
+#ifndef arch_atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg
+#else /* arch_atomic_cmpxchg_relaxed */
+
+#ifndef arch_atomic_cmpxchg_acquire
+static __always_inline int
+arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+ int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_cmpxchg_release
+static __always_inline int
+arch_atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+ __atomic_release_fence();
+ return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_cmpxchg
+static __always_inline int
+arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
+#endif
+
+#endif /* arch_atomic_cmpxchg_relaxed */
+
+#ifndef arch_atomic_try_cmpxchg_relaxed
+#ifdef arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg
+#endif /* arch_atomic_try_cmpxchg */
+
+#ifndef arch_atomic_try_cmpxchg
+static __always_inline bool
+arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_release
+static __always_inline bool
+arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_relaxed
+static __always_inline bool
+arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed
+#endif
+
+#else /* arch_atomic_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_release
+static __always_inline bool
+arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ __atomic_release_fence();
+ return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_try_cmpxchg
+static __always_inline bool
+arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+#endif
+
+#endif /* arch_atomic_try_cmpxchg_relaxed */
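
try_cmpxchg() differs from plain cmpxchg() in two ways that the fallback above makes explicit: it returns a boolean success flag, and on failure it writes the observed value back through *old, so the caller never has to re-read and compare by hand. The fallbacks further down (arch_atomic_fetch_add_unless(), arch_atomic_dec_if_positive(), ...) are all built on this shape. A sketch of the two caller-side loop styles for an increment:

/* cmpxchg(): the caller compares and refreshes the expected value. */
static inline void inc_with_cmpxchg(atomic_t *v)
{
	int c = arch_atomic_read(v);
	for (;;) {
		int old = arch_atomic_cmpxchg(v, c, c + 1);
		if (old == c)
			break;
		c = old;
	}
}

/* try_cmpxchg(): the compare and the refresh of 'c' are folded in. */
static inline void inc_with_try_cmpxchg(atomic_t *v)
{
	int c = arch_atomic_read(v);
	while (!arch_atomic_try_cmpxchg(v, &c, c + 1))
		;
}
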
+
+#ifndef arch_atomic_sub_and_test
+/**
+ * arch_atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic_sub_and_test(int i, atomic_t *v)
+{
+ return arch_atomic_sub_return(i, v) == 0;
+}
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
+#endif
+
+#ifndef arch_atomic_dec_and_test
+/**
+ * arch_atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __always_inline bool
+arch_atomic_dec_and_test(atomic_t *v)
+{
+ return arch_atomic_dec_return(v) == 0;
+}
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
+#endif
+
+#ifndef arch_atomic_inc_and_test
+/**
+ * arch_atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic_inc_and_test(atomic_t *v)
+{
+ return arch_atomic_inc_return(v) == 0;
+}
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
+#endif
+
+#ifndef arch_atomic_add_negative
+/**
+ * arch_atomic_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic_add_negative(int i, atomic_t *v)
+{
+ return arch_atomic_add_return(i, v) < 0;
+}
+#define arch_atomic_add_negative arch_atomic_add_negative
+#endif
+
+#ifndef arch_atomic_fetch_add_unless
+/**
+ * arch_atomic_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns original value of @v
+ */
+static __always_inline int
+arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int c = arch_atomic_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
+#endif
+
+#ifndef arch_atomic_add_unless
+/**
+ * arch_atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static __always_inline bool
+arch_atomic_add_unless(atomic_t *v, int a, int u)
+{
+ return arch_atomic_fetch_add_unless(v, a, u) != u;
+}
+#define arch_atomic_add_unless arch_atomic_add_unless
+#endif
+
+#ifndef arch_atomic_inc_not_zero
+/**
+ * arch_atomic_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static __always_inline bool
+arch_atomic_inc_not_zero(atomic_t *v)
+{
+ return arch_atomic_add_unless(v, 1, 0);
+}
+#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
+#endif
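
inc_not_zero() is the building block for the common "take a reference only if the object is still live" pattern. A usage sketch, with a hypothetical object type and the instrumented atomic_inc_not_zero() wrapper:

struct obj { atomic_t refs; /* ... */ };

/* Returns the object with an extra reference held, or NULL if its
 * refcount had already dropped to zero (it is being freed). */
static inline struct obj *obj_tryget(struct obj *o)
{
	if (o && !atomic_inc_not_zero(&o->refs))
		return NULL;
	return o;
}
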
+
+#ifndef arch_atomic_inc_unless_negative
+static __always_inline bool
+arch_atomic_inc_unless_negative(atomic_t *v)
+{
+ int c = arch_atomic_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!arch_atomic_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+#endif
+
+#ifndef arch_atomic_dec_unless_positive
+static __always_inline bool
+arch_atomic_dec_unless_positive(atomic_t *v)
+{
+ int c = arch_atomic_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!arch_atomic_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+#endif
+
+#ifndef arch_atomic_dec_if_positive
+static __always_inline int
+arch_atomic_dec_if_positive(atomic_t *v)
+{
+ int dec, c = arch_atomic_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!arch_atomic_try_cmpxchg(v, &c, dec));
+
+ return dec;
+}
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
+#endif
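
dec_if_positive() only performs the decrement when the result stays non-negative, and it returns the (possibly not stored) decremented value either way, so a negative return means nothing was consumed. A usage sketch, assuming a simple credit counter:

static atomic_t credits = ATOMIC_INIT(8);	/* hypothetical budget */

static inline int take_credit(void)
{
	/* Fails without touching the counter once it has reached zero. */
	if (atomic_dec_if_positive(&credits) < 0)
		return -EBUSY;
	return 0;
}
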
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
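
With CONFIG_GENERIC_ATOMIC64 (32-bit architectures lacking native 64-bit atomics), <asm-generic/atomic64.h> declares the atomic64_t operations and lib/atomic64.c implements them by protecting each atomic64_t with one of a small hashed pool of spinlocks; the fallbacks that follow then layer the ordering variants on top exactly as in the 32-bit half above. A rough sketch of that strategy (illustrative names, not the exact lib/atomic64.c code):

static s64 generic_atomic64_add_return(s64 i, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);	/* hash 'v' into the lock pool */
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = (v->counter += i);
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
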
+
+#ifndef arch_atomic64_read_acquire
+static __always_inline s64
+arch_atomic64_read_acquire(const atomic64_t *v)
+{
+ return smp_load_acquire(&(v)->counter);
+}
+#define arch_atomic64_read_acquire arch_atomic64_read_acquire
+#endif
+
+#ifndef arch_atomic64_set_release
+static __always_inline void
+arch_atomic64_set_release(atomic64_t *v, s64 i)
+{
+ smp_store_release(&(v)->counter, i);
+}
+#define arch_atomic64_set_release arch_atomic64_set_release
+#endif
+
+#ifndef arch_atomic64_add_return_relaxed
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return
+#define arch_atomic64_add_return_release arch_atomic64_add_return
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return
+#else /* arch_atomic64_add_return_relaxed */
+
+#ifndef arch_atomic64_add_return_acquire
+static __always_inline s64
+arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
+#endif
+
+#ifndef arch_atomic64_add_return_release
+static __always_inline s64
+arch_atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_add_return_relaxed(i, v);
+}
+#define arch_atomic64_add_return_release arch_atomic64_add_return_release
+#endif
+
+#ifndef arch_atomic64_add_return
+static __always_inline s64
+arch_atomic64_add_return(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_add_return arch_atomic64_add_return
+#endif
+
+#endif /* arch_atomic64_add_return_relaxed */
+
+#ifndef arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add
+#else /* arch_atomic64_fetch_add_relaxed */
+
+#ifndef arch_atomic64_fetch_add_acquire
+static __always_inline s64
+arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_add_release
+static __always_inline s64
+arch_atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_add_relaxed(i, v);
+}
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
+#endif
+
+#ifndef arch_atomic64_fetch_add
+static __always_inline s64
+arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#endif
+
+#endif /* arch_atomic64_fetch_add_relaxed */
+
+#ifndef arch_atomic64_sub_return_relaxed
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return
+#else /* arch_atomic64_sub_return_relaxed */
+
+#ifndef arch_atomic64_sub_return_acquire
+static __always_inline s64
+arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
+#endif
+
+#ifndef arch_atomic64_sub_return_release
+static __always_inline s64
+arch_atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_sub_return_relaxed(i, v);
+}
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
+#endif
+
+#ifndef arch_atomic64_sub_return
+static __always_inline s64
+arch_atomic64_sub_return(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+#endif
+
+#endif /* arch_atomic64_sub_return_relaxed */
+
+#ifndef arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub
+#else /* arch_atomic64_fetch_sub_relaxed */
+
+#ifndef arch_atomic64_fetch_sub_acquire
+static __always_inline s64
+arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_sub_release
+static __always_inline s64
+arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+}
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
+#endif
+
+#ifndef arch_atomic64_fetch_sub
+static __always_inline s64
+arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#endif
+
+#endif /* arch_atomic64_fetch_sub_relaxed */
+
+#ifndef arch_atomic64_inc
+static __always_inline void
+arch_atomic64_inc(atomic64_t *v)
+{
+ arch_atomic64_add(1, v);
+}
+#define arch_atomic64_inc arch_atomic64_inc
+#endif
+
+#ifndef arch_atomic64_inc_return_relaxed
+#ifdef arch_atomic64_inc_return
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return
+#endif /* arch_atomic64_inc_return */
+
+#ifndef arch_atomic64_inc_return
+static __always_inline s64
+arch_atomic64_inc_return(atomic64_t *v)
+{
+ return arch_atomic64_add_return(1, v);
+}
+#define arch_atomic64_inc_return arch_atomic64_inc_return
+#endif
+
+#ifndef arch_atomic64_inc_return_acquire
+static __always_inline s64
+arch_atomic64_inc_return_acquire(atomic64_t *v)
+{
+ return arch_atomic64_add_return_acquire(1, v);
+}
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
+#endif
+
+#ifndef arch_atomic64_inc_return_release
+static __always_inline s64
+arch_atomic64_inc_return_release(atomic64_t *v)
+{
+ return arch_atomic64_add_return_release(1, v);
+}
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
+#endif
+
+#ifndef arch_atomic64_inc_return_relaxed
+static __always_inline s64
+arch_atomic64_inc_return_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_add_return_relaxed(1, v);
+}
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
+#endif
+
+#else /* arch_atomic64_inc_return_relaxed */
+
+#ifndef arch_atomic64_inc_return_acquire
+static __always_inline s64
+arch_atomic64_inc_return_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
+#endif
+
+#ifndef arch_atomic64_inc_return_release
+static __always_inline s64
+arch_atomic64_inc_return_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_inc_return_relaxed(v);
+}
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
+#endif
+
+#ifndef arch_atomic64_inc_return
+static __always_inline s64
+arch_atomic64_inc_return(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_inc_return arch_atomic64_inc_return
+#endif
+
+#endif /* arch_atomic64_inc_return_relaxed */
+
+#ifndef arch_atomic64_fetch_inc_relaxed
+#ifdef arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc
+#endif /* arch_atomic64_fetch_inc */
+
+#ifndef arch_atomic64_fetch_inc
+static __always_inline s64
+arch_atomic64_fetch_inc(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add(1, v);
+}
+#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
+#endif
+
+#ifndef arch_atomic64_fetch_inc_acquire
+static __always_inline s64
+arch_atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add_acquire(1, v);
+}
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_inc_release
+static __always_inline s64
+arch_atomic64_fetch_inc_release(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add_release(1, v);
+}
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
+#endif
+
+#ifndef arch_atomic64_fetch_inc_relaxed
+static __always_inline s64
+arch_atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add_relaxed(1, v);
+}
+#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_inc_relaxed */
+
+#ifndef arch_atomic64_fetch_inc_acquire
+static __always_inline s64
+arch_atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_inc_release
+static __always_inline s64
+arch_atomic64_fetch_inc_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_inc_relaxed(v);
+}
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
+#endif
+
+#ifndef arch_atomic64_fetch_inc
+static __always_inline s64
+arch_atomic64_fetch_inc(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
+#endif
+
+#endif /* arch_atomic64_fetch_inc_relaxed */
+
+#ifndef arch_atomic64_dec
+static __always_inline void
+arch_atomic64_dec(atomic64_t *v)
+{
+ arch_atomic64_sub(1, v);
+}
+#define arch_atomic64_dec arch_atomic64_dec
+#endif
+
+#ifndef arch_atomic64_dec_return_relaxed
+#ifdef arch_atomic64_dec_return
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return
+#endif /* arch_atomic64_dec_return */
+
+#ifndef arch_atomic64_dec_return
+static __always_inline s64
+arch_atomic64_dec_return(atomic64_t *v)
+{
+ return arch_atomic64_sub_return(1, v);
+}
+#define arch_atomic64_dec_return arch_atomic64_dec_return
+#endif
+
+#ifndef arch_atomic64_dec_return_acquire
+static __always_inline s64
+arch_atomic64_dec_return_acquire(atomic64_t *v)
+{
+ return arch_atomic64_sub_return_acquire(1, v);
+}
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
+#endif
+
+#ifndef arch_atomic64_dec_return_release
+static __always_inline s64
+arch_atomic64_dec_return_release(atomic64_t *v)
+{
+ return arch_atomic64_sub_return_release(1, v);
+}
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
+#endif
+
+#ifndef arch_atomic64_dec_return_relaxed
+static __always_inline s64
+arch_atomic64_dec_return_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_sub_return_relaxed(1, v);
+}
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
+#endif
+
+#else /* arch_atomic64_dec_return_relaxed */
+
+#ifndef arch_atomic64_dec_return_acquire
+static __always_inline s64
+arch_atomic64_dec_return_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
+#endif
+
+#ifndef arch_atomic64_dec_return_release
+static __always_inline s64
+arch_atomic64_dec_return_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_dec_return_relaxed(v);
+}
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
+#endif
+
+#ifndef arch_atomic64_dec_return
+static __always_inline s64
+arch_atomic64_dec_return(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_dec_return arch_atomic64_dec_return
+#endif
+
+#endif /* arch_atomic64_dec_return_relaxed */
+
+#ifndef arch_atomic64_fetch_dec_relaxed
+#ifdef arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec
+#endif /* arch_atomic64_fetch_dec */
+
+#ifndef arch_atomic64_fetch_dec
+static __always_inline s64
+arch_atomic64_fetch_dec(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub(1, v);
+}
+#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
+#endif
+
+#ifndef arch_atomic64_fetch_dec_acquire
+static __always_inline s64
+arch_atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub_acquire(1, v);
+}
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_dec_release
+static __always_inline s64
+arch_atomic64_fetch_dec_release(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub_release(1, v);
+}
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
+#endif
+
+#ifndef arch_atomic64_fetch_dec_relaxed
+static __always_inline s64
+arch_atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub_relaxed(1, v);
+}
+#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_dec_relaxed */
+
+#ifndef arch_atomic64_fetch_dec_acquire
+static __always_inline s64
+arch_atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_dec_release
+static __always_inline s64
+arch_atomic64_fetch_dec_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_dec_relaxed(v);
+}
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
+#endif
+
+#ifndef arch_atomic64_fetch_dec
+static __always_inline s64
+arch_atomic64_fetch_dec(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
+#endif
+
+#endif /* arch_atomic64_fetch_dec_relaxed */
+
+#ifndef arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and
+#else /* arch_atomic64_fetch_and_relaxed */
+
+#ifndef arch_atomic64_fetch_and_acquire
+static __always_inline s64
+arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_and_release
+static __always_inline s64
+arch_atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_and_relaxed(i, v);
+}
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
+#endif
+
+#ifndef arch_atomic64_fetch_and
+static __always_inline s64
+arch_atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#endif
+
+#endif /* arch_atomic64_fetch_and_relaxed */
+
+#ifndef arch_atomic64_andnot
+static __always_inline void
+arch_atomic64_andnot(s64 i, atomic64_t *v)
+{
+ arch_atomic64_and(~i, v);
+}
+#define arch_atomic64_andnot arch_atomic64_andnot
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_relaxed
+#ifdef arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot
+#endif /* arch_atomic64_fetch_andnot */
+
+#ifndef arch_atomic64_fetch_andnot
+static __always_inline s64
+arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and(~i, v);
+}
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_acquire
+static __always_inline s64
+arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and_acquire(~i, v);
+}
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_release
+static __always_inline s64
+arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and_release(~i, v);
+}
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_relaxed
+static __always_inline s64
+arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and_relaxed(~i, v);
+}
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_andnot_relaxed */
+
+#ifndef arch_atomic64_fetch_andnot_acquire
+static __always_inline s64
+arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_release
+static __always_inline s64
+arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_andnot_relaxed(i, v);
+}
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic64_fetch_andnot
+static __always_inline s64
+arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+#endif
+
+#endif /* arch_atomic64_fetch_andnot_relaxed */
+
+#ifndef arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or
+#else /* arch_atomic64_fetch_or_relaxed */
+
+#ifndef arch_atomic64_fetch_or_acquire
+static __always_inline s64
+arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_or_release
+static __always_inline s64
+arch_atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_or_relaxed(i, v);
+}
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release
+#endif
+
+#ifndef arch_atomic64_fetch_or
+static __always_inline s64
+arch_atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#endif
+
+#endif /* arch_atomic64_fetch_or_relaxed */
+
+#ifndef arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor
+#else /* arch_atomic64_fetch_xor_relaxed */
+
+#ifndef arch_atomic64_fetch_xor_acquire
+static __always_inline s64
+arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_xor_release
+static __always_inline s64
+arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_xor_relaxed(i, v);
+}
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
+#endif
+
+#ifndef arch_atomic64_fetch_xor
+static __always_inline s64
+arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+#endif
+
+#endif /* arch_atomic64_fetch_xor_relaxed */
+
+#ifndef arch_atomic64_xchg_relaxed
+#define arch_atomic64_xchg_acquire arch_atomic64_xchg
+#define arch_atomic64_xchg_release arch_atomic64_xchg
+#define arch_atomic64_xchg_relaxed arch_atomic64_xchg
+#else /* arch_atomic64_xchg_relaxed */
+
+#ifndef arch_atomic64_xchg_acquire
+static __always_inline s64
+arch_atomic64_xchg_acquire(atomic64_t *v, s64 i)
+{
+ s64 ret = arch_atomic64_xchg_relaxed(v, i);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire
+#endif
+
+#ifndef arch_atomic64_xchg_release
+static __always_inline s64
+arch_atomic64_xchg_release(atomic64_t *v, s64 i)
+{
+ __atomic_release_fence();
+ return arch_atomic64_xchg_relaxed(v, i);
+}
+#define arch_atomic64_xchg_release arch_atomic64_xchg_release
+#endif
+
+#ifndef arch_atomic64_xchg
+static __always_inline s64
+arch_atomic64_xchg(atomic64_t *v, s64 i)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_xchg_relaxed(v, i);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_xchg arch_atomic64_xchg
+#endif
+
+#endif /* arch_atomic64_xchg_relaxed */
+
+#ifndef arch_atomic64_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg
+#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg
+#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg
+#else /* arch_atomic64_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_cmpxchg_acquire
+static __always_inline s64
+arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+ s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_cmpxchg_release
+static __always_inline s64
+arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+ __atomic_release_fence();
+ return arch_atomic64_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_cmpxchg
+static __always_inline s64
+arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
+#endif
+
+#endif /* arch_atomic64_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_try_cmpxchg_relaxed
+#ifdef arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg
+#endif /* arch_atomic64_try_cmpxchg */
+
+#ifndef arch_atomic64_try_cmpxchg
+static __always_inline bool
+arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_release
+static __always_inline bool
+arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_relaxed
+static __always_inline bool
+arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed
+#endif
+
+#else /* arch_atomic64_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_release
+static __always_inline bool
+arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ __atomic_release_fence();
+ return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg
+static __always_inline bool
+arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+#endif
+
+#endif /* arch_atomic64_try_cmpxchg_relaxed */
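A try_cmpxchg() reports whether the exchange happened and, on failure, writes the value it actually found back through @old, so callers can retry without a separate re-read. A minimal sketch of the resulting update loop, assuming a hypothetical helper (clamp_add64() and the limit are illustration only, not part of this patch):

/*
 * Hypothetical example: add @a to @v unless that would push it past @limit.
 * On cmpxchg failure @c already holds the fresh value, so the loop simply
 * re-evaluates the bound and tries again.
 */
static __always_inline bool
clamp_add64(atomic64_t *v, s64 a, s64 limit)
{
	s64 c = arch_atomic64_read(v);

	do {
		if (c + a > limit)
			return false;
	} while (!arch_atomic64_try_cmpxchg(v, &c, c + a));

	return true;
}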
+
+#ifndef arch_atomic64_sub_and_test
+/**
+ * arch_atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_sub_return(i, v) == 0;
+}
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
+#endif
+
+#ifndef arch_atomic64_dec_and_test
+/**
+ * arch_atomic64_dec_and_test - decrement and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __always_inline bool
+arch_atomic64_dec_and_test(atomic64_t *v)
+{
+ return arch_atomic64_dec_return(v) == 0;
+}
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
+#endif
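dec_and_test() exists so that exactly one caller, the one that observes the transition to zero, runs the teardown path. A hedged sketch of the usual put-side pattern (struct foo and foo_free() are invented; production code would normally reach for refcount_t, which layers overflow protection on top of this primitive):

struct foo {
	atomic64_t refs;
	/* ... payload ... */
};

static void foo_put(struct foo *f)
{
	/* Only the thread that drops the final reference sees true here. */
	if (atomic64_dec_and_test(&f->refs))
		foo_free(f);		/* hypothetical destructor */
}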
+
+#ifndef arch_atomic64_inc_and_test
+/**
+ * arch_atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic64_inc_and_test(atomic64_t *v)
+{
+ return arch_atomic64_inc_return(v) == 0;
+}
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
+#endif
+
+#ifndef arch_atomic64_add_negative
+/**
+ * arch_atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when the
+ * result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic64_add_negative(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_add_return(i, v) < 0;
+}
+#define arch_atomic64_add_negative arch_atomic64_add_negative
+#endif
+
+#ifndef arch_atomic64_fetch_add_unless
+/**
+ * arch_atomic64_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns the original value of @v.
+ */
+static __always_inline s64
+arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ s64 c = arch_atomic64_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
+#endif
+
+#ifndef arch_atomic64_add_unless
+/**
+ * arch_atomic64_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static __always_inline bool
+arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ return arch_atomic64_fetch_add_unless(v, a, u) != u;
+}
+#define arch_atomic64_add_unless arch_atomic64_add_unless
+#endif
+
+#ifndef arch_atomic64_inc_not_zero
+/**
+ * arch_atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static __always_inline bool
+arch_atomic64_inc_not_zero(atomic64_t *v)
+{
+ return arch_atomic64_add_unless(v, 1, 0);
+}
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
+#endif
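inc_not_zero() is the lookup-side counterpart of dec_and_test(): an object found in a shared structure may already have dropped to zero references, and taking a new reference then would resurrect it. A sketch of the usual RCU-protected get (find_foo() and the key are hypothetical):

static struct foo *foo_get(u64 key)
{
	struct foo *f;

	rcu_read_lock();
	f = find_foo(key);			/* hypothetical RCU lookup */
	if (f && !atomic64_inc_not_zero(&f->refs))
		f = NULL;			/* already dying: do not revive */
	rcu_read_unlock();

	return f;
}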
+
+#ifndef arch_atomic64_inc_unless_negative
+static __always_inline bool
+arch_atomic64_inc_unless_negative(atomic64_t *v)
+{
+ s64 c = arch_atomic64_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
+#endif
+
+#ifndef arch_atomic64_dec_unless_positive
+static __always_inline bool
+arch_atomic64_dec_unless_positive(atomic64_t *v)
+{
+ s64 c = arch_atomic64_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
+#endif
+
+#ifndef arch_atomic64_dec_if_positive
+static __always_inline s64
+arch_atomic64_dec_if_positive(atomic64_t *v)
+{
+ s64 dec, c = arch_atomic64_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
+
+ return dec;
+}
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
+#endif
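dec_if_positive() returns the would-be new value, which is negative exactly when the decrement was refused, so the caller can use the sign as a success flag. A minimal sketch of a token-bucket style consumer (take_token() is invented):

/* Consume one token if any are available; true on success. */
static bool take_token(atomic64_t *tokens)
{
	return atomic64_dec_if_positive(tokens) >= 0;
}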
+
+#endif /* _LINUX_ATOMIC_FALLBACK_H */
+// 90cd26cfd69d2250303d654955a0cc12620fb91b
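Every ordered fallback in the file above is generated from one recipe: place the architecture's fence hook on the side of the relaxed access that must not be crossed. Condensed, with arch_atomic64_xchg_relaxed() standing in for any relaxed primitive:

	/* acquire: fence after the access; later memory accesses
	 * cannot move up across it. */
	ret = arch_atomic64_xchg_relaxed(v, i);
	__atomic_acquire_fence();

	/* release: fence before the access; earlier memory accesses
	 * cannot move down across it. */
	__atomic_release_fence();
	ret = arch_atomic64_xchg_relaxed(v, i);

	/* fully ordered: fences on both sides. */
	__atomic_pre_full_fence();
	ret = arch_atomic64_xchg_relaxed(v, i);
	__atomic_post_full_fence();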
diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h
index a7d240e465c0..2c4927bf7b8d 100644
--- a/include/linux/atomic-fallback.h
+++ b/include/linux/atomic-fallback.h
@@ -6,6 +6,8 @@
#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H
+#include <linux/compiler.h>
+
#ifndef xchg_relaxed
#define xchg_relaxed xchg
#define xchg_acquire xchg
@@ -76,7 +78,7 @@
#endif /* cmpxchg64_relaxed */
#ifndef atomic_read_acquire
-static inline int
+static __always_inline int
atomic_read_acquire(const atomic_t *v)
{
return smp_load_acquire(&(v)->counter);
@@ -85,7 +87,7 @@ atomic_read_acquire(const atomic_t *v)
#endif
#ifndef atomic_set_release
-static inline void
+static __always_inline void
atomic_set_release(atomic_t *v, int i)
{
smp_store_release(&(v)->counter, i);
@@ -100,7 +102,7 @@ atomic_set_release(atomic_t *v, int i)
#else /* atomic_add_return_relaxed */
#ifndef atomic_add_return_acquire
-static inline int
+static __always_inline int
atomic_add_return_acquire(int i, atomic_t *v)
{
int ret = atomic_add_return_relaxed(i, v);
@@ -111,7 +113,7 @@ atomic_add_return_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_add_return_release
-static inline int
+static __always_inline int
atomic_add_return_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -121,7 +123,7 @@ atomic_add_return_release(int i, atomic_t *v)
#endif
#ifndef atomic_add_return
-static inline int
+static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
int ret;
@@ -142,7 +144,7 @@ atomic_add_return(int i, atomic_t *v)
#else /* atomic_fetch_add_relaxed */
#ifndef atomic_fetch_add_acquire
-static inline int
+static __always_inline int
atomic_fetch_add_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_add_relaxed(i, v);
@@ -153,7 +155,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_add_release
-static inline int
+static __always_inline int
atomic_fetch_add_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -163,7 +165,7 @@ atomic_fetch_add_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_add
-static inline int
+static __always_inline int
atomic_fetch_add(int i, atomic_t *v)
{
int ret;
@@ -184,7 +186,7 @@ atomic_fetch_add(int i, atomic_t *v)
#else /* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_acquire
-static inline int
+static __always_inline int
atomic_sub_return_acquire(int i, atomic_t *v)
{
int ret = atomic_sub_return_relaxed(i, v);
@@ -195,7 +197,7 @@ atomic_sub_return_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_sub_return_release
-static inline int
+static __always_inline int
atomic_sub_return_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -205,7 +207,7 @@ atomic_sub_return_release(int i, atomic_t *v)
#endif
#ifndef atomic_sub_return
-static inline int
+static __always_inline int
atomic_sub_return(int i, atomic_t *v)
{
int ret;
@@ -226,7 +228,7 @@ atomic_sub_return(int i, atomic_t *v)
#else /* atomic_fetch_sub_relaxed */
#ifndef atomic_fetch_sub_acquire
-static inline int
+static __always_inline int
atomic_fetch_sub_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_sub_relaxed(i, v);
@@ -237,7 +239,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_sub_release
-static inline int
+static __always_inline int
atomic_fetch_sub_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -247,7 +249,7 @@ atomic_fetch_sub_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_sub
-static inline int
+static __always_inline int
atomic_fetch_sub(int i, atomic_t *v)
{
int ret;
@@ -262,7 +264,7 @@ atomic_fetch_sub(int i, atomic_t *v)
#endif /* atomic_fetch_sub_relaxed */
#ifndef atomic_inc
-static inline void
+static __always_inline void
atomic_inc(atomic_t *v)
{
atomic_add(1, v);
@@ -278,7 +280,7 @@ atomic_inc(atomic_t *v)
#endif /* atomic_inc_return */
#ifndef atomic_inc_return
-static inline int
+static __always_inline int
atomic_inc_return(atomic_t *v)
{
return atomic_add_return(1, v);
@@ -287,7 +289,7 @@ atomic_inc_return(atomic_t *v)
#endif
#ifndef atomic_inc_return_acquire
-static inline int
+static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
return atomic_add_return_acquire(1, v);
@@ -296,7 +298,7 @@ atomic_inc_return_acquire(atomic_t *v)
#endif
#ifndef atomic_inc_return_release
-static inline int
+static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
return atomic_add_return_release(1, v);
@@ -305,7 +307,7 @@ atomic_inc_return_release(atomic_t *v)
#endif
#ifndef atomic_inc_return_relaxed
-static inline int
+static __always_inline int
atomic_inc_return_relaxed(atomic_t *v)
{
return atomic_add_return_relaxed(1, v);
@@ -316,7 +318,7 @@ atomic_inc_return_relaxed(atomic_t *v)
#else /* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_acquire
-static inline int
+static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
int ret = atomic_inc_return_relaxed(v);
@@ -327,7 +329,7 @@ atomic_inc_return_acquire(atomic_t *v)
#endif
#ifndef atomic_inc_return_release
-static inline int
+static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
__atomic_release_fence();
@@ -337,7 +339,7 @@ atomic_inc_return_release(atomic_t *v)
#endif
#ifndef atomic_inc_return
-static inline int
+static __always_inline int
atomic_inc_return(atomic_t *v)
{
int ret;
@@ -359,7 +361,7 @@ atomic_inc_return(atomic_t *v)
#endif /* atomic_fetch_inc */
#ifndef atomic_fetch_inc
-static inline int
+static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
return atomic_fetch_add(1, v);
@@ -368,7 +370,7 @@ atomic_fetch_inc(atomic_t *v)
#endif
#ifndef atomic_fetch_inc_acquire
-static inline int
+static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
return atomic_fetch_add_acquire(1, v);
@@ -377,7 +379,7 @@ atomic_fetch_inc_acquire(atomic_t *v)
#endif
#ifndef atomic_fetch_inc_release
-static inline int
+static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
return atomic_fetch_add_release(1, v);
@@ -386,7 +388,7 @@ atomic_fetch_inc_release(atomic_t *v)
#endif
#ifndef atomic_fetch_inc_relaxed
-static inline int
+static __always_inline int
atomic_fetch_inc_relaxed(atomic_t *v)
{
return atomic_fetch_add_relaxed(1, v);
@@ -397,7 +399,7 @@ atomic_fetch_inc_relaxed(atomic_t *v)
#else /* atomic_fetch_inc_relaxed */
#ifndef atomic_fetch_inc_acquire
-static inline int
+static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
int ret = atomic_fetch_inc_relaxed(v);
@@ -408,7 +410,7 @@ atomic_fetch_inc_acquire(atomic_t *v)
#endif
#ifndef atomic_fetch_inc_release
-static inline int
+static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
__atomic_release_fence();
@@ -418,7 +420,7 @@ atomic_fetch_inc_release(atomic_t *v)
#endif
#ifndef atomic_fetch_inc
-static inline int
+static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
int ret;
@@ -433,7 +435,7 @@ atomic_fetch_inc(atomic_t *v)
#endif /* atomic_fetch_inc_relaxed */
#ifndef atomic_dec
-static inline void
+static __always_inline void
atomic_dec(atomic_t *v)
{
atomic_sub(1, v);
@@ -449,7 +451,7 @@ atomic_dec(atomic_t *v)
#endif /* atomic_dec_return */
#ifndef atomic_dec_return
-static inline int
+static __always_inline int
atomic_dec_return(atomic_t *v)
{
return atomic_sub_return(1, v);
@@ -458,7 +460,7 @@ atomic_dec_return(atomic_t *v)
#endif
#ifndef atomic_dec_return_acquire
-static inline int
+static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
return atomic_sub_return_acquire(1, v);
@@ -467,7 +469,7 @@ atomic_dec_return_acquire(atomic_t *v)
#endif
#ifndef atomic_dec_return_release
-static inline int
+static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
return atomic_sub_return_release(1, v);
@@ -476,7 +478,7 @@ atomic_dec_return_release(atomic_t *v)
#endif
#ifndef atomic_dec_return_relaxed
-static inline int
+static __always_inline int
atomic_dec_return_relaxed(atomic_t *v)
{
return atomic_sub_return_relaxed(1, v);
@@ -487,7 +489,7 @@ atomic_dec_return_relaxed(atomic_t *v)
#else /* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_acquire
-static inline int
+static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
int ret = atomic_dec_return_relaxed(v);
@@ -498,7 +500,7 @@ atomic_dec_return_acquire(atomic_t *v)
#endif
#ifndef atomic_dec_return_release
-static inline int
+static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
__atomic_release_fence();
@@ -508,7 +510,7 @@ atomic_dec_return_release(atomic_t *v)
#endif
#ifndef atomic_dec_return
-static inline int
+static __always_inline int
atomic_dec_return(atomic_t *v)
{
int ret;
@@ -530,7 +532,7 @@ atomic_dec_return(atomic_t *v)
#endif /* atomic_fetch_dec */
#ifndef atomic_fetch_dec
-static inline int
+static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
return atomic_fetch_sub(1, v);
@@ -539,7 +541,7 @@ atomic_fetch_dec(atomic_t *v)
#endif
#ifndef atomic_fetch_dec_acquire
-static inline int
+static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
return atomic_fetch_sub_acquire(1, v);
@@ -548,7 +550,7 @@ atomic_fetch_dec_acquire(atomic_t *v)
#endif
#ifndef atomic_fetch_dec_release
-static inline int
+static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
return atomic_fetch_sub_release(1, v);
@@ -557,7 +559,7 @@ atomic_fetch_dec_release(atomic_t *v)
#endif
#ifndef atomic_fetch_dec_relaxed
-static inline int
+static __always_inline int
atomic_fetch_dec_relaxed(atomic_t *v)
{
return atomic_fetch_sub_relaxed(1, v);
@@ -568,7 +570,7 @@ atomic_fetch_dec_relaxed(atomic_t *v)
#else /* atomic_fetch_dec_relaxed */
#ifndef atomic_fetch_dec_acquire
-static inline int
+static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
int ret = atomic_fetch_dec_relaxed(v);
@@ -579,7 +581,7 @@ atomic_fetch_dec_acquire(atomic_t *v)
#endif
#ifndef atomic_fetch_dec_release
-static inline int
+static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
__atomic_release_fence();
@@ -589,7 +591,7 @@ atomic_fetch_dec_release(atomic_t *v)
#endif
#ifndef atomic_fetch_dec
-static inline int
+static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
int ret;
@@ -610,7 +612,7 @@ atomic_fetch_dec(atomic_t *v)
#else /* atomic_fetch_and_relaxed */
#ifndef atomic_fetch_and_acquire
-static inline int
+static __always_inline int
atomic_fetch_and_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_and_relaxed(i, v);
@@ -621,7 +623,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_and_release
-static inline int
+static __always_inline int
atomic_fetch_and_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -631,7 +633,7 @@ atomic_fetch_and_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_and
-static inline int
+static __always_inline int
atomic_fetch_and(int i, atomic_t *v)
{
int ret;
@@ -646,7 +648,7 @@ atomic_fetch_and(int i, atomic_t *v)
#endif /* atomic_fetch_and_relaxed */
#ifndef atomic_andnot
-static inline void
+static __always_inline void
atomic_andnot(int i, atomic_t *v)
{
atomic_and(~i, v);
@@ -662,7 +664,7 @@ atomic_andnot(int i, atomic_t *v)
#endif /* atomic_fetch_andnot */
#ifndef atomic_fetch_andnot
-static inline int
+static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
return atomic_fetch_and(~i, v);
@@ -671,7 +673,7 @@ atomic_fetch_andnot(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot_acquire
-static inline int
+static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
return atomic_fetch_and_acquire(~i, v);
@@ -680,7 +682,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot_release
-static inline int
+static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
return atomic_fetch_and_release(~i, v);
@@ -689,7 +691,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot_relaxed
-static inline int
+static __always_inline int
atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
return atomic_fetch_and_relaxed(~i, v);
@@ -700,7 +702,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v)
#else /* atomic_fetch_andnot_relaxed */
#ifndef atomic_fetch_andnot_acquire
-static inline int
+static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_andnot_relaxed(i, v);
@@ -711,7 +713,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot_release
-static inline int
+static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -721,7 +723,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot
-static inline int
+static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
int ret;
@@ -742,7 +744,7 @@ atomic_fetch_andnot(int i, atomic_t *v)
#else /* atomic_fetch_or_relaxed */
#ifndef atomic_fetch_or_acquire
-static inline int
+static __always_inline int
atomic_fetch_or_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_or_relaxed(i, v);
@@ -753,7 +755,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_or_release
-static inline int
+static __always_inline int
atomic_fetch_or_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -763,7 +765,7 @@ atomic_fetch_or_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_or
-static inline int
+static __always_inline int
atomic_fetch_or(int i, atomic_t *v)
{
int ret;
@@ -784,7 +786,7 @@ atomic_fetch_or(int i, atomic_t *v)
#else /* atomic_fetch_xor_relaxed */
#ifndef atomic_fetch_xor_acquire
-static inline int
+static __always_inline int
atomic_fetch_xor_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_xor_relaxed(i, v);
@@ -795,7 +797,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_xor_release
-static inline int
+static __always_inline int
atomic_fetch_xor_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -805,7 +807,7 @@ atomic_fetch_xor_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_xor
-static inline int
+static __always_inline int
atomic_fetch_xor(int i, atomic_t *v)
{
int ret;
@@ -826,7 +828,7 @@ atomic_fetch_xor(int i, atomic_t *v)
#else /* atomic_xchg_relaxed */
#ifndef atomic_xchg_acquire
-static inline int
+static __always_inline int
atomic_xchg_acquire(atomic_t *v, int i)
{
int ret = atomic_xchg_relaxed(v, i);
@@ -837,7 +839,7 @@ atomic_xchg_acquire(atomic_t *v, int i)
#endif
#ifndef atomic_xchg_release
-static inline int
+static __always_inline int
atomic_xchg_release(atomic_t *v, int i)
{
__atomic_release_fence();
@@ -847,7 +849,7 @@ atomic_xchg_release(atomic_t *v, int i)
#endif
#ifndef atomic_xchg
-static inline int
+static __always_inline int
atomic_xchg(atomic_t *v, int i)
{
int ret;
@@ -868,7 +870,7 @@ atomic_xchg(atomic_t *v, int i)
#else /* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_acquire
-static inline int
+static __always_inline int
atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
int ret = atomic_cmpxchg_relaxed(v, old, new);
@@ -879,7 +881,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
#endif
#ifndef atomic_cmpxchg_release
-static inline int
+static __always_inline int
atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
__atomic_release_fence();
@@ -889,7 +891,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new)
#endif
#ifndef atomic_cmpxchg
-static inline int
+static __always_inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
@@ -911,7 +913,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new)
#endif /* atomic_try_cmpxchg */
#ifndef atomic_try_cmpxchg
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -924,7 +926,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg_acquire
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -937,7 +939,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg_release
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -950,7 +952,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg_relaxed
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -965,7 +967,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
#else /* atomic_try_cmpxchg_relaxed */
#ifndef atomic_try_cmpxchg_acquire
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
bool ret = atomic_try_cmpxchg_relaxed(v, old, new);
@@ -976,7 +978,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg_release
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
__atomic_release_fence();
@@ -986,7 +988,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
bool ret;
@@ -1010,7 +1012,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
* true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
atomic_sub_and_test(int i, atomic_t *v)
{
return atomic_sub_return(i, v) == 0;
@@ -1027,7 +1029,7 @@ atomic_sub_and_test(int i, atomic_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
-static inline bool
+static __always_inline bool
atomic_dec_and_test(atomic_t *v)
{
return atomic_dec_return(v) == 0;
@@ -1044,7 +1046,7 @@ atomic_dec_and_test(atomic_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
atomic_inc_and_test(atomic_t *v)
{
return atomic_inc_return(v) == 0;
@@ -1062,7 +1064,7 @@ atomic_inc_and_test(atomic_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-static inline bool
+static __always_inline bool
atomic_add_negative(int i, atomic_t *v)
{
return atomic_add_return(i, v) < 0;
@@ -1080,7 +1082,7 @@ atomic_add_negative(int i, atomic_t *v)
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns original value of @v
*/
-static inline int
+static __always_inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c = atomic_read(v);
@@ -1105,7 +1107,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u)
* Atomically adds @a to @v, if @v was not already @u.
* Returns true if the addition was done.
*/
-static inline bool
+static __always_inline bool
atomic_add_unless(atomic_t *v, int a, int u)
{
return atomic_fetch_add_unless(v, a, u) != u;
@@ -1121,7 +1123,7 @@ atomic_add_unless(atomic_t *v, int a, int u)
* Atomically increments @v by 1, if @v is non-zero.
* Returns true if the increment was done.
*/
-static inline bool
+static __always_inline bool
atomic_inc_not_zero(atomic_t *v)
{
return atomic_add_unless(v, 1, 0);
@@ -1130,7 +1132,7 @@ atomic_inc_not_zero(atomic_t *v)
#endif
#ifndef atomic_inc_unless_negative
-static inline bool
+static __always_inline bool
atomic_inc_unless_negative(atomic_t *v)
{
int c = atomic_read(v);
@@ -1146,7 +1148,7 @@ atomic_inc_unless_negative(atomic_t *v)
#endif
#ifndef atomic_dec_unless_positive
-static inline bool
+static __always_inline bool
atomic_dec_unless_positive(atomic_t *v)
{
int c = atomic_read(v);
@@ -1162,7 +1164,7 @@ atomic_dec_unless_positive(atomic_t *v)
#endif
#ifndef atomic_dec_if_positive
-static inline int
+static __always_inline int
atomic_dec_if_positive(atomic_t *v)
{
int dec, c = atomic_read(v);
@@ -1178,15 +1180,12 @@ atomic_dec_if_positive(atomic_t *v)
#define atomic_dec_if_positive atomic_dec_if_positive
#endif
-#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
-#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
-
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
#ifndef atomic64_read_acquire
-static inline s64
+static __always_inline s64
atomic64_read_acquire(const atomic64_t *v)
{
return smp_load_acquire(&(v)->counter);
@@ -1195,7 +1194,7 @@ atomic64_read_acquire(const atomic64_t *v)
#endif
#ifndef atomic64_set_release
-static inline void
+static __always_inline void
atomic64_set_release(atomic64_t *v, s64 i)
{
smp_store_release(&(v)->counter, i);
@@ -1210,7 +1209,7 @@ atomic64_set_release(atomic64_t *v, s64 i)
#else /* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_acquire
-static inline s64
+static __always_inline s64
atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_add_return_relaxed(i, v);
@@ -1221,7 +1220,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_add_return_release
-static inline s64
+static __always_inline s64
atomic64_add_return_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1231,7 +1230,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_add_return
-static inline s64
+static __always_inline s64
atomic64_add_return(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1252,7 +1251,7 @@ atomic64_add_return(s64 i, atomic64_t *v)
#else /* atomic64_fetch_add_relaxed */
#ifndef atomic64_fetch_add_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_add_relaxed(i, v);
@@ -1263,7 +1262,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_add_release
-static inline s64
+static __always_inline s64
atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1273,7 +1272,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_add
-static inline s64
+static __always_inline s64
atomic64_fetch_add(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1294,7 +1293,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v)
#else /* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_acquire
-static inline s64
+static __always_inline s64
atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_sub_return_relaxed(i, v);
@@ -1305,7 +1304,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_sub_return_release
-static inline s64
+static __always_inline s64
atomic64_sub_return_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1315,7 +1314,7 @@ atomic64_sub_return_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_sub_return
-static inline s64
+static __always_inline s64
atomic64_sub_return(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1336,7 +1335,7 @@ atomic64_sub_return(s64 i, atomic64_t *v)
#else /* atomic64_fetch_sub_relaxed */
#ifndef atomic64_fetch_sub_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_sub_relaxed(i, v);
@@ -1347,7 +1346,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_sub_release
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1357,7 +1356,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_sub
-static inline s64
+static __always_inline s64
atomic64_fetch_sub(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1372,7 +1371,7 @@ atomic64_fetch_sub(s64 i, atomic64_t *v)
#endif /* atomic64_fetch_sub_relaxed */
#ifndef atomic64_inc
-static inline void
+static __always_inline void
atomic64_inc(atomic64_t *v)
{
atomic64_add(1, v);
@@ -1388,7 +1387,7 @@ atomic64_inc(atomic64_t *v)
#endif /* atomic64_inc_return */
#ifndef atomic64_inc_return
-static inline s64
+static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
return atomic64_add_return(1, v);
@@ -1397,7 +1396,7 @@ atomic64_inc_return(atomic64_t *v)
#endif
#ifndef atomic64_inc_return_acquire
-static inline s64
+static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
return atomic64_add_return_acquire(1, v);
@@ -1406,7 +1405,7 @@ atomic64_inc_return_acquire(atomic64_t *v)
#endif
#ifndef atomic64_inc_return_release
-static inline s64
+static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
return atomic64_add_return_release(1, v);
@@ -1415,7 +1414,7 @@ atomic64_inc_return_release(atomic64_t *v)
#endif
#ifndef atomic64_inc_return_relaxed
-static inline s64
+static __always_inline s64
atomic64_inc_return_relaxed(atomic64_t *v)
{
return atomic64_add_return_relaxed(1, v);
@@ -1426,7 +1425,7 @@ atomic64_inc_return_relaxed(atomic64_t *v)
#else /* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_acquire
-static inline s64
+static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
s64 ret = atomic64_inc_return_relaxed(v);
@@ -1437,7 +1436,7 @@ atomic64_inc_return_acquire(atomic64_t *v)
#endif
#ifndef atomic64_inc_return_release
-static inline s64
+static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
__atomic_release_fence();
@@ -1447,7 +1446,7 @@ atomic64_inc_return_release(atomic64_t *v)
#endif
#ifndef atomic64_inc_return
-static inline s64
+static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
s64 ret;
@@ -1469,7 +1468,7 @@ atomic64_inc_return(atomic64_t *v)
#endif /* atomic64_fetch_inc */
#ifndef atomic64_fetch_inc
-static inline s64
+static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
return atomic64_fetch_add(1, v);
@@ -1478,7 +1477,7 @@ atomic64_fetch_inc(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
return atomic64_fetch_add_acquire(1, v);
@@ -1487,7 +1486,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc_release
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
return atomic64_fetch_add_release(1, v);
@@ -1496,7 +1495,7 @@ atomic64_fetch_inc_release(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc_relaxed
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_relaxed(atomic64_t *v)
{
return atomic64_fetch_add_relaxed(1, v);
@@ -1507,7 +1506,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v)
#else /* atomic64_fetch_inc_relaxed */
#ifndef atomic64_fetch_inc_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
s64 ret = atomic64_fetch_inc_relaxed(v);
@@ -1518,7 +1517,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc_release
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
__atomic_release_fence();
@@ -1528,7 +1527,7 @@ atomic64_fetch_inc_release(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc
-static inline s64
+static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
s64 ret;
@@ -1543,7 +1542,7 @@ atomic64_fetch_inc(atomic64_t *v)
#endif /* atomic64_fetch_inc_relaxed */
#ifndef atomic64_dec
-static inline void
+static __always_inline void
atomic64_dec(atomic64_t *v)
{
atomic64_sub(1, v);
@@ -1559,7 +1558,7 @@ atomic64_dec(atomic64_t *v)
#endif /* atomic64_dec_return */
#ifndef atomic64_dec_return
-static inline s64
+static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
return atomic64_sub_return(1, v);
@@ -1568,7 +1567,7 @@ atomic64_dec_return(atomic64_t *v)
#endif
#ifndef atomic64_dec_return_acquire
-static inline s64
+static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
return atomic64_sub_return_acquire(1, v);
@@ -1577,7 +1576,7 @@ atomic64_dec_return_acquire(atomic64_t *v)
#endif
#ifndef atomic64_dec_return_release
-static inline s64
+static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
return atomic64_sub_return_release(1, v);
@@ -1586,7 +1585,7 @@ atomic64_dec_return_release(atomic64_t *v)
#endif
#ifndef atomic64_dec_return_relaxed
-static inline s64
+static __always_inline s64
atomic64_dec_return_relaxed(atomic64_t *v)
{
return atomic64_sub_return_relaxed(1, v);
@@ -1597,7 +1596,7 @@ atomic64_dec_return_relaxed(atomic64_t *v)
#else /* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_acquire
-static inline s64
+static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
s64 ret = atomic64_dec_return_relaxed(v);
@@ -1608,7 +1607,7 @@ atomic64_dec_return_acquire(atomic64_t *v)
#endif
#ifndef atomic64_dec_return_release
-static inline s64
+static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
__atomic_release_fence();
@@ -1618,7 +1617,7 @@ atomic64_dec_return_release(atomic64_t *v)
#endif
#ifndef atomic64_dec_return
-static inline s64
+static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
s64 ret;
@@ -1640,7 +1639,7 @@ atomic64_dec_return(atomic64_t *v)
#endif /* atomic64_fetch_dec */
#ifndef atomic64_fetch_dec
-static inline s64
+static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
return atomic64_fetch_sub(1, v);
@@ -1649,7 +1648,7 @@ atomic64_fetch_dec(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
return atomic64_fetch_sub_acquire(1, v);
@@ -1658,7 +1657,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec_release
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
return atomic64_fetch_sub_release(1, v);
@@ -1667,7 +1666,7 @@ atomic64_fetch_dec_release(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec_relaxed
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_relaxed(atomic64_t *v)
{
return atomic64_fetch_sub_relaxed(1, v);
@@ -1678,7 +1677,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v)
#else /* atomic64_fetch_dec_relaxed */
#ifndef atomic64_fetch_dec_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
s64 ret = atomic64_fetch_dec_relaxed(v);
@@ -1689,7 +1688,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec_release
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
__atomic_release_fence();
@@ -1699,7 +1698,7 @@ atomic64_fetch_dec_release(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec
-static inline s64
+static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
s64 ret;
@@ -1720,7 +1719,7 @@ atomic64_fetch_dec(atomic64_t *v)
#else /* atomic64_fetch_and_relaxed */
#ifndef atomic64_fetch_and_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_and_relaxed(i, v);
@@ -1731,7 +1730,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_and_release
-static inline s64
+static __always_inline s64
atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1741,7 +1740,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_and
-static inline s64
+static __always_inline s64
atomic64_fetch_and(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1756,7 +1755,7 @@ atomic64_fetch_and(s64 i, atomic64_t *v)
#endif /* atomic64_fetch_and_relaxed */
#ifndef atomic64_andnot
-static inline void
+static __always_inline void
atomic64_andnot(s64 i, atomic64_t *v)
{
atomic64_and(~i, v);
@@ -1772,7 +1771,7 @@ atomic64_andnot(s64 i, atomic64_t *v)
#endif /* atomic64_fetch_andnot */
#ifndef atomic64_fetch_andnot
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
return atomic64_fetch_and(~i, v);
@@ -1781,7 +1780,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
return atomic64_fetch_and_acquire(~i, v);
@@ -1790,7 +1789,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot_release
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
return atomic64_fetch_and_release(~i, v);
@@ -1799,7 +1798,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot_relaxed
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
return atomic64_fetch_and_relaxed(~i, v);
@@ -1810,7 +1809,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
#else /* atomic64_fetch_andnot_relaxed */
#ifndef atomic64_fetch_andnot_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_andnot_relaxed(i, v);
@@ -1821,7 +1820,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot_release
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1831,7 +1830,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1852,7 +1851,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v)
#else /* atomic64_fetch_or_relaxed */
#ifndef atomic64_fetch_or_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_or_relaxed(i, v);
@@ -1863,7 +1862,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_or_release
-static inline s64
+static __always_inline s64
atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1873,7 +1872,7 @@ atomic64_fetch_or_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_or
-static inline s64
+static __always_inline s64
atomic64_fetch_or(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1894,7 +1893,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v)
#else /* atomic64_fetch_xor_relaxed */
#ifndef atomic64_fetch_xor_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_xor_relaxed(i, v);
@@ -1905,7 +1904,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_xor_release
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1915,7 +1914,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_xor
-static inline s64
+static __always_inline s64
atomic64_fetch_xor(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1936,7 +1935,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v)
#else /* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_acquire
-static inline s64
+static __always_inline s64
atomic64_xchg_acquire(atomic64_t *v, s64 i)
{
s64 ret = atomic64_xchg_relaxed(v, i);
@@ -1947,7 +1946,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i)
#endif
#ifndef atomic64_xchg_release
-static inline s64
+static __always_inline s64
atomic64_xchg_release(atomic64_t *v, s64 i)
{
__atomic_release_fence();
@@ -1957,7 +1956,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i)
#endif
#ifndef atomic64_xchg
-static inline s64
+static __always_inline s64
atomic64_xchg(atomic64_t *v, s64 i)
{
s64 ret;
@@ -1978,7 +1977,7 @@ atomic64_xchg(atomic64_t *v, s64 i)
#else /* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_acquire
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
s64 ret = atomic64_cmpxchg_relaxed(v, old, new);
@@ -1989,7 +1988,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
#endif
#ifndef atomic64_cmpxchg_release
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
__atomic_release_fence();
@@ -1999,7 +1998,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
#endif
#ifndef atomic64_cmpxchg
-static inline s64
+static __always_inline s64
atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
s64 ret;
@@ -2021,7 +2020,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
#endif /* atomic64_try_cmpxchg */
#ifndef atomic64_try_cmpxchg
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
@@ -2034,7 +2033,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg_acquire
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
@@ -2047,7 +2046,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg_release
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
@@ -2060,7 +2059,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg_relaxed
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
@@ -2075,7 +2074,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
#else /* atomic64_try_cmpxchg_relaxed */
#ifndef atomic64_try_cmpxchg_acquire
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
bool ret = atomic64_try_cmpxchg_relaxed(v, old, new);
@@ -2086,7 +2085,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg_release
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
__atomic_release_fence();
@@ -2096,7 +2095,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
bool ret;
@@ -2120,7 +2119,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
* true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
atomic64_sub_and_test(s64 i, atomic64_t *v)
{
return atomic64_sub_return(i, v) == 0;
@@ -2137,7 +2136,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
-static inline bool
+static __always_inline bool
atomic64_dec_and_test(atomic64_t *v)
{
return atomic64_dec_return(v) == 0;
@@ -2154,7 +2153,7 @@ atomic64_dec_and_test(atomic64_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
atomic64_inc_and_test(atomic64_t *v)
{
return atomic64_inc_return(v) == 0;
@@ -2172,7 +2171,7 @@ atomic64_inc_and_test(atomic64_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-static inline bool
+static __always_inline bool
atomic64_add_negative(s64 i, atomic64_t *v)
{
return atomic64_add_return(i, v) < 0;
@@ -2190,7 +2189,7 @@ atomic64_add_negative(s64 i, atomic64_t *v)
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns original value of @v
*/
-static inline s64
+static __always_inline s64
atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 c = atomic64_read(v);
@@ -2215,7 +2214,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
* Atomically adds @a to @v, if @v was not already @u.
* Returns true if the addition was done.
*/
-static inline bool
+static __always_inline bool
atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
return atomic64_fetch_add_unless(v, a, u) != u;
@@ -2231,7 +2230,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
* Atomically increments @v by 1, if @v is non-zero.
* Returns true if the increment was done.
*/
-static inline bool
+static __always_inline bool
atomic64_inc_not_zero(atomic64_t *v)
{
return atomic64_add_unless(v, 1, 0);
@@ -2240,7 +2239,7 @@ atomic64_inc_not_zero(atomic64_t *v)
#endif
#ifndef atomic64_inc_unless_negative
-static inline bool
+static __always_inline bool
atomic64_inc_unless_negative(atomic64_t *v)
{
s64 c = atomic64_read(v);
@@ -2256,7 +2255,7 @@ atomic64_inc_unless_negative(atomic64_t *v)
#endif
#ifndef atomic64_dec_unless_positive
-static inline bool
+static __always_inline bool
atomic64_dec_unless_positive(atomic64_t *v)
{
s64 c = atomic64_read(v);
@@ -2272,7 +2271,7 @@ atomic64_dec_unless_positive(atomic64_t *v)
#endif
#ifndef atomic64_dec_if_positive
-static inline s64
+static __always_inline s64
atomic64_dec_if_positive(atomic64_t *v)
{
s64 dec, c = atomic64_read(v);
@@ -2288,8 +2287,5 @@ atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_dec_if_positive atomic64_dec_if_positive
#endif
-#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
-#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
-
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 25de4a2804d70f57e994fe3b419148658bb5378a
+// 1fac0941c79bf0ae100723cc2ac9b94061f0b67a
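The only change in this file is static inline becoming static __always_inline on every fallback. Plain inline is merely a hint the compiler is free to ignore, so a fallback could end up as a real out-of-line function, presumably undesirable for one-line wrappers expected to compile down to a single instruction and to be usable where an unexpected call is unsafe. A toy contrast (both function names are invented):

/* A hint only: the compiler may still emit an out-of-line copy. */
static inline int add_one_hint(int x)
{
	return x + 1;
}

/* Forced: always expanded at the call site, never a function call. */
static __always_inline int add_one_forced(int x)
{
	return x + 1;
}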
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 4c0d009a46f0..571a11008ab5 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -25,6 +25,12 @@
* See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
*/
+#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
+
+#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
+
/*
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
@@ -71,7 +77,12 @@
__ret; \
})
+#ifdef ARCH_ATOMIC
+#include <linux/atomic-arch-fallback.h>
+#include <asm-generic/atomic-instrumented.h>
+#else
#include <linux/atomic-fallback.h>
+#endif
#include <asm-generic/atomic-long.h>
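To illustrate the relocated cond_read helpers, a minimal sketch of a caller spinning until an atomic value changes (the function and variable names are hypothetical):

static void wait_until_nonzero(atomic_t *state)
{
	/*
	 * VAL names the freshly loaded counter value inside the
	 * condition expression, per smp_cond_load_acquire() semantics;
	 * the acquire ordering makes writes before the releasing store
	 * visible afterwards.
	 */
	atomic_cond_read_acquire(state, VAL != 0);
}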
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index c7d6b2e8c3b5..56e4580d4f55 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -190,6 +190,7 @@ extern void backlight_force_update(struct backlight_device *bd,
extern int backlight_register_notifier(struct notifier_block *nb);
extern int backlight_unregister_notifier(struct notifier_block *nb);
extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
+struct backlight_device *backlight_device_get_by_name(const char *name);
extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness);
#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
diff --git a/include/linux/bch.h b/include/linux/bch.h
index aa765af85c38..85fdce83d4e2 100644
--- a/include/linux/bch.h
+++ b/include/linux/bch.h
@@ -33,6 +33,7 @@
* @cache: log-based polynomial representation buffer
* @elp: error locator polynomial
* @poly_2t: temporary polynomials of degree 2t
+ * @swap_bits: swap bits within data and syndrome bytes
*/
struct bch_control {
unsigned int m;
@@ -51,16 +52,18 @@ struct bch_control {
int *cache;
struct gf_poly *elp;
struct gf_poly *poly_2t[4];
+ bool swap_bits;
};
-struct bch_control *init_bch(int m, int t, unsigned int prim_poly);
+struct bch_control *bch_init(int m, int t, unsigned int prim_poly,
+ bool swap_bits);
-void free_bch(struct bch_control *bch);
+void bch_free(struct bch_control *bch);
-void encode_bch(struct bch_control *bch, const uint8_t *data,
+void bch_encode(struct bch_control *bch, const uint8_t *data,
unsigned int len, uint8_t *ecc);
-int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
+int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len,
const uint8_t *recv_ecc, const uint8_t *calc_ecc,
const unsigned int *syn, unsigned int *errloc);
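A minimal sketch of the renamed BCH API in use, assuming illustrative m/t parameters and a caller-supplied ECC buffer of bch->ecc_bytes bytes:

#include <linux/bch.h>
#include <linux/string.h>

static int example_bch_ecc(const u8 *data, unsigned int len, u8 *ecc)
{
	/* Galois field order and correction capability are illustrative. */
	struct bch_control *bch = bch_init(13, 4, 0, false);

	if (!bch)
		return -EINVAL;

	memset(ecc, 0, bch->ecc_bytes);	/* bch_encode() accumulates into ecc */
	bch_encode(bch, data, len, ecc);
	bch_free(bch);
	return 0;
}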
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index a345d9fed3d8..4a20b7517dd0 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -26,35 +26,27 @@ struct linux_binprm {
unsigned long p; /* current top of mem */
unsigned long argmin; /* rlimit marker for copy_strings() */
unsigned int
+ /* Should an execfd be passed to userspace? */
+ have_execfd:1,
+
+ /* Use the creds of a script (see binfmt_misc) */
+ execfd_creds:1,
/*
- * True after the bprm_set_creds hook has been called once
- * (multiple calls can be made via prepare_binprm() for
- * binfmt_script/misc).
- */
- called_set_creds:1,
- /*
- * True if most recent call to the commoncaps bprm_set_creds
- * hook (due to multiple prepare_binprm() calls from the
- * binfmt_script/misc handlers) resulted in elevated
- * privileges.
- */
- cap_elevated:1,
- /*
- * Set by bprm_set_creds hook to indicate a privilege-gaining
- * exec has happened. Used to sanitize execution environment
- * and to set AT_SECURE auxv for glibc.
+ * Set by bprm_creds_for_exec hook to indicate a
+ * privilege-gaining exec has happened. Used to set
+ * AT_SECURE auxv for glibc.
*/
secureexec:1,
/*
- * Set by flush_old_exec, when exec_mmap has been called.
- * This is past the point of no return, when the
- * exec_update_mutex has been taken.
+ * Set when errors can no longer be returned to the
+ * original userspace.
*/
- called_exec_mmap:1;
+ point_of_no_return:1;
#ifdef __alpha__
unsigned int taso:1;
#endif
- unsigned int recursion_depth; /* only for search_binary_handler() */
+ struct file * executable; /* Executable to pass to the interpreter */
+ struct file * interpreter;
struct file * file;
struct cred *cred; /* new credentials */
int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */
@@ -65,7 +57,7 @@ struct linux_binprm {
of the time same as filename, but could be
different for binfmt_{misc,script} */
unsigned interp_flags;
- unsigned interp_data;
+ int execfd; /* File descriptor of the executable */
unsigned long loader, exec;
struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. */
@@ -76,10 +68,6 @@ struct linux_binprm {
#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
#define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
-/* fd of the binary should be passed to the interpreter */
-#define BINPRM_FLAGS_EXECFD_BIT 1
-#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
-
/* filename of the binary will be inaccessible after exec */
#define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2
#define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT)
@@ -123,10 +111,8 @@ static inline void insert_binfmt(struct linux_binfmt *fmt)
extern void unregister_binfmt(struct linux_binfmt *);
-extern int prepare_binprm(struct linux_binprm *);
extern int __must_check remove_arg_zero(struct linux_binprm *);
-extern int search_binary_handler(struct linux_binprm *);
-extern int flush_old_exec(struct linux_binprm * bprm);
+extern int begin_new_exec(struct linux_binprm * bprm);
extern void setup_new_exec(struct linux_binprm * bprm);
extern void finalize_exec(struct linux_binprm *bprm);
extern void would_dump(struct linux_binprm *, struct file *);
@@ -144,9 +130,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
extern int transfer_args_to_stack(struct linux_binprm *bprm,
unsigned long *sp_location);
extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm);
-extern int copy_strings_kernel(int argc, const char *const *argv,
- struct linux_binprm *bprm);
-extern void install_exec_creds(struct linux_binprm *bprm);
+int copy_string_kernel(const char *arg, struct linux_binprm *bprm);
extern void set_binfmt(struct linux_binfmt *new);
extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t);
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 9acf654f0b19..99f2ac30b1d9 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -72,7 +72,7 @@ static inline int get_bitmask_order(unsigned int count)
static __always_inline unsigned long hweight_long(unsigned long w)
{
- return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+ return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}
/**
diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h
index 8ed53d7524ea..e66b711d091e 100644
--- a/include/linux/bsearch.h
+++ b/include/linux/bsearch.h
@@ -4,7 +4,29 @@
#include <linux/types.h>
-void *bsearch(const void *key, const void *base, size_t num, size_t size,
- cmp_func_t cmp);
+static __always_inline
+void *__inline_bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp)
+{
+ const char *pivot;
+ int result;
+
+ while (num > 0) {
+ pivot = base + (num >> 1) * size;
+ result = cmp(key, pivot);
+
+ if (result == 0)
+ return (void *)pivot;
+
+ if (result > 0) {
+ base = pivot + size;
+ num--;
+ }
+ num >>= 1;
+ }
+
+ return NULL;
+}
+
+extern void *bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp);
#endif /* _LINUX_BSEARCH_H */
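A minimal usage sketch: because the comparator is visible at the call site, the compiler can inline it into the search loop above (array contents are illustrative):

#include <linux/bsearch.h>
#include <linux/types.h>

static int cmp_int(const void *key, const void *elt)
{
	const int *a = key, *b = elt;

	return (*a > *b) - (*a < *b);
}

static bool contains(const int *sorted, size_t n, int needle)
{
	return __inline_bsearch(&needle, sorted, n, sizeof(*sorted),
				cmp_int) != NULL;
}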
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 750621e41d1c..1aa8009f6d06 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -15,8 +15,14 @@
/*
* __read_mostly is used to keep rarely changing variables out of frequently
- * updated cachelines. If an architecture doesn't support it, ignore the
- * hint.
+ * updated cachelines. Its use should be reserved for data that is used
+ * frequently in hot paths. Performance traces can help decide when to use
+ * this. You want __read_mostly data to be tightly packed, so that in the
+ * best case multiple frequently read variables for a hot path will be next
+ * to each other in order to reduce the number of cachelines needed to
+ * execute a critical path. We should be mindful and selective of its use.
+ * i.e. if you're going to use it, please supply a *good* justification
+ * in your commit log.
*/
#ifndef __read_mostly
#define __read_mostly
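A one-line illustration of the guidance above (the variable is hypothetical):

/* Written once at boot, read on every fast-path invocation. */
static bool feature_enabled __read_mostly;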
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index a954def26c0d..900b9f4e0605 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -34,7 +34,7 @@
struct can_skb_priv {
int ifindex;
int skbcnt;
- struct can_frame cf[0];
+ struct can_frame cf[];
};
static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb)
diff --git a/include/linux/cb710.h b/include/linux/cb710.h
index 60de3fedd3a7..405657a9a0d5 100644
--- a/include/linux/cb710.h
+++ b/include/linux/cb710.h
@@ -36,7 +36,7 @@ struct cb710_chip {
unsigned slot_mask;
unsigned slots;
spinlock_t irq_lock;
- struct cb710_slot slot[0];
+ struct cb710_slot slot[];
};
/* NOTE: cb710_chip.slots is modified only during device init/exit and
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 525b7c3f1c81..2247e71beb83 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -53,6 +53,8 @@ struct ceph_options {
unsigned long osd_keepalive_timeout; /* jiffies */
unsigned long osd_request_timeout; /* jiffies */
+ u32 osd_req_flags; /* CEPH_OSD_FLAG_*, applied to each OSD request */
+
/*
* any type that can't be simply compared or doesn't need
* to be compared should go beyond this point,
@@ -64,6 +66,7 @@ struct ceph_options {
int num_mon;
char *name;
struct ceph_crypto_key *key;
+ struct rb_root crush_locs;
};
/*
@@ -188,7 +191,7 @@ static inline int calc_pages_for(u64 off, u64 len)
#define RB_CMP3WAY(a, b) ((a) < (b) ? -1 : (a) > (b))
#define DEFINE_RB_INSDEL_FUNCS2(name, type, keyfld, cmpexp, keyexp, nodefld) \
-static void insert_##name(struct rb_root *root, type *t) \
+static bool __insert_##name(struct rb_root *root, type *t) \
{ \
struct rb_node **n = &root->rb_node; \
struct rb_node *parent = NULL; \
@@ -206,11 +209,17 @@ static void insert_##name(struct rb_root *root, type *t) \
else if (cmp > 0) \
n = &(*n)->rb_right; \
else \
- BUG(); \
+ return false; \
} \
\
rb_link_node(&t->nodefld, parent, n); \
rb_insert_color(&t->nodefld, root); \
+ return true; \
+} \
+static void __maybe_unused insert_##name(struct rb_root *root, type *t) \
+{ \
+ if (!__insert_##name(root, t)) \
+ BUG(); \
} \
static void erase_##name(struct rb_root *root, type *t) \
{ \
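With this split, callers that can tolerate duplicates may use the new non-BUG()ing variant directly; a sketch for a hypothetical DEFINE_RB_INSDEL_FUNCS2(foo, struct foo, ...) instantiation:

static int add_foo(struct rb_root *root, struct foo *f)
{
	if (!__insert_foo(root, f))	/* key already present: no BUG() */
		return -EEXIST;
	return 0;
}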
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index dbb8a6959a73..ce4ffeb384d7 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -19,7 +19,7 @@ struct ceph_monmap {
struct ceph_fsid fsid;
u32 epoch;
u32 num_mon;
- struct ceph_entity_inst mon_inst[0];
+ struct ceph_entity_inst mon_inst[];
};
struct ceph_mon_client;
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 9d9f745b98a1..c60b59e9291b 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -8,6 +8,7 @@
#include <linux/mempool.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
+#include <linux/ktime.h>
#include <linux/ceph/types.h>
#include <linux/ceph/osdmap.h>
@@ -135,6 +136,7 @@ struct ceph_osd_req_op {
struct {
u64 expected_object_size;
u64 expected_write_size;
+ u32 flags; /* CEPH_OSD_ALLOC_HINT_FLAG_* */
} alloc_hint;
struct {
u64 snapid;
@@ -164,6 +166,7 @@ struct ceph_osd_request_target {
bool recovery_deletes;
unsigned int flags; /* CEPH_OSD_FLAG_* */
+ bool used_replica;
bool paused;
u32 epoch;
@@ -213,6 +216,8 @@ struct ceph_osd_request {
/* internal */
unsigned long r_stamp; /* jiffies, send or check time */
unsigned long r_start_stamp; /* jiffies */
+ ktime_t r_start_latency; /* ktime_t */
+ ktime_t r_end_latency; /* ktime_t */
int r_attempts;
u32 r_map_dne_bound;
@@ -468,7 +473,8 @@ extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int
extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
unsigned int which,
u64 expected_object_size,
- u64 expected_write_size);
+ u64 expected_write_size,
+ u32 flags);
extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
struct ceph_snap_context *snapc,
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index 5e601975745f..3f4498fef6ad 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -302,9 +302,26 @@ bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
const struct ceph_pg *raw_pgid);
+struct crush_loc {
+ char *cl_type_name;
+ char *cl_name;
+};
+
+struct crush_loc_node {
+ struct rb_node cl_node;
+ struct crush_loc cl_loc; /* pointers into cl_data */
+ char cl_data[];
+};
+
+int ceph_parse_crush_location(char *crush_location, struct rb_root *locs);
+int ceph_compare_crush_locs(struct rb_root *locs1, struct rb_root *locs2);
+void ceph_clear_crush_locs(struct rb_root *locs);
+
+int ceph_get_crush_locality(struct ceph_osdmap *osdmap, int id,
+ struct rb_root *locs);
+
extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
u64 id);
-
extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id);
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 88ed3c5c04c5..3a518fd0eaad 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -465,6 +465,19 @@ enum {
const char *ceph_osd_watch_op_name(int o);
enum {
+ CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_WRITE = 1,
+ CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_WRITE = 2,
+ CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ = 4,
+ CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_READ = 8,
+ CEPH_OSD_ALLOC_HINT_FLAG_APPEND_ONLY = 16,
+ CEPH_OSD_ALLOC_HINT_FLAG_IMMUTABLE = 32,
+ CEPH_OSD_ALLOC_HINT_FLAG_SHORTLIVED = 64,
+ CEPH_OSD_ALLOC_HINT_FLAG_LONGLIVED = 128,
+ CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE = 256,
+ CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE = 512,
+};
+
+enum {
CEPH_OSD_BACKOFF_OP_BLOCK = 1,
CEPH_OSD_BACKOFF_OP_ACK_BLOCK = 2,
CEPH_OSD_BACKOFF_OP_UNBLOCK = 3,
@@ -517,6 +530,7 @@ struct ceph_osd_op {
struct {
__le64 expected_object_size;
__le64 expected_write_size;
+ __le32 flags; /* CEPH_OSD_ALLOC_HINT_FLAG_* */
} __attribute__ ((packed)) alloc_hint;
struct {
__le64 snapid;
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index 2b1b35240074..3f01d43f0598 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -131,6 +131,9 @@ extern void tegra210_set_sata_pll_seq_sw(bool state);
extern void tegra210_put_utmipll_in_iddq(void);
extern void tegra210_put_utmipll_out_iddq(void);
extern int tegra210_clk_handle_mbist_war(unsigned int id);
+extern void tegra210_clk_emc_dll_enable(bool flag);
+extern void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value);
+extern void tegra210_clk_emc_update_setting(u32 emc_src_value);
struct clk;
@@ -143,4 +146,28 @@ void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
void *cb_arg);
int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same);
+struct tegra210_clk_emc_config {
+ unsigned long rate;
+ bool same_freq;
+ u32 value;
+
+ unsigned long parent_rate;
+ u8 parent;
+};
+
+struct tegra210_clk_emc_provider {
+ struct module *owner;
+ struct device *dev;
+
+ struct tegra210_clk_emc_config *configs;
+ unsigned int num_configs;
+
+ int (*set_rate)(struct device *dev,
+ const struct tegra210_clk_emc_config *config);
+};
+
+int tegra210_clk_emc_attach(struct clk *clk,
+ struct tegra210_clk_emc_provider *provider);
+void tegra210_clk_emc_detach(struct clk *clk);
+
#endif /* __LINUX_CLK_TEGRA_H_ */
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index a0eabfbeb0e1..6fa0eea3f530 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -97,7 +97,7 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
struct page **page);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern enum compact_result compaction_suitable(struct zone *zone, int order,
- unsigned int alloc_flags, int classzone_idx);
+ unsigned int alloc_flags, int highest_zoneidx);
extern void defer_compaction(struct zone *zone, int order);
extern bool compaction_deferred(struct zone *zone, int order);
@@ -182,7 +182,7 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
extern int kcompactd_run(int nid);
extern void kcompactd_stop(int nid);
-extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
+extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
#else
static inline void reset_isolation_suitable(pg_data_t *pgdat)
@@ -190,7 +190,7 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat)
}
static inline enum compact_result compaction_suitable(struct zone *zone, int order,
- int alloc_flags, int classzone_idx)
+ int alloc_flags, int highest_zoneidx)
{
return COMPACT_SKIPPED;
}
@@ -232,7 +232,8 @@ static inline void kcompactd_stop(int nid)
{
}
-static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
+static inline void wakeup_kcompactd(pg_data_t *pgdat,
+ int order, int highest_zoneidx)
{
}
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 790c0c6b8552..ee37256ec8bd 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -16,7 +16,7 @@
#define KASAN_ABI_VERSION 5
#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
-/* emulate gcc's __SANITIZE_ADDRESS__ flag */
+/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
#define __SANITIZE_ADDRESS__
#define __no_sanitize_address \
__attribute__((no_sanitize("address", "hwaddress")))
@@ -24,6 +24,15 @@
#define __no_sanitize_address
#endif
+#if __has_feature(thread_sanitizer)
+/* Emulate GCC's __SANITIZE_THREAD__ flag */
+#define __SANITIZE_THREAD__
+#define __no_sanitize_thread \
+ __attribute__((no_sanitize("thread")))
+#else
+#define __no_sanitize_thread
+#endif
+
/*
* Not all versions of clang implement the type-generic versions
* of the builtin overflow checkers. Fortunately, clang implements
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index d7ee4c6bad48..7dd4e0349ef3 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -10,7 +10,8 @@
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
-#if GCC_VERSION < 40600
+/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */
+#if GCC_VERSION < 40800
# error Sorry, your compiler is too old - please upgrade it.
#endif
@@ -126,9 +127,7 @@
#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
-#if GCC_VERSION >= 40800
#define __HAVE_BUILTIN_BSWAP16__
-#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
#if GCC_VERSION >= 70000
@@ -145,6 +144,12 @@
#define __no_sanitize_address
#endif
+#if defined(__SANITIZE_THREAD__) && __has_attribute(__no_sanitize_thread__)
+#define __no_sanitize_thread __attribute__((no_sanitize_thread))
+#else
+#define __no_sanitize_thread
+#endif
+
#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 6325d64e3c3b..30827f82ad62 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -230,60 +230,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
-#include <uapi/linux/types.h>
-
-#define __READ_ONCE_SIZE \
-({ \
- switch (size) { \
- case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
- case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
- case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
- case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
- default: \
- barrier(); \
- __builtin_memcpy((void *)res, (const void *)p, size); \
- barrier(); \
- } \
-})
-
-static __always_inline
-void __read_once_size(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
-
-#ifdef CONFIG_KASAN
-/*
- * We can't declare function 'inline' because __no_sanitize_address confilcts
- * with inlining. Attempt to inline it may cause a build failure.
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
- */
-# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
-#else
-# define __no_kasan_or_inline __always_inline
-#endif
-
-static __no_kasan_or_inline
-void __read_once_size_nocheck(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
-
-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
-{
- switch (size) {
- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
- default:
- barrier();
- __builtin_memcpy((void *)p, (const void *)res, size);
- barrier();
- }
-}
-
/*
* Prevent the compiler from merging or refetching reads or writes. The
* compiler is also forbidden from reordering successive instances of
@@ -293,11 +239,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* statements.
*
* These two macros will also work on aggregate data types like structs or
- * unions. If the size of the accessed data type exceeds the word size of
- * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
- * fall back to memcpy(). There's at least two memcpy()s: one for the
- * __builtin_memcpy() and then one for the macro doing the copy of variable
- * - '__u' allocated on the stack.
+ * unions.
*
* Their two major use cases are: (1) Mediating communication between
* process-level code and irq/NMI handlers, all running on the same CPU,
@@ -308,24 +250,79 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
*/
#include <asm/barrier.h>
#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+
+/**
+ * data_race - mark an expression as containing intentional data races
+ *
+ * This data_race() macro is useful for situations in which data races
+ * should be forgiven. One example is diagnostic code that accesses
+ * shared variables but is not a part of the core synchronization design.
+ *
+ * This macro *does not* affect normal code generation, but is a hint
+ * to tooling that data races here are to be ignored.
+ */
+#define data_race(expr) \
+({ \
+ __unqual_scalar_typeof(({ expr; })) __v = ({ \
+ __kcsan_disable_current(); \
+ expr; \
+ }); \
+ __kcsan_enable_current(); \
+ __v; \
+})
+
+/*
+ * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
+ * atomicity or dependency ordering guarantees. Note that this may result
+ * in tears!
+ */
+#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x))
-#define __READ_ONCE(x, check) \
+#define __READ_ONCE_SCALAR(x) \
({ \
- union { typeof(x) __val; char __c[1]; } __u; \
- if (check) \
- __read_once_size(&(x), __u.__c, sizeof(x)); \
- else \
- __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
- smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
- __u.__val; \
+ __unqual_scalar_typeof(x) __x = __READ_ONCE(x); \
+ smp_read_barrier_depends(); \
+ (typeof(x))__x; \
})
-#define READ_ONCE(x) __READ_ONCE(x, 1)
+
+#define READ_ONCE(x) \
+({ \
+ compiletime_assert_rwonce_type(x); \
+ __READ_ONCE_SCALAR(x); \
+})
+
+#define __WRITE_ONCE(x, val) \
+do { \
+ *(volatile typeof(x) *)&(x) = (val); \
+} while (0)
+
+#define WRITE_ONCE(x, val) \
+do { \
+ compiletime_assert_rwonce_type(x); \
+ __WRITE_ONCE(x, val); \
+} while (0)
+
+static __no_sanitize_or_inline
+unsigned long __read_once_word_nocheck(const void *addr)
+{
+ return __READ_ONCE(*(unsigned long *)addr);
+}
/*
- * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
- * to hide memory access from KASAN.
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
+ * word from memory atomically but without telling KASAN/KCSAN. This is
+ * usually used by unwinding code when walking the stack of a running process.
*/
-#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
+#define READ_ONCE_NOCHECK(x) \
+({ \
+ unsigned long __x; \
+ compiletime_assert(sizeof(x) == sizeof(__x), \
+ "Unsupported access size for READ_ONCE_NOCHECK()."); \
+ __x = __read_once_word_nocheck(&(x)); \
+ smp_read_barrier_depends(); \
+ (typeof(x))__x; \
+})
static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
@@ -334,14 +331,6 @@ unsigned long read_word_at_a_time(const void *addr)
return *(unsigned long *)addr;
}
-#define WRITE_ONCE(x, val) \
-({ \
- union { typeof(x) __val; char __c[1]; } __u = \
- { .__val = (__force typeof(x)) (val) }; \
- __write_once_size(&(x), __u.__c, sizeof(x)); \
- __u.__val; \
-})
-
#endif /* __KERNEL__ */
/*
@@ -406,6 +395,16 @@ static inline void *offset_to_ptr(const int *off)
compiletime_assert(__native_word(t), \
"Need native word sized stores/loads for atomicity.")
+/*
+ * Yes, this permits 64-bit accesses on 32-bit architectures. These will
+ * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
+ * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
+ * (e.g. a virtual address) and a strong prevailing wind.
+ */
+#define compiletime_assert_rwonce_type(t) \
+ compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \
+ "Unsupported access size for {READ,WRITE}_ONCE().")
+
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
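A minimal sketch pulling the reworked accessors together; the shared counter is illustrative:

static unsigned long progress;

static void producer_step(void)
{
	WRITE_ONCE(progress, READ_ONCE(progress) + 1);
}

static unsigned long consumer_peek(void)
{
	return READ_ONCE(progress);	/* tear-free for native word sizes */
}

static void debug_report(void)
{
	/* Racy by design; data_race() tells KCSAN to forgive it. */
	pr_debug("progress ~%lu\n", data_race(progress));
}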
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 6fcf73200b67..21aed0981edf 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -171,6 +171,38 @@ struct ftrace_likely_data {
*/
#define noinline_for_stack noinline
+/*
+ * Sanitizer helper attributes: Because using __always_inline and
+ * __no_sanitize_* conflict, provide helper attributes that will either expand
+ * to __no_sanitize_* in compilation units where instrumentation is enabled
+ * (__SANITIZE_*__), or __always_inline in compilation units without
+ * instrumentation (__SANITIZE_*__ undefined).
+ */
+#ifdef __SANITIZE_ADDRESS__
+/*
+ * We can't declare function 'inline' because __no_sanitize_address conflicts
+ * with inlining. Attempt to inline it may cause a build failure.
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+ */
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kasan_or_inline
+#else
+# define __no_kasan_or_inline __always_inline
+#endif
+
+#define __no_kcsan __no_sanitize_thread
+#ifdef __SANITIZE_THREAD__
+# define __no_kcsan_or_inline __no_kcsan notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kcsan_or_inline
+#else
+# define __no_kcsan_or_inline __always_inline
+#endif
+
+#ifndef __no_sanitize_or_inline
+#define __no_sanitize_or_inline __always_inline
+#endif
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
@@ -218,6 +250,53 @@ struct ftrace_likely_data {
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+/*
+ * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
+ * non-scalar types unchanged.
+ */
+#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900) || defined(__CHECKER__)
+/*
+ * We build this out of a couple of helper macros in a vain attempt to
+ * help you keep your lunch down while reading it.
+ */
+#define __pick_scalar_type(x, type, otherwise) \
+ __builtin_choose_expr(__same_type(x, type), (type)0, otherwise)
+
+/*
+ * 'char' is not type-compatible with either 'signed char' or 'unsigned char',
+ * so we include the naked type here as well as the signed/unsigned variants.
+ */
+#define __pick_integer_type(x, type, otherwise) \
+ __pick_scalar_type(x, type, \
+ __pick_scalar_type(x, unsigned type, \
+ __pick_scalar_type(x, signed type, otherwise)))
+
+#define __unqual_scalar_typeof(x) typeof( \
+ __pick_integer_type(x, char, \
+ __pick_integer_type(x, short, \
+ __pick_integer_type(x, int, \
+ __pick_integer_type(x, long, \
+ __pick_integer_type(x, long long, x))))))
+#else
+/*
+ * If supported, prefer C11 _Generic for better compile-times. As above, 'char'
+ * is not type-compatible with 'signed char', and we define a separate case.
+ */
+#define __scalar_type_to_expr_cases(type) \
+ unsigned type: (unsigned type)0, \
+ signed type: (signed type)0
+
+#define __unqual_scalar_typeof(x) typeof( \
+ _Generic((x), \
+ char: (char)0, \
+ __scalar_type_to_expr_cases(char), \
+ __scalar_type_to_expr_cases(short), \
+ __scalar_type_to_expr_cases(int), \
+ __scalar_type_to_expr_cases(long), \
+ __scalar_type_to_expr_cases(long long), \
+ default: (x)))
+#endif
+
/* Is this type a native word size -- useful for atomic operations */
#define __native_word(t) \
(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
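The effect of __unqual_scalar_typeof() can be seen in a small sketch: the qualifier is stripped from the result type, so the local copy below is a plain int rather than another volatile access:

static int example(volatile int *p)
{
	__unqual_scalar_typeof(*p) v = *p;	/* v has type 'int' */

	return v;
}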
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 8cac62ee6add..981b880d5b60 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -33,13 +33,13 @@ static inline void user_exit(void)
}
/* Called with interrupts disabled. */
-static inline void user_enter_irqoff(void)
+static __always_inline void user_enter_irqoff(void)
{
if (context_tracking_enabled())
__context_tracking_enter(CONTEXT_USER);
}
-static inline void user_exit_irqoff(void)
+static __always_inline void user_exit_irqoff(void)
{
if (context_tracking_enabled())
__context_tracking_exit(CONTEXT_USER);
@@ -75,7 +75,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
* is enabled. If context tracking is disabled, returns
* CONTEXT_DISABLED. This should be used primarily for debugging.
*/
-static inline enum ctx_state ct_state(void)
+static __always_inline enum ctx_state ct_state(void)
{
return context_tracking_enabled() ?
this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index e7fe6678b7ad..65a60d3313b0 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -26,12 +26,12 @@ struct context_tracking {
extern struct static_key_false context_tracking_key;
DECLARE_PER_CPU(struct context_tracking, context_tracking);
-static inline bool context_tracking_enabled(void)
+static __always_inline bool context_tracking_enabled(void)
{
return static_branch_unlikely(&context_tracking_key);
}
-static inline bool context_tracking_enabled_cpu(int cpu)
+static __always_inline bool context_tracking_enabled_cpu(int cpu)
{
return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
}
@@ -41,7 +41,7 @@ static inline bool context_tracking_enabled_this_cpu(void)
return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
}
-static inline bool context_tracking_in_user(void)
+static __always_inline bool context_tracking_in_user(void)
{
return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
}
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 193cc9dbf448..e3e9f0e3a878 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -100,10 +100,12 @@ union coresight_dev_subtype {
};
/**
- * struct coresight_platform_data - data harvested from the DT specification
- * @nr_inport: number of input ports for this component.
- * @nr_outport: number of output ports for this component.
- * @conns: Array of nr_outport connections from this component
+ * struct coresight_platform_data - data harvested from the firmware
+ * specification.
+ *
+ * @nr_inport: Number of elements for the input connections.
+ * @nr_outport: Number of elements for the output connections.
+ * @conns: Sparse array of nr_outport connections from this component.
*/
struct coresight_platform_data {
int nr_inport;
@@ -140,12 +142,28 @@ struct coresight_desc {
* @child_fwnode: remote component's fwnode handle.
* @child_dev: a @coresight_device representation of the component
* connected to @outport.
+ * @link: Representation of the connection as a sysfs link.
*/
struct coresight_connection {
int outport;
int child_port;
struct fwnode_handle *child_fwnode;
struct coresight_device *child_dev;
+ struct coresight_sysfs_link *link;
+};
+
+/**
+ * struct coresight_sysfs_link - representation of a connection in sysfs.
+ * @orig: Originating (master) coresight device for the link.
+ * @orig_name: Name to use for the link orig->target.
+ * @target: Target (slave) coresight device for the link.
+ * @target_name: Name to use for the link target->orig.
+ */
+struct coresight_sysfs_link {
+ struct coresight_device *orig;
+ const char *orig_name;
+ struct coresight_device *target;
+ const char *target_name;
};
/**
@@ -165,6 +183,9 @@ struct coresight_connection {
* @ea: Device attribute for sink representation under PMU directory.
* @ect_dev: Associated cross trigger device. Not part of the trace data
* path or connections.
+ * @nr_links: number of sysfs links created to other components from this
+ * device. These will appear in the "connections" group.
+ * @has_conns_grp: Have added a "connections" group for sysfs links.
*/
struct coresight_device {
struct coresight_platform_data *pdata;
@@ -180,6 +201,9 @@ struct coresight_device {
struct dev_ext_attribute *ea;
/* cross trigger handling */
struct coresight_device *ect_dev;
+ /* sysfs links between components */
+ int nr_links;
+ bool has_conns_grp;
};
/*
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index 65501d8f9778..a3bdc8a98f2c 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -63,18 +63,10 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
struct cpuidle_driver;
#ifdef CONFIG_CPU_IDLE_THERMAL
-int cpuidle_cooling_register(struct cpuidle_driver *drv);
-int cpuidle_of_cooling_register(struct device_node *np,
- struct cpuidle_driver *drv);
+void cpuidle_cooling_register(struct cpuidle_driver *drv);
#else /* CONFIG_CPU_IDLE_THERMAL */
-static inline int cpuidle_cooling_register(struct cpuidle_driver *drv)
+static inline void cpuidle_cooling_register(struct cpuidle_driver *drv)
{
- return 0;
-}
-static inline int cpuidle_of_cooling_register(struct device_node *np,
- struct cpuidle_driver *drv)
-{
- return 0;
}
#endif /* CONFIG_CPU_IDLE_THERMAL */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 67d5950bd878..3494f6763597 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -367,7 +367,7 @@ struct cpufreq_driver {
/* platform specific boost support code */
bool boost_enabled;
- int (*set_boost)(int state);
+ int (*set_boost)(struct cpufreq_policy *policy, int state);
};
/* flags */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 24b3a77810b6..191772d4a4d7 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -102,6 +102,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
+ CPUHP_AP_IRQ_RISCV_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_MICROCODE_LOADER,
@@ -142,6 +143,7 @@ enum cpuhp_state {
CPUHP_AP_ARM_XEN_STARTING,
CPUHP_AP_ARM_KVMPV_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
CPUHP_AP_X86_TBOOT_DYING,
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index bc156285d097..a5192b718dbe 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -5,9 +5,10 @@
#include <linux/kexec.h>
#include <linux/proc_fs.h>
#include <linux/elf.h>
+#include <linux/pgtable.h>
#include <uapi/linux/vmcore.h>
-#include <asm/pgtable.h> /* for pgprot_t */
+#include <linux/pgtable.h> /* for pgprot_t */
#ifdef CONFIG_CRASH_DUMP
#define ELFCORE_ADDR_MAX (-1ULL)
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 54741295c70b..33c16f2de7f6 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -87,7 +87,7 @@ struct crush_rule_mask {
struct crush_rule {
__u32 len;
struct crush_rule_mask mask;
- struct crush_rule_step steps[0];
+ struct crush_rule_step steps[];
};
#define crush_rule_size(len) (sizeof(struct crush_rule) + \
@@ -301,6 +301,12 @@ struct crush_map {
__u32 *choose_tries;
#else
+ /* device/bucket type id -> type name (CrushWrapper::type_map) */
+ struct rb_root type_names;
+
+ /* device/bucket id -> name (CrushWrapper::name_map) */
+ struct rb_root names;
+
/* CrushWrapper::choose_args */
struct rb_root choose_args;
#endif
@@ -342,4 +348,10 @@ struct crush_work {
struct crush_work_bucket **work; /* Per-bucket working store */
};
+#ifdef __KERNEL__
+/* osdmap.c */
+void clear_crush_names(struct rb_root *root);
+void clear_choose_args(struct crush_map *c);
+#endif
+
#endif
diff --git a/include/linux/dax.h b/include/linux/dax.h
index d7af5d243f24..6904d4e0b2e0 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -5,7 +5,6 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
-#include <asm/pgtable.h>
/* Flag for synchronous flush */
#define DAXDEV_F_SYNC (1UL << 0)
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 257ab3c92cb8..e7e45f0cc7da 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -12,7 +12,7 @@ extern int debug_locks __read_mostly;
extern int debug_locks_silent __read_mostly;
-static inline int __debug_locks_off(void)
+static __always_inline int __debug_locks_off(void)
{
return xchg(&debug_locks, 0);
}
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
index 5aad06b4ca7b..3028b644b4fb 100644
--- a/include/linux/dev_printk.h
+++ b/include/linux/dev_printk.h
@@ -109,7 +109,8 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
#define dev_info(dev, fmt, ...) \
_dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define dev_dbg(dev, fmt, ...) \
dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__)
#elif defined(DEBUG)
@@ -181,7 +182,8 @@ do { \
dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
#define dev_info_ratelimited(dev, fmt, ...) \
dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define dev_dbg_ratelimited(dev, fmt, ...) \
do { \
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index af48d9da3916..8750f2dc5613 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -332,6 +332,8 @@ void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
+u64 dm_start_time_ns_from_clone(struct bio *bio);
+
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
@@ -557,13 +559,8 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
-#ifdef CONFIG_DM_DEBUG
-#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
+#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
-#else
-#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
0 : scnprintf(result + sz, maxlen - sz, x))
diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
index 3c8b7d274bd9..29d255fdd5d6 100644
--- a/include/linux/dm-bufio.h
+++ b/include/linux/dm-bufio.h
@@ -119,6 +119,11 @@ int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c);
int dm_bufio_issue_flush(struct dm_bufio_client *c);
/*
+ * Send a discard request to the underlying device.
+ */
+int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count);
+
+/*
* Like dm_bufio_release but also move the buffer to the new
* block. dm_bufio_write_dirty_buffers is needed to commit the new block.
*/
@@ -132,6 +137,13 @@ void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block);
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block);
/*
+ * Free the given range of buffers.
+ * This is just a hint; if the buffer is in use or dirty, this function
+ * does nothing.
+ */
+void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks);
+
+/*
* Set the minimum number of buffers before cleanup happens.
*/
void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
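A sketch combining the two new hints: discard a range on the underlying device, then drop any clean cached buffers for it (client and range are illustrative):

static int discard_range(struct dm_bufio_client *c, sector_t block,
			 sector_t count)
{
	int r = dm_bufio_issue_discard(c, block, count);

	if (!r)
		dm_bufio_forget_buffers(c, block, count);	/* best effort */
	return r;
}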
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 24b8684aa21d..136f984df0d9 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -67,6 +67,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
}
u64 dma_direct_get_required_mask(struct device *dev);
+gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
+ u64 *phys_mask);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 330ad58fbf4d..78f677cf45ab 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -609,6 +609,86 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
+/**
+ * dma_map_sgtable - Map the given buffer for DMA
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ * @attrs: Optional DMA attributes for the map operation
+ *
+ * Maps a buffer described by a scatterlist stored in the given sg_table
+ * object for the @dir DMA operation by the @dev device. On success,
+ * ownership of the buffer is transferred to the DMA domain. One has to
+ * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move
+ * ownership of the buffer back to the CPU domain before the CPU
+ * touches the buffer.
+ *
+ * Returns 0 on success or -EINVAL if the buffer could not be mapped.
+ */
+static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ int nents;
+
+ nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
+ if (nents <= 0)
+ return -EINVAL;
+ sgt->nents = nents;
+ return 0;
+}
+
+/**
+ * dma_unmap_sgtable - Unmap the given buffer for DMA
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ * @attrs: Optional DMA attributes for the unmap operation
+ *
+ * Unmaps a buffer described by a scatterlist stored in the given sg_table
+ * object for the @dir DMA operation by the @dev device. After this function
+ * the ownership of the buffer is transferred back to the CPU domain.
+ */
+static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
+}
+
+/**
+ * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ *
+ * Performs the needed cache synchronization and moves the ownership of the
+ * buffer back to the CPU domain, so it is safe to perform any access to it
+ * by the CPU. Before doing any further DMA operations, one has to transfer
+ * the ownership of the buffer back to the DMA domain by calling
+ * dma_sync_sgtable_for_device().
+ */
+static inline void dma_sync_sgtable_for_cpu(struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
+}
+
+/**
+ * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ *
+ * Performs the needed cache synchronization and moves the ownership of the
+ * buffer back to the DMA domain, so it is safe to perform the DMA operation.
+ * Once finished, one has to call dma_sync_sgtable_for_cpu() or
+ * dma_unmap_sgtable().
+ */
+static inline void dma_sync_sgtable_for_device(struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
+}
+
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
@@ -630,9 +710,9 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);
-bool dma_in_atomic_pool(void *start, size_t size);
-void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
-bool dma_free_from_pool(void *start, size_t size);
+void *dma_alloc_from_pool(struct device *dev, size_t size,
+ struct page **ret_page, gfp_t flags);
+bool dma_free_from_pool(struct device *dev, void *start, size_t size);
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
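A minimal sketch of the sg_table helper lifecycle described above; the device-programming step is elided:

static int do_transfer(struct device *dev, struct sg_table *sgt)
{
	int ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);

	if (ret)
		return ret;

	/* ... program the device with sgt->sgl / sgt->nents ... */

	/* Reclaim CPU ownership once the device is done. */
	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);

	/* Unmapping also returns ownership to the CPU domain. */
	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
	return 0;
}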
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index b59f1b6be3e9..ca09a4e07d2d 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -3,7 +3,7 @@
#define _LINUX_DMA_NONCOHERENT_H 1
#include <linux/dma-mapping.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
#include <asm/dma-coherence.h>
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index e1c03339918f..6283917edd90 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -153,7 +153,7 @@ struct dma_interleaved_template {
bool dst_sgl;
size_t numf;
size_t frame_size;
- struct data_chunk sgl[0];
+ struct data_chunk sgl[];
};
/**
@@ -535,7 +535,7 @@ struct dmaengine_unmap_data {
struct device *dev;
struct kref kref;
size_t len;
- dma_addr_t addr[0];
+ dma_addr_t addr[];
};
struct dma_async_tx_descriptor;
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 4cf02ecd67de..abcd5fde30eb 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -48,7 +48,7 @@ struct _ddebug {
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG_CORE)
int ddebug_add_module(struct _ddebug *tab, unsigned int n,
const char *modname);
extern int ddebug_remove_module(const char *mod_name);
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 0f20b986b0ab..6eb7d55d7c3d 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -31,14 +31,6 @@ struct device;
extern int edac_op_state;
struct bus_type *edac_get_sysfs_subsys(void);
-int edac_get_report_status(void);
-void edac_set_report_status(int new);
-
-enum {
- EDAC_REPORTING_ENABLED,
- EDAC_REPORTING_DISABLED,
- EDAC_REPORTING_FORCE
-};
static inline void opstate_init(void)
{
diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h
index 594d4e78654f..69b136e4dd2b 100644
--- a/include/linux/elfnote.h
+++ b/include/linux/elfnote.h
@@ -54,7 +54,7 @@
.popsection ;
#define ELFNOTE(name, type, desc) \
- ELFNOTE_START(name, type, "") \
+ ELFNOTE_START(name, type, "a") \
desc ; \
ELFNOTE_END
diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h
index 8fbe4f97ffad..1e7bf78cb382 100644
--- a/include/linux/ethtool_netlink.h
+++ b/include/linux/ethtool_netlink.h
@@ -67,5 +67,5 @@ static inline int ethnl_cable_test_step(struct phy_device *phydev, u32 first,
{
return -EOPNOTSUPP;
}
-#endif /* IS_ENABLED(ETHTOOL_NETLINK) */
+#endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */
#endif /* _LINUX_ETHTOOL_NETLINK_H_ */
diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h
new file mode 100644
index 000000000000..4e624c466583
--- /dev/null
+++ b/include/linux/fiemap.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FIEMAP_H
+#define _LINUX_FIEMAP_H 1
+
+#include <uapi/linux/fiemap.h>
+#include <linux/fs.h>
+
+struct fiemap_extent_info {
+ unsigned int fi_flags; /* Flags as passed from user */
+ unsigned int fi_extents_mapped; /* Number of mapped extents */
+ unsigned int fi_extents_max; /* Size of fiemap_extent array */
+ struct fiemap_extent __user *fi_extents_start; /* Start of
+ fiemap_extent array */
+};
+
+int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 *len, u32 supported_flags);
+int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
+ u64 phys, u64 len, u32 flags);
+
+int generic_block_fiemap(struct inode *inode,
+ struct fiemap_extent_info *fieinfo, u64 start, u64 len,
+ get_block_t *get_block);
+
+#endif /* _LINUX_FIEMAP_H */
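A hedged sketch of a ->fiemap() handler built on these helpers; the single-extent mapping is purely illustrative:

static int example_fiemap(struct inode *inode,
			  struct fiemap_extent_info *fieinfo,
			  u64 start, u64 len)
{
	int ret = fiemap_prep(inode, fieinfo, start, &len, 0);

	if (ret)
		return ret;

	/* Report one extent covering the whole range (illustrative). */
	ret = fiemap_fill_next_extent(fieinfo, start, start, len,
				      FIEMAP_EXTENT_LAST);
	return ret < 0 ? ret : 0;
}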
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 4bbd0afd91b7..cb3e2c06ed8a 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -12,7 +12,6 @@
struct firmware {
size_t size;
const u8 *data;
- struct page **pages;
/* firmware loader private fields */
void *priv;
diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h
index 17ba4e405129..3fa418a4ca67 100644
--- a/include/linux/firmware/imx/sci.h
+++ b/include/linux/firmware/imx/sci.h
@@ -11,7 +11,6 @@
#define _SC_SCI_H
#include <linux/firmware/imx/ipc.h>
-#include <linux/firmware/imx/types.h>
#include <linux/firmware/imx/svc/misc.h>
#include <linux/firmware/imx/svc/pm.h>
diff --git a/include/linux/firmware/imx/types.h b/include/linux/firmware/imx/types.h
deleted file mode 100644
index 80821100e85f..000000000000
--- a/include/linux/firmware/imx/types.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (C) 2016 Freescale Semiconductor, Inc.
- * Copyright 2017~2018 NXP
- *
- * Header file containing types used across multiple service APIs.
- */
-
-#ifndef _SC_TYPES_H
-#define _SC_TYPES_H
-
-/*
- * This type is used to indicate a control.
- */
-enum imx_sc_ctrl {
- IMX_SC_C_TEMP = 0,
- IMX_SC_C_TEMP_HI = 1,
- IMX_SC_C_TEMP_LOW = 2,
- IMX_SC_C_PXL_LINK_MST1_ADDR = 3,
- IMX_SC_C_PXL_LINK_MST2_ADDR = 4,
- IMX_SC_C_PXL_LINK_MST_ENB = 5,
- IMX_SC_C_PXL_LINK_MST1_ENB = 6,
- IMX_SC_C_PXL_LINK_MST2_ENB = 7,
- IMX_SC_C_PXL_LINK_SLV1_ADDR = 8,
- IMX_SC_C_PXL_LINK_SLV2_ADDR = 9,
- IMX_SC_C_PXL_LINK_MST_VLD = 10,
- IMX_SC_C_PXL_LINK_MST1_VLD = 11,
- IMX_SC_C_PXL_LINK_MST2_VLD = 12,
- IMX_SC_C_SINGLE_MODE = 13,
- IMX_SC_C_ID = 14,
- IMX_SC_C_PXL_CLK_POLARITY = 15,
- IMX_SC_C_LINESTATE = 16,
- IMX_SC_C_PCIE_G_RST = 17,
- IMX_SC_C_PCIE_BUTTON_RST = 18,
- IMX_SC_C_PCIE_PERST = 19,
- IMX_SC_C_PHY_RESET = 20,
- IMX_SC_C_PXL_LINK_RATE_CORRECTION = 21,
- IMX_SC_C_PANIC = 22,
- IMX_SC_C_PRIORITY_GROUP = 23,
- IMX_SC_C_TXCLK = 24,
- IMX_SC_C_CLKDIV = 25,
- IMX_SC_C_DISABLE_50 = 26,
- IMX_SC_C_DISABLE_125 = 27,
- IMX_SC_C_SEL_125 = 28,
- IMX_SC_C_MODE = 29,
- IMX_SC_C_SYNC_CTRL0 = 30,
- IMX_SC_C_KACHUNK_CNT = 31,
- IMX_SC_C_KACHUNK_SEL = 32,
- IMX_SC_C_SYNC_CTRL1 = 33,
- IMX_SC_C_DPI_RESET = 34,
- IMX_SC_C_MIPI_RESET = 35,
- IMX_SC_C_DUAL_MODE = 36,
- IMX_SC_C_VOLTAGE = 37,
- IMX_SC_C_PXL_LINK_SEL = 38,
- IMX_SC_C_OFS_SEL = 39,
- IMX_SC_C_OFS_AUDIO = 40,
- IMX_SC_C_OFS_PERIPH = 41,
- IMX_SC_C_OFS_IRQ = 42,
- IMX_SC_C_RST0 = 43,
- IMX_SC_C_RST1 = 44,
- IMX_SC_C_SEL0 = 45,
- IMX_SC_C_LAST
-};
-
-#endif /* _SC_TYPES_H */
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
index 013ae4819deb..682dbf694007 100644
--- a/include/linux/firmware/intel/stratix10-smc.h
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -54,32 +54,25 @@
* Secure monitor software doesn't recognize the request.
*
* INTEL_SIP_SMC_STATUS_OK:
- * FPGA configuration completed successfully,
- * In case of FPGA configuration write operation, it means secure monitor
- * software can accept the next chunk of FPGA configuration data.
+ * Secure monitor software accepts the service client's request.
*
- * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY:
- * In case of FPGA configuration write operation, it means secure monitor
- * software is still processing previous data & can't accept the next chunk
- * of data. Service driver needs to issue
- * INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE call to query the
- * completed block(s).
+ * INTEL_SIP_SMC_STATUS_BUSY:
+ * Secure monitor software is still processing the service client's request.
*
- * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR:
- * There is error during the FPGA configuration process.
+ * INTEL_SIP_SMC_STATUS_REJECTED:
+ * Secure monitor software rejects the service client's request.
*
- * INTEL_SIP_SMC_REG_ERROR:
- * There is error during a read or write operation of the protected registers.
+ * INTEL_SIP_SMC_STATUS_ERROR:
+ * An error occurred while processing the service request.
*
* INTEL_SIP_SMC_RSU_ERROR:
- * There is error during a remote status update.
+ * An error occurred while processing the remote status update request.
*/
#define INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
#define INTEL_SIP_SMC_STATUS_OK 0x0
-#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY 0x1
-#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_REJECTED 0x2
-#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR 0x4
-#define INTEL_SIP_SMC_REG_ERROR 0x5
+#define INTEL_SIP_SMC_STATUS_BUSY 0x1
+#define INTEL_SIP_SMC_STATUS_REJECTED 0x2
+#define INTEL_SIP_SMC_STATUS_ERROR 0x4
#define INTEL_SIP_SMC_RSU_ERROR 0x7
/**
@@ -95,7 +88,7 @@
* a2-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK, or INTEL_SIP_SMC_STATUS_ERROR.
* a1-3: not used.
*/
#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_START 1
@@ -115,8 +108,8 @@
* a3-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or
- * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or
+ * INTEL_SIP_SMC_STATUS_ERROR.
* a1: 64bit physical address of 1st completed memory block if any completed
* block, otherwise zero value.
* a2: 64bit physical address of 2nd completed memory block if any completed
@@ -133,15 +126,15 @@
*
* Sync call used by service driver at EL1 to track the completed write
* transactions. This request is called after INTEL_SIP_SMC_FPGA_CONFIG_WRITE
- * call returns INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY.
+ * call returns INTEL_SIP_SMC_STATUS_BUSY.
*
* Call register usage:
* a0: INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE.
* a1-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or
- * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or
+ * INTEL_SIP_SMC_STATUS_ERROR.
* a1: 64bit physical address of 1st completed memory block.
* a2: 64bit physical address of 2nd completed memory block if
* any completed block, otherwise zero value.
@@ -164,8 +157,8 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
* a1-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or
- * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or
+ * INTEL_SIP_SMC_STATUS_ERROR.
* a1-3: not used.
*/
#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_ISDONE 4
@@ -183,7 +176,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
* a1-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR.
* a1: start of physical address of reserved memory block.
* a2: size of reserved memory block.
* a3: not used.
@@ -203,7 +196,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
* a1-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR.
* a1-3: not used.
*/
#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK 6
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index 59bc6e2af693..64213c3e82f5 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -18,45 +18,37 @@
/**
* Status of the sent command, expressed as a bit number
*
- * SVC_COMMAND_STATUS_RECONFIG_REQUEST_OK:
- * Secure firmware accepts the request of FPGA reconfiguration.
+ * SVC_STATUS_OK:
+ * Secure firmware accepts the request issued by one of the service clients.
*
- * SVC_STATUS_RECONFIG_BUFFER_SUBMITTED:
- * Service client successfully submits FPGA configuration
- * data buffer to secure firmware.
+ * SVC_STATUS_BUFFER_SUBMITTED:
+ * Service client successfully submits a data buffer to secure firmware.
*
- * SVC_COMMAND_STATUS_RECONFIG_BUFFER_DONE:
+ * SVC_STATUS_BUFFER_DONE:
* Secure firmware has finished processing the data and is ready to
* accept the next WRITE transaction.
*
- * SVC_COMMAND_STATUS_RECONFIG_COMPLETED:
- * Secure firmware completes FPGA configuration successfully, FPGA should
- * be in user mode.
+ * SVC_STATUS_COMPLETED:
+ * Secure firmware completes the service request successfully. In the case
+ * of FPGA configuration, the FPGA should be in user mode.
*
- * SVC_COMMAND_STATUS_RECONFIG_BUSY:
- * FPGA configuration is still in process.
+ * SVC_STATUS_BUSY:
+ * The service request is still being processed.
*
- * SVC_COMMAND_STATUS_RECONFIG_ERROR:
- * Error encountered during FPGA configuration.
+ * SVC_STATUS_ERROR:
+ * An error was encountered while processing the service request.
*
- * SVC_STATUS_RSU_OK:
- * Secure firmware accepts the request of remote status update (RSU).
- *
- * SVC_STATUS_RSU_ERROR:
- * Error encountered during remote system update.
- *
- * SVC_STATUS_RSU_NO_SUPPORT:
- * Secure firmware doesn't support RSU retry or notify feature.
+ * SVC_STATUS_NO_SUPPORT:
+ * Secure firmware doesn't support the requested feature, such as RSU retry
+ * or RSU notify.
*/
-#define SVC_STATUS_RECONFIG_REQUEST_OK 0
-#define SVC_STATUS_RECONFIG_BUFFER_SUBMITTED 1
-#define SVC_STATUS_RECONFIG_BUFFER_DONE 2
-#define SVC_STATUS_RECONFIG_COMPLETED 3
-#define SVC_STATUS_RECONFIG_BUSY 4
-#define SVC_STATUS_RECONFIG_ERROR 5
-#define SVC_STATUS_RSU_OK 6
-#define SVC_STATUS_RSU_ERROR 7
-#define SVC_STATUS_RSU_NO_SUPPORT 8
+#define SVC_STATUS_OK 0
+#define SVC_STATUS_BUFFER_SUBMITTED 1
+#define SVC_STATUS_BUFFER_DONE 2
+#define SVC_STATUS_COMPLETED 3
+#define SVC_STATUS_BUSY 4
+#define SVC_STATUS_ERROR 5
+#define SVC_STATUS_NO_SUPPORT 6
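Editor's note: a sketch of how a service client's receive callback might branch on these codes. Per the "bit number" note above, the status is assumed to be delivered as BIT(SVC_STATUS_*); the completion and callback names are hypothetical.

	static DECLARE_COMPLETION(foo_done);	/* hypothetical */

	static void foo_svc_receive_cb(struct stratix10_svc_client *client,
				       struct stratix10_svc_cb_data *data)
	{
		if (data->status & BIT(SVC_STATUS_COMPLETED))
			complete(&foo_done);
		else if (data->status & BIT(SVC_STATUS_ERROR))
			dev_err(client->dev, "service request failed\n");
		/* BIT(SVC_STATUS_BUSY): leave the request pending, poll later */
	}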
/**
* Flag bit for COMMAND_RECONFIG
@@ -84,32 +76,29 @@ struct stratix10_svc_chan;
* @COMMAND_NOOP: do 'dummy' request for integration/debug/trouble-shooting
*
* @COMMAND_RECONFIG: ask for FPGA configuration preparation, return status
- * is SVC_STATUS_RECONFIG_REQUEST_OK
+ * is SVC_STATUS_OK
*
* @COMMAND_RECONFIG_DATA_SUBMIT: submit buffer(s) of bit-stream data for the
- * FPGA configuration, return status is SVC_STATUS_RECONFIG_BUFFER_SUBMITTED,
- * or SVC_STATUS_RECONFIG_ERROR
+ * FPGA configuration, return status is SVC_STATUS_BUFFER_SUBMITTED or
+ * SVC_STATUS_ERROR
*
* @COMMAND_RECONFIG_DATA_CLAIM: check the status of the configuration, return
- * status is SVC_STATUS_RECONFIG_COMPLETED, or SVC_STATUS_RECONFIG_BUSY, or
- * SVC_STATUS_RECONFIG_ERROR
+ * status is SVC_STATUS_COMPLETED, or SVC_STATUS_BUSY, or SVC_STATUS_ERROR
*
* @COMMAND_RECONFIG_STATUS: check the status of the configuration, return
- * status is SVC_STATUS_RECONFIG_COMPLETED, or SVC_STATUS_RECONFIG_BUSY, or
- * SVC_STATUS_RECONFIG_ERROR
+ * status is SVC_STATUS_COMPLETED, or SVC_STATUS_BUSY, or SVC_STATUS_ERROR
*
* @COMMAND_RSU_STATUS: request remote system update boot log, return status
* is log data or SVC_STATUS_ERROR
*
* @COMMAND_RSU_UPDATE: set the offset of the bitstream to boot after reboot,
- * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
*
* @COMMAND_RSU_NOTIFY: report the status of hard processor system
- * software to firmware, return status is SVC_STATUS_RSU_OK or
- * SVC_STATUS_RSU_ERROR
+ * software to firmware, return status is SVC_STATUS_OK or
+ * SVC_STATUS_ERROR
*
* @COMMAND_RSU_RETRY: query firmware for the current image's retry counter,
- * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
*/
enum stratix10_svc_command_code {
COMMAND_NOOP = 0,
diff --git a/include/linux/firmware/trusted_foundations.h b/include/linux/firmware/trusted_foundations.h
index 2549a2db56aa..be5984bda592 100644
--- a/include/linux/firmware/trusted_foundations.h
+++ b/include/linux/firmware/trusted_foundations.h
@@ -32,6 +32,7 @@
#define TF_PM_MODE_LP1_NO_MC_CLK 2
#define TF_PM_MODE_LP2 3
#define TF_PM_MODE_LP2_NOFLUSH_L2 4
+#define TF_PM_MODE_NONE 5
struct trusted_foundations_platform_data {
unsigned int version_major;
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 8efa5ac22d7e..5968df82b991 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -42,6 +42,8 @@
#define ZYNQMP_PM_MAX_QOS 100U
+#define GSS_NUM_REGS (4)
+
/* Node capabilities */
#define ZYNQMP_PM_CAPABILITY_ACCESS 0x1U
#define ZYNQMP_PM_CAPABILITY_CONTEXT 0x2U
@@ -62,6 +64,7 @@
enum pm_api_id {
PM_GET_API_VERSION = 1,
+ PM_SYSTEM_SHUTDOWN = 12,
PM_REQUEST_NODE = 13,
PM_RELEASE_NODE,
PM_SET_REQUIREMENT,
@@ -107,6 +110,12 @@ enum pm_ioctl_id {
IOCTL_GET_PLL_FRAC_MODE,
IOCTL_SET_PLL_FRAC_DATA,
IOCTL_GET_PLL_FRAC_DATA,
+ IOCTL_WRITE_GGS = 12,
+ IOCTL_READ_GGS = 13,
+ IOCTL_WRITE_PGGS = 14,
+ IOCTL_READ_PGGS = 15,
+ /* Set healthy bit value */
+ IOCTL_SET_BOOT_HEALTH_STATUS = 17,
};
enum pm_query_id {
@@ -279,6 +288,18 @@ enum dll_reset_type {
PM_DLL_RESET_PULSE,
};
+enum zynqmp_pm_shutdown_type {
+ ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN,
+ ZYNQMP_PM_SHUTDOWN_TYPE_RESET,
+ ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+};
+
+enum zynqmp_pm_shutdown_subtype {
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
+};
+
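Editor's note: a reboot path could pair the two enums like this (a minimal sketch; error handling elided). The helper itself is declared further down in this hunk.

	/* Full system reset, scoped to the whole system. */
	ret = zynqmp_pm_system_shutdown(ZYNQMP_PM_SHUTDOWN_TYPE_RESET,
					ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM);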
/**
* struct zynqmp_pm_query_data - PM query data
* @qid: query ID
@@ -293,49 +314,199 @@ struct zynqmp_pm_query_data {
u32 arg3;
};
-struct zynqmp_eemi_ops {
- int (*get_api_version)(u32 *version);
- int (*get_chipid)(u32 *idcode, u32 *version);
- int (*fpga_load)(const u64 address, const u32 size, const u32 flags);
- int (*fpga_get_status)(u32 *value);
- int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
- int (*clock_enable)(u32 clock_id);
- int (*clock_disable)(u32 clock_id);
- int (*clock_getstate)(u32 clock_id, u32 *state);
- int (*clock_setdivider)(u32 clock_id, u32 divider);
- int (*clock_getdivider)(u32 clock_id, u32 *divider);
- int (*clock_setrate)(u32 clock_id, u64 rate);
- int (*clock_getrate)(u32 clock_id, u64 *rate);
- int (*clock_setparent)(u32 clock_id, u32 parent_id);
- int (*clock_getparent)(u32 clock_id, u32 *parent_id);
- int (*ioctl)(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2, u32 *out);
- int (*reset_assert)(const enum zynqmp_pm_reset reset,
- const enum zynqmp_pm_reset_action assert_flag);
- int (*reset_get_status)(const enum zynqmp_pm_reset reset, u32 *status);
- int (*init_finalize)(void);
- int (*set_suspend_mode)(u32 mode);
- int (*request_node)(const u32 node,
- const u32 capabilities,
- const u32 qos,
- const enum zynqmp_pm_request_ack ack);
- int (*release_node)(const u32 node);
- int (*set_requirement)(const u32 node,
- const u32 capabilities,
- const u32 qos,
- const enum zynqmp_pm_request_ack ack);
- int (*aes)(const u64 address, u32 *out);
-};
int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
u32 arg2, u32 arg3, u32 *ret_payload);
#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
-const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void);
+int zynqmp_pm_get_api_version(u32 *version);
+int zynqmp_pm_get_chipid(u32 *idcode, u32 *version);
+int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out);
+int zynqmp_pm_clock_enable(u32 clock_id);
+int zynqmp_pm_clock_disable(u32 clock_id);
+int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state);
+int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider);
+int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider);
+int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate);
+int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate);
+int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id);
+int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id);
+int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode);
+int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode);
+int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data);
+int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data);
+int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value);
+int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type);
+int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag);
+int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status);
+int zynqmp_pm_init_finalize(void);
+int zynqmp_pm_set_suspend_mode(u32 mode);
+int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos, const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_release_node(const u32 node);
+int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_aes_engine(const u64 address, u32 *out);
+int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags);
+int zynqmp_pm_fpga_get_status(u32 *value);
+int zynqmp_pm_write_ggs(u32 index, u32 value);
+int zynqmp_pm_read_ggs(u32 index, u32 *value);
+int zynqmp_pm_write_pggs(u32 index, u32 value);
+int zynqmp_pm_read_pggs(u32 index, u32 *value);
+int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype);
+int zynqmp_pm_set_boot_health_status(u32 value);
#else
-static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline int zynqmp_pm_get_api_version(u32 *version)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata,
+ u32 *out)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_enable(u32 clock_id)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_disable(u32 clock_id)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
+ u32 *status)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_init_finalize(void)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_set_suspend_mode(u32 mode)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_release_node(const u32 node)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_set_requirement(const u32 node,
+ const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_fpga_get_status(u32 *value)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_write_ggs(u32 index, u32 value)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_read_ggs(u32 index, u32 *value)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_write_pggs(u32 index, u32 value)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_read_pggs(u32 index, u32 *value)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_set_boot_health_status(u32 value)
+{
+ return -ENODEV;
+}
#endif
#endif /* __FIRMWARE_ZYNQMP_H__ */
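Editor's note: the net effect of this hunk is that consumers no longer fetch an ops table; they call the exported helpers directly. A before/after sketch (caller code hypothetical):

	/* Before: indirection through the eemi ops table. */
	const struct zynqmp_eemi_ops *ops = zynqmp_pm_get_eemi_ops();

	if (!IS_ERR(ops))
		ret = ops->clock_enable(clk_id);

	/* After: direct call; the stubs return -ENODEV when the firmware
	 * driver is not built in. */
	ret = zynqmp_pm_clock_enable(clk_id);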
diff --git a/include/linux/fpga/adi-axi-common.h b/include/linux/fpga/adi-axi-common.h
index 7fc95d5c95bb..141ac3f251e6 100644
--- a/include/linux/fpga/adi-axi-common.h
+++ b/include/linux/fpga/adi-axi-common.h
@@ -11,9 +11,13 @@
#ifndef ADI_AXI_COMMON_H_
#define ADI_AXI_COMMON_H_
-#define ADI_AXI_REG_VERSION 0x0000
+#define ADI_AXI_REG_VERSION 0x0000
#define ADI_AXI_PCORE_VER(major, minor, patch) \
(((major) << 16) | ((minor) << 8) | (patch))
+#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff)
+#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff)
+#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff)
+
#endif /* ADI_AXI_COMMON_H_ */
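Editor's note: a short sketch of the round trip the new accessors enable. The register base is assumed ioremap()ed, the expected version is illustrative, and the letter patch field follows the ADI "major.minor.letter" convention.

	u32 expected = ADI_AXI_PCORE_VER(10, 0, 'a');
	u32 ver = readl(base + ADI_AXI_REG_VERSION);

	if (ADI_AXI_PCORE_VER_MAJOR(ver) != ADI_AXI_PCORE_VER_MAJOR(expected))
		dev_err(dev, "unsupported IP core %u.%u.%c\n",
			ADI_AXI_PCORE_VER_MAJOR(ver),
			ADI_AXI_PCORE_VER_MINOR(ver),
			ADI_AXI_PCORE_VER_PATCH(ver));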
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9780ac31387c..6c4ab4dc1cd7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -24,7 +24,6 @@
#include <linux/capability.h>
#include <linux/semaphore.h>
#include <linux/fcntl.h>
-#include <linux/fiemap.h>
#include <linux/rculist_bl.h>
#include <linux/atomic.h>
#include <linux/shrinker.h>
@@ -48,6 +47,7 @@ struct backing_dev_info;
struct bdi_writeback;
struct bio;
struct export_operations;
+struct fiemap_extent_info;
struct hd_geometry;
struct iovec;
struct kiocb;
@@ -1048,6 +1048,7 @@ struct lock_manager_operations {
bool (*lm_break)(struct file_lock *);
int (*lm_change)(struct file_lock *, int, struct list_head *);
void (*lm_setup)(struct file_lock *, void **);
+ bool (*lm_breaker_owns_lease)(struct file_lock *);
};
struct lock_manager {
@@ -1412,6 +1413,8 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
#define SB_I_UNTRUSTED_MOUNTER 0x00000040
+#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
+
/* Possible states of 'frozen' field */
enum {
SB_UNFROZEN = 0, /* FS is unfrozen */
@@ -1679,10 +1682,10 @@ static inline int sb_start_write_trylock(struct super_block *sb)
*
* Since page fault freeze protection behaves as a lock, users have to preserve
* ordering of freeze protection and other filesystem locks. It is advised to
- * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault
+ * put sb_start_pagefault() close to mmap_lock in lock ordering. Page fault
* handling code implies lock dependency:
*
- * mmap_sem
+ * mmap_lock
* -> sb_start_pagefault
*/
static inline void sb_start_pagefault(struct super_block *sb)
@@ -1755,19 +1758,6 @@ extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
extern void inode_init_owner(struct inode *inode, const struct inode *dir,
umode_t mode);
extern bool may_open_dev(const struct path *path);
-/*
- * VFS FS_IOC_FIEMAP helper definitions.
- */
-struct fiemap_extent_info {
- unsigned int fi_flags; /* Flags as passed from user */
- unsigned int fi_extents_mapped; /* Number of mapped extents */
- unsigned int fi_extents_max; /* Size of fiemap_extent array */
- struct fiemap_extent __user *fi_extents_start; /* Start of
- fiemap_extent array */
-};
-int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
- u64 phys, u64 len, u32 flags);
-int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
/*
* This is the "filldir" function type, used by readdir() to let
@@ -3094,6 +3084,9 @@ extern struct inode *find_inode_nowait(struct super_block *,
int (*match)(struct inode *,
unsigned long, void *),
void *data);
+extern struct inode *find_inode_rcu(struct super_block *, unsigned long,
+ int (*)(struct inode *, void *), void *);
+extern struct inode *find_inode_by_ino_rcu(struct super_block *, unsigned long);
extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
extern int insert_inode_locked(struct inode *);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -3211,6 +3204,8 @@ enum {
DIO_SKIP_HOLES = 0x02,
};
+void dio_end_io(struct bio *bio);
+
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
get_block_t get_block,
@@ -3323,14 +3318,6 @@ static inline int vfs_fstat(int fd, struct kstat *stat)
extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
extern int vfs_readlink(struct dentry *, char __user *, int);
-extern int __generic_block_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo,
- loff_t start, loff_t len,
- get_block_t *get_block);
-extern int generic_block_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo, u64 start,
- u64 len, get_block_t *get_block);
-
extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index ce0b5fbf239d..3f0b19dcfae7 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -46,7 +46,7 @@ struct fscache_cache_tag {
unsigned long flags;
#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */
atomic_t usage;
- char name[0]; /* tag name */
+ char name[]; /* tag name */
};
/*
diff --git a/include/linux/fsl/bestcomm/bestcomm.h b/include/linux/fsl/bestcomm/bestcomm.h
index a0e2e6b19b57..154e541ce57e 100644
--- a/include/linux/fsl/bestcomm/bestcomm.h
+++ b/include/linux/fsl/bestcomm/bestcomm.h
@@ -27,7 +27,7 @@
*/
struct bcom_bd {
u32 status;
- u32 data[0]; /* variable payload size */
+ u32 data[]; /* variable payload size */
};
/* ======================================================================== */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index dfbbf7a7208b..e339dac91ee6 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -342,9 +342,8 @@ static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
extern int stack_tracer_enabled;
-int stack_trace_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
+int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index e0abafbb17f8..9506f8ec0974 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -171,5 +171,7 @@ struct fwnode_operations {
#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev)
extern u32 fw_devlink_get_flags(void);
+void fw_devlink_pause(void);
+void fw_devlink_resume(void);
#endif
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 4aba4c86c626..67a0774e080b 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -110,6 +110,11 @@ struct vm_area_struct;
* the caller guarantees the allocation will allow more memory to be freed
* very shortly e.g. process exiting or swapping. Users either should
* be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
+ * Users of this flag have to be extremely careful to not deplete the reserve
+ * completely and implement a throttling mechanism which controls the
+ * consumption of the reserve based on the amount of freed memory.
+ * Usage of a pre-allocated pool (e.g. mempool) should always be considered
+ * before using this flag.
*
* %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
* This takes precedence over the %__GFP_MEMALLOC flag if both are set.
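Editor's note: a minimal sketch of the recommended alternative to __GFP_MEMALLOC, using a pre-sized mempool (pool sizes illustrative):

	/* Reserve 16 buffers of 256 bytes up front. */
	mempool_t *pool = mempool_create_kmalloc_pool(16, 256);
	void *buf;

	if (!pool)
		return -ENOMEM;
	buf = mempool_alloc(pool, GFP_NOIO);	/* dips into the reserve if needed */
	/* ... make forward progress, then return the buffer ... */
	mempool_free(buf, pool);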
@@ -307,7 +312,7 @@ struct vm_area_struct;
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3
-static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
+static inline int gfp_migratetype(const gfp_t gfp_flags)
{
VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index b8fc92c177eb..c4f272af7af5 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -253,6 +253,19 @@ struct gpio_irq_chip {
* Store old irq_chip irq_disable callback
*/
void (*irq_disable)(struct irq_data *data);
+ /**
+ * @irq_unmask:
+ *
+ * Store old irq_chip irq_unmask callback
+ */
+ void (*irq_unmask)(struct irq_data *data);
+
+ /**
+ * @irq_mask:
+ *
+ * Store old irq_chip irq_mask callback
+ */
+ void (*irq_mask)(struct irq_data *data);
};
/**
@@ -267,9 +280,9 @@ struct gpio_irq_chip {
* @free: optional hook for chip-specific deactivation, such as
* disabling module power and clock; may sleep
* @get_direction: returns direction for signal "offset", 0=out, 1=in,
- * (same as GPIOF_DIR_XXX), or negative error.
- * It is recommended to always implement this function, even on
- * input-only or output-only gpio chips.
+ * (same as GPIO_LINE_DIRECTION_OUT / GPIO_LINE_DIRECTION_IN),
+ * or negative error. It is recommended to always implement this
+ * function, even on input-only or output-only gpio chips.
* @direction_input: configures signal "offset" as input, or returns error
* This can be omitted on input-only or output-only gpio chips.
* @direction_output: configures signal "offset" as output, or returns error
@@ -349,30 +362,30 @@ struct gpio_chip {
struct module *owner;
int (*request)(struct gpio_chip *gc,
- unsigned offset);
+ unsigned int offset);
void (*free)(struct gpio_chip *gc,
- unsigned offset);
+ unsigned int offset);
int (*get_direction)(struct gpio_chip *gc,
- unsigned offset);
+ unsigned int offset);
int (*direction_input)(struct gpio_chip *gc,
- unsigned offset);
+ unsigned int offset);
int (*direction_output)(struct gpio_chip *gc,
- unsigned offset, int value);
+ unsigned int offset, int value);
int (*get)(struct gpio_chip *gc,
- unsigned offset);
+ unsigned int offset);
int (*get_multiple)(struct gpio_chip *gc,
unsigned long *mask,
unsigned long *bits);
void (*set)(struct gpio_chip *gc,
- unsigned offset, int value);
+ unsigned int offset, int value);
void (*set_multiple)(struct gpio_chip *gc,
unsigned long *mask,
unsigned long *bits);
int (*set_config)(struct gpio_chip *gc,
- unsigned offset,
+ unsigned int offset,
unsigned long config);
int (*to_irq)(struct gpio_chip *gc,
- unsigned offset);
+ unsigned int offset);
void (*dbg_show)(struct seq_file *s,
struct gpio_chip *gc);
@@ -459,7 +472,7 @@ struct gpio_chip {
};
extern const char *gpiochip_is_requested(struct gpio_chip *gc,
- unsigned offset);
+ unsigned int offset);
/* add/remove chips */
extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
@@ -599,6 +612,9 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gc,
bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
unsigned int offset);
+int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
+ struct irq_domain *domain);
+
#ifdef CONFIG_LOCKDEP
/*
@@ -657,9 +673,9 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gc,
}
#endif /* CONFIG_LOCKDEP */
-int gpiochip_generic_request(struct gpio_chip *gc, unsigned offset);
-void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset);
-int gpiochip_generic_config(struct gpio_chip *gc, unsigned offset,
+int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset);
+int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset,
unsigned long config);
/**
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index 1ebe5be05d5f..781a053abbb9 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -20,8 +20,11 @@ enum gpio_lookup_flags {
/**
* struct gpiod_lookup - lookup table
- * @chip_label: name of the chip the GPIO belongs to
- * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO
+ * @key: either the name of the chip the GPIO belongs to, or the GPIO line name
+ * Note that GPIO line names are not guaranteed to be globally unique,
+ * so this will use the first match found!
+ * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO, or
+ * U16_MAX to indicate that @key is a GPIO line name
* @con_id: name of the GPIO from the device's point of view
* @idx: index of the GPIO in case several GPIOs share the same name
* @flags: bitmask of gpio_lookup_flags GPIO_* values
@@ -30,7 +33,7 @@ enum gpio_lookup_flags {
* functions using platform data.
*/
struct gpiod_lookup {
- const char *chip_label;
+ const char *key;
u16 chip_hwnum;
const char *con_id;
unsigned int idx;
@@ -63,17 +66,17 @@ struct gpiod_hog {
/*
* Simple definition of a single GPIO under a con_id
*/
-#define GPIO_LOOKUP(_chip_label, _chip_hwnum, _con_id, _flags) \
- GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, 0, _flags)
+#define GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags) \
+ GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, 0, _flags)
/*
* Use this macro if you need to have several GPIOs under the same con_id.
* Each GPIO needs to use a different index and can be accessed using
* gpiod_get_index()
*/
-#define GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, _idx, _flags) \
+#define GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, _idx, _flags) \
{ \
- .chip_label = _chip_label, \
+ .key = _key, \
.chip_hwnum = _chip_hwnum, \
.con_id = _con_id, \
.idx = _idx, \
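Editor's note: a sketch of a lookup table mixing both key styles enabled by this change (device and line names hypothetical):

	static struct gpiod_lookup_table foo_gpios = {
		.dev_id = "foo.0",
		.table = {
			/* key is a chip label, hwnum an offset on that chip */
			GPIO_LOOKUP("gpiochip0", 3, "reset", GPIO_ACTIVE_LOW),
			/* key is a GPIO line name, flagged via U16_MAX */
			GPIO_LOOKUP("power-en", U16_MAX, "power", GPIO_ACTIVE_HIGH),
			{ },
		},
	};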
diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h
new file mode 100644
index 000000000000..4c1e6b34e824
--- /dev/null
+++ b/include/linux/gpio/regmap.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _LINUX_GPIO_REGMAP_H
+#define _LINUX_GPIO_REGMAP_H
+
+struct device;
+struct gpio_regmap;
+struct irq_domain;
+struct regmap;
+
+#define GPIO_REGMAP_ADDR_ZERO ((unsigned long)(-1))
+#define GPIO_REGMAP_ADDR(addr) ((addr) ? : GPIO_REGMAP_ADDR_ZERO)
+
+/**
+ * struct gpio_regmap_config - Description of a generic regmap gpio_chip.
+ * @parent: The parent device
+ * @regmap: The regmap used to access the registers
+ * @label: (Optional) Descriptive name for GPIO controller.
+ * If not given, the name of the device is used.
+ * @ngpio: Number of GPIOs
+ * @names: (Optional) Array of names for gpios
+ * @reg_dat_base: (Optional) (in) register base address
+ * @reg_set_base: (Optional) set register base address
+ * @reg_clr_base: (Optional) clear register base address
+ * @reg_dir_in_base: (Optional) in setting register base address
+ * @reg_dir_out_base: (Optional) out setting register base address
+ * @reg_stride: (Optional) May be set if the registers (of the
+ * same type, dat, set, etc) are not consecutive.
+ * @ngpio_per_reg: Number of GPIOs per register
+ * @irq_domain: (Optional) IRQ domain if the controller is
+ * interrupt-capable
+ * @reg_mask_xlate: (Optional) Translates base address and GPIO
+ * offset to a register/bitmask pair. If not
+ * given the default gpio_regmap_simple_xlate()
+ * is used.
+ *
+ * The ->reg_mask_xlate translates a given base address and GPIO offset to a
+ * register and mask pair. The base address is one of the given register
+ * base addresses in this structure.
+ *
+ * Although all register base addresses are marked as optional, there are
+ * several rules:
+ * 1. if you only have @reg_dat_base set, then it is input-only
+ * 2. if you only have @reg_set_base set, then it is output-only
+ * 3. if you have either @reg_dir_in_base or @reg_dir_out_base set, then
+ * you have to set both @reg_dat_base and @reg_set_base
+ * 4. if you have @reg_set_base set, you may also set @reg_clr_base to have
+ * two different registers for setting and clearing the output. This is
+ * also valid for the output-only case.
+ * 5. @reg_dir_in_base and @reg_dir_out_base are exclusive; is there really
+ * hardware which has redundant registers?
+ *
+ * Note: All base addresses may have the special value %GPIO_REGMAP_ADDR_ZERO
+ * which forces the address to the value 0.
+ */
+struct gpio_regmap_config {
+ struct device *parent;
+ struct regmap *regmap;
+
+ const char *label;
+ int ngpio;
+ const char *const *names;
+
+ unsigned int reg_dat_base;
+ unsigned int reg_set_base;
+ unsigned int reg_clr_base;
+ unsigned int reg_dir_in_base;
+ unsigned int reg_dir_out_base;
+ int reg_stride;
+ int ngpio_per_reg;
+ struct irq_domain *irq_domain;
+
+ int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base,
+ unsigned int offset, unsigned int *reg,
+ unsigned int *mask);
+};
+
+struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config);
+void gpio_regmap_unregister(struct gpio_regmap *gpio);
+struct gpio_regmap *devm_gpio_regmap_register(struct device *dev,
+ const struct gpio_regmap_config *config);
+void gpio_regmap_set_drvdata(struct gpio_regmap *gpio, void *data);
+void *gpio_regmap_get_drvdata(struct gpio_regmap *gpio);
+
+#endif /* _LINUX_GPIO_REGMAP_H */
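Editor's note: a sketch of registering a simple output-capable controller through this API (register offsets illustrative; dev and regmap assumed set up by the caller):

	struct gpio_regmap_config cfg = {
		.parent		= dev,
		.regmap		= regmap,
		.ngpio		= 32,
		.reg_dat_base	= GPIO_REGMAP_ADDR(0x00),
		.reg_set_base	= GPIO_REGMAP_ADDR(0x04),
		.reg_dir_out_base = GPIO_REGMAP_ADDR(0x08),
	};
	struct gpio_regmap *gpio = devm_gpio_regmap_register(dev, &cfg);

	if (IS_ERR(gpio))
		return PTR_ERR(gpio);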
diff --git a/include/linux/greybus/greybus_protocols.h b/include/linux/greybus/greybus_protocols.h
index dfbc6c39a74b..aeb8f9243545 100644
--- a/include/linux/greybus/greybus_protocols.h
+++ b/include/linux/greybus/greybus_protocols.h
@@ -345,7 +345,7 @@ struct gb_cap_get_ims_certificate_request {
struct gb_cap_get_ims_certificate_response {
__u8 result_code;
- __u8 certificate[0];
+ __u8 certificate[];
} __packed;
/* CAP authenticate request/response */
@@ -358,7 +358,7 @@ struct gb_cap_authenticate_request {
struct gb_cap_authenticate_response {
__u8 result_code;
__u8 response[64];
- __u8 signature[0];
+ __u8 signature[];
} __packed;
@@ -642,7 +642,7 @@ struct gb_hid_get_report_request {
struct gb_hid_set_report_request {
__u8 report_type;
__u8 report_id;
- __u8 report[0];
+ __u8 report[];
} __packed;
/* HID input report request, via interrupt pipe */
@@ -680,7 +680,7 @@ struct gb_i2c_transfer_op {
struct gb_i2c_transfer_request {
__le16 op_count;
- struct gb_i2c_transfer_op ops[0]; /* op_count of these */
+ struct gb_i2c_transfer_op ops[]; /* op_count of these */
} __packed;
struct gb_i2c_transfer_response {
__u8 data[0]; /* inbound data */
@@ -908,7 +908,7 @@ struct gb_spi_transfer_request {
__u8 chip_select; /* of the spi device */
__u8 mode; /* of the spi device */
__le16 count;
- struct gb_spi_transfer transfers[0]; /* count of these */
+ struct gb_spi_transfer transfers[]; /* count of these */
} __packed;
struct gb_spi_transfer_response {
@@ -1188,7 +1188,7 @@ struct gb_svc_pwrmon_rail_count_get_response {
struct gb_svc_pwrmon_rail_names_get_response {
__u8 status;
- __u8 name[0][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
+ __u8 name[][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
} __packed;
#define GB_SVC_PWRMON_TYPE_CURR 0x01
@@ -1281,7 +1281,7 @@ struct gb_svc_intf_oops_request {
struct gb_raw_send_request {
__le32 len;
- __u8 data[0];
+ __u8 data[];
} __packed;
@@ -1300,7 +1300,7 @@ struct gb_raw_send_request {
/* Represents data from AP -> Module */
struct gb_uart_send_data_request {
__le16 size;
- __u8 data[0];
+ __u8 data[];
} __packed;
/* recv-data-request flags */
@@ -1313,7 +1313,7 @@ struct gb_uart_send_data_request {
struct gb_uart_recv_data_request {
__le16 size;
__u8 flags;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct gb_uart_receive_credits_request {
@@ -1382,14 +1382,14 @@ struct gb_loopback_transfer_request {
__le32 len;
__le32 reserved0;
__le32 reserved1;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct gb_loopback_transfer_response {
__le32 len;
__le32 reserved0;
__le32 reserved1;
- __u8 data[0];
+ __u8 data[];
} __packed;
/* SDIO */
@@ -1530,13 +1530,13 @@ struct gb_sdio_transfer_request {
__le16 data_blocks;
__le16 data_blksz;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct gb_sdio_transfer_response {
__le16 data_blocks;
__le16 data_blksz;
- __u8 data[0];
+ __u8 data[];
} __packed;
/* event request: generated by module and is defined as unidirectional */
@@ -1572,7 +1572,7 @@ struct gb_camera_configure_streams_request {
__u8 flags;
#define GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY 0x01
__le16 padding;
- struct gb_camera_stream_config_request config[0];
+ struct gb_camera_stream_config_request config[];
} __packed;
/* Greybus Camera Configure Streams response payload */
@@ -1593,7 +1593,7 @@ struct gb_camera_configure_streams_response {
__u8 flags;
__u8 padding[2];
__le32 data_rate;
- struct gb_camera_stream_config_response config[0];
+ struct gb_camera_stream_config_response config[];
};
/* Greybus Camera Capture request payload - response has no payload */
@@ -1602,7 +1602,7 @@ struct gb_camera_capture_request {
__u8 streams;
__u8 padding;
__le16 num_frames;
- __u8 settings[0];
+ __u8 settings[];
} __packed;
/* Greybus Camera Flush response payload - request has no payload */
@@ -1616,7 +1616,7 @@ struct gb_camera_metadata_request {
__le16 frame_number;
__u8 stream;
__u8 padding;
- __u8 metadata[0];
+ __u8 metadata[];
} __packed;
/* Lights */
@@ -1993,7 +1993,7 @@ struct gb_audio_integer64 {
struct gb_audio_enumerated {
__le32 items;
__le16 names_length;
- __u8 names[0];
+ __u8 names[];
} __packed;
struct gb_audio_ctl_elem_info { /* See snd_ctl_elem_info in Linux source */
@@ -2033,7 +2033,7 @@ struct gb_audio_widget {
__u8 type; /* GB_AUDIO_WIDGET_TYPE_* */
__u8 state; /* GB_AUDIO_WIDGET_STATE_* */
__u8 ncontrols;
- struct gb_audio_control ctl[0]; /* 'ncontrols' entries */
+ struct gb_audio_control ctl[]; /* 'ncontrols' entries */
} __packed;
struct gb_audio_route {
@@ -2059,7 +2059,7 @@ struct gb_audio_topology {
* struct gb_audio_widget widgets[num_widgets];
* struct gb_audio_route routes[num_routes];
*/
- __u8 data[0];
+ __u8 data[];
} __packed;
struct gb_audio_get_topology_size_response {
@@ -2157,7 +2157,7 @@ struct gb_audio_streaming_event_request {
struct gb_audio_send_data_request {
__le64 timestamp;
- __u8 data[0];
+ __u8 data[];
} __packed;
@@ -2171,7 +2171,7 @@ struct gb_audio_send_data_request {
struct gb_log_send_log_request {
__le16 len;
- __u8 msg[0];
+ __u8 msg[];
} __packed;
#endif /* __GREYBUS_PROTOCOLS_H */
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index e07cf853aa16..03c9fece7d43 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -38,9 +38,24 @@ static __always_inline void rcu_irq_enter_check_tick(void)
} while (0)
/*
+ * Like __irq_enter() but without time accounting, for fast
+ * interrupts, e.g. the reschedule IPI, where time accounting
+ * is more expensive than the actual interrupt.
+ */
+#define __irq_enter_raw() \
+ do { \
+ preempt_count_add(HARDIRQ_OFFSET); \
+ lockdep_hardirq_enter(); \
+ } while (0)
+
+/*
* Enter irq context (on NO_HZ, update jiffies):
*/
-extern void irq_enter(void);
+void irq_enter(void);
+/*
+ * Like irq_enter(), but RCU is already watching.
+ */
+void irq_enter_rcu(void);
/*
* Exit irq context without processing softirqs:
@@ -53,9 +68,23 @@ extern void irq_enter(void);
} while (0)
/*
+ * Like __irq_exit() without time accounting
+ */
+#define __irq_exit_raw() \
+ do { \
+ lockdep_hardirq_exit(); \
+ preempt_count_sub(HARDIRQ_OFFSET); \
+ } while (0)
+
+/*
* Exit irq context and process softirqs if needed:
*/
-extern void irq_exit(void);
+void irq_exit(void);
+
+/*
+ * Like irq_exit(), but return with RCU watching.
+ */
+void irq_exit_rcu(void);
#ifndef arch_nmi_enter
#define arch_nmi_enter() do { } while (0)
@@ -87,20 +116,24 @@ extern void rcu_nmi_exit(void);
arch_nmi_enter(); \
printk_nmi_enter(); \
lockdep_off(); \
- ftrace_nmi_enter(); \
BUG_ON(in_nmi() == NMI_MASK); \
__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
rcu_nmi_enter(); \
lockdep_hardirq_enter(); \
+ instrumentation_begin(); \
+ ftrace_nmi_enter(); \
+ instrumentation_end(); \
} while (0)
#define nmi_exit() \
do { \
+ instrumentation_begin(); \
+ ftrace_nmi_exit(); \
+ instrumentation_end(); \
lockdep_hardirq_exit(); \
rcu_nmi_exit(); \
BUG_ON(!in_nmi()); \
__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
- ftrace_nmi_exit(); \
lockdep_on(); \
printk_nmi_exit(); \
arch_nmi_exit(); \
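Editor's note: the raw helpers added above pair exactly like __irq_enter()/__irq_exit(), just without time accounting. A contrived sketch (handler name hypothetical):

	__irq_enter_raw();		/* no tick/time accounting on this fast path */
	handle_reschedule_ipi();	/* hypothetical handler */
	__irq_exit_raw();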
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index ea5cdbd8c2c3..d6e82e3de027 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -32,8 +32,65 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
#include <asm/kmap_types.h>
#ifdef CONFIG_HIGHMEM
+extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+extern void kunmap_atomic_high(void *kvaddr);
#include <asm/highmem.h>
+#ifndef ARCH_HAS_KMAP_FLUSH_TLB
+static inline void kmap_flush_tlb(unsigned long addr) { }
+#endif
+
+#ifndef kmap_prot
+#define kmap_prot PAGE_KERNEL
+#endif
+
+void *kmap_high(struct page *page);
+static inline void *kmap(struct page *page)
+{
+ void *addr;
+
+ might_sleep();
+ if (!PageHighMem(page))
+ addr = page_address(page);
+ else
+ addr = kmap_high(page);
+ kmap_flush_tlb((unsigned long)addr);
+ return addr;
+}
+
+void kunmap_high(struct page *page);
+
+static inline void kunmap(struct page *page)
+{
+ might_sleep();
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+/*
+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+ * no global lock is needed and because the kmap code must perform a global TLB
+ * invalidation when the kmap pool wraps.
+ *
+ * However, when holding an atomic kmap it is not legal to sleep, so atomic
+ * kmaps are appropriate for short, tight code paths only.
+ *
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+ preempt_disable();
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+ return kmap_atomic_high_prot(page, prot);
+}
+#define kmap_atomic(page) kmap_atomic_prot(page, kmap_prot)
+
/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;
@@ -77,15 +134,21 @@ static inline struct page *kmap_to_page(void *addr)
static inline unsigned long totalhigh_pages(void) { return 0UL; }
-#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
might_sleep();
return page_address(page);
}
+static inline void kunmap_high(struct page *page)
+{
+}
+
static inline void kunmap(struct page *page)
{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(page_address(page));
+#endif
}
static inline void *kmap_atomic(struct page *page)
@@ -96,16 +159,20 @@ static inline void *kmap_atomic(struct page *page)
}
#define kmap_atomic_prot(page, prot) kmap_atomic(page)
-static inline void __kunmap_atomic(void *addr)
+static inline void kunmap_atomic_high(void *addr)
{
- pagefault_enable();
- preempt_enable();
+ /*
+ * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
+ * handles re-enabling faults + preemption
+ */
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(addr);
+#endif
}
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
#define kmap_flush_unused() do {} while(0)
-#endif
#endif /* CONFIG_HIGHMEM */
@@ -149,7 +216,9 @@ static inline void kmap_atomic_idx_pop(void)
#define kunmap_atomic(addr) \
do { \
BUILD_BUG_ON(__same_type((addr), struct page *)); \
- __kunmap_atomic(addr); \
+ kunmap_atomic_high(addr); \
+ pagefault_enable(); \
+ preempt_enable(); \
} while (0)
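Editor's note: the reworked macro keeps the caller-visible contract: map, use, unmap, with pagefaults and preemption re-enabled on the way out. A minimal sketch (page and buf assumed provided):

	void *vaddr = kmap_atomic(page);

	memcpy(buf, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr);	/* pass the mapped address, not the struct page */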
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index e912b9dc4633..f4a09ed223ac 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -10,7 +10,7 @@
#define LINUX_HMM_H
#include <linux/kconfig.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <linux/device.h>
#include <linux/migrate.h>
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index cfbb0a87c5f0..71f20776b06c 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -248,7 +248,7 @@ static inline int is_swap_pmd(pmd_t pmd)
return !pmd_none(pmd) && !pmd_present(pmd);
}
-/* mmap_sem must be held on entry */
+/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 92c21c5ccc58..50650d0d01b9 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -9,7 +9,7 @@
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
struct ctl_table;
struct user_struct;
@@ -518,8 +518,8 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);
-void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
+bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);
#ifndef HUGE_MAX_HSTATE
@@ -590,6 +590,20 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h)
#include <asm/hugetlb.h>
+#ifndef is_hugepage_only_range
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr, unsigned long len)
+{
+ return 0;
+}
+#define is_hugepage_only_range is_hugepage_only_range
+#endif
+
+#ifndef arch_clear_hugepage_flags
+static inline void arch_clear_hugepage_flags(struct page *page) { }
+#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+#endif
+
#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
struct page *page, int writable)
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 6058c3844a76..d7d4250cd1e4 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -80,6 +80,10 @@ extern int dbg_reserve_bp_slot(struct perf_event *bp);
extern int dbg_release_bp_slot(struct perf_event *bp);
extern int reserve_bp_slot(struct perf_event *bp);
extern void release_bp_slot(struct perf_event *bp);
+int hw_breakpoint_weight(struct perf_event *bp);
+int arch_reserve_bp_slot(struct perf_event *bp);
+void arch_release_bp_slot(struct perf_event *bp);
+void arch_unregister_hw_breakpoint(struct perf_event *bp);
extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
index 8c5459034f92..1e4e0de4ef8b 100644
--- a/include/linux/i2c-smbus.h
+++ b/include/linux/i2c-smbus.h
@@ -2,7 +2,7 @@
/*
* i2c-smbus.h - SMBus extensions to the I2C protocol
*
- * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de>
+ * Copyright (C) 2010-2019 Jean Delvare <jdelvare@suse.de>
*/
#ifndef _LINUX_I2C_SMBUS_H
@@ -39,4 +39,10 @@ static inline int of_i2c_setup_smbus_alert(struct i2c_adapter *adap)
}
#endif
+#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_DMI)
+void i2c_register_spd(struct i2c_adapter *adap);
+#else
+static inline void i2c_register_spd(struct i2c_adapter *adap) { }
+#endif
+
#endif /* _LINUX_I2C_SMBUS_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 49d29054e657..c10617bb980a 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -351,14 +351,14 @@ static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
return to_i2c_client(dev);
}
-static inline void *i2c_get_clientdata(const struct i2c_client *dev)
+static inline void *i2c_get_clientdata(const struct i2c_client *client)
{
- return dev_get_drvdata(&dev->dev);
+ return dev_get_drvdata(&client->dev);
}
-static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
+static inline void i2c_set_clientdata(struct i2c_client *client, void *data)
{
- dev_set_drvdata(&dev->dev, data);
+ dev_set_drvdata(&client->dev, data);
}
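Editor's note: typical pairing with the renamed parameter (driver and type names hypothetical):

	static int foo_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
	{
		struct foo_data *data;

		data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		/* retrieved elsewhere via i2c_get_clientdata(client) */
		i2c_set_clientdata(client, data);
		return 0;
	}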
/* I2C slave support */
diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h
index a445cd1a36c5..91a8612b8bf9 100644
--- a/include/linux/idle_inject.h
+++ b/include/linux/idle_inject.h
@@ -26,4 +26,8 @@ void idle_inject_set_duration(struct idle_inject_device *ii_dev,
void idle_inject_get_duration(struct idle_inject_device *ii_dev,
unsigned int *run_duration_us,
unsigned int *idle_duration_us);
+
+void idle_inject_set_latency(struct idle_inject_device *ii_dev,
+ unsigned int latency_ns);
+
#endif /* __IDLE_INJECT_H__ */
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 5a127c0ed200..a3a838dcf8e4 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -133,62 +133,4 @@ void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev);
int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
-#define __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift, _extend_name, _type, _mask_all) \
- { \
- .type = (_type), \
- .differential = (_channel2 == -1 ? 0 : 1), \
- .indexed = 1, \
- .channel = (_channel1), \
- .channel2 = (_channel2), \
- .address = (_address), \
- .extend_name = (_extend_name), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_OFFSET), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_shared_by_all = _mask_all, \
- .scan_index = (_si), \
- .scan_type = { \
- .sign = 'u', \
- .realbits = (_bits), \
- .storagebits = (_storagebits), \
- .shift = (_shift), \
- .endianness = IIO_BE, \
- }, \
- }
-
-#define AD_SD_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift, NULL, IIO_VOLTAGE, \
- BIT(IIO_CHAN_INFO_SAMP_FREQ))
-
-#define AD_SD_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel, _channel, _address, _bits, \
- _storagebits, _shift, "shorted", IIO_VOLTAGE, \
- BIT(IIO_CHAN_INFO_SAMP_FREQ))
-
-#define AD_SD_CHANNEL(_si, _channel, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
- _storagebits, _shift, NULL, IIO_VOLTAGE, \
- BIT(IIO_CHAN_INFO_SAMP_FREQ))
-
-#define AD_SD_CHANNEL_NO_SAMP_FREQ(_si, _channel, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
- _storagebits, _shift, NULL, IIO_VOLTAGE, 0)
-
-#define AD_SD_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, 0, -1, _address, _bits, \
- _storagebits, _shift, NULL, IIO_TEMP, \
- BIT(IIO_CHAN_INFO_SAMP_FREQ))
-
-#define AD_SD_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \
- _shift) \
- __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
- _storagebits, _shift, "supply", IIO_VOLTAGE, \
- BIT(IIO_CHAN_INFO_SAMP_FREQ))
-
#endif
diff --git a/include/linux/iio/adc/adi-axi-adc.h b/include/linux/iio/adc/adi-axi-adc.h
new file mode 100644
index 000000000000..c5d48e1c2d36
--- /dev/null
+++ b/include/linux/iio/adc/adi-axi-adc.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Analog Devices Generic AXI ADC IP core driver/library
+ * Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
+ *
+ * Copyright 2012-2020 Analog Devices Inc.
+ */
+#ifndef __ADI_AXI_ADC_H__
+#define __ADI_AXI_ADC_H__
+
+struct device;
+struct iio_chan_spec;
+
+/**
+ * struct adi_axi_adc_chip_info - Chip specific information
+ * @name: Chip name
+ * @id: Chip ID (usually product ID)
+ * @channels: Channel specifications of type @struct iio_chan_spec
+ * @num_channels: Number of @channels
+ * @scale_table: Scales supported by the chip; tuples of 2 ints
+ * @num_scales: Number of scales in the table
+ * @max_rate: Maximum sampling rate supported by the device
+ */
+struct adi_axi_adc_chip_info {
+ const char *name;
+ unsigned int id;
+
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+
+ const unsigned int (*scale_table)[2];
+ int num_scales;
+
+ unsigned long max_rate;
+};
+
+/**
+ * struct adi_axi_adc_conv - data of the ADC attached to the AXI ADC
+ * @chip_info: chip info details for the client ADC
+ * @preenable_setup: op to run in the client before enabling the AXI ADC
+ * @reg_access: IIO debugfs_reg_access hook for the client ADC
+ * @read_raw: IIO read_raw hook for the client ADC
+ * @write_raw: IIO write_raw hook for the client ADC
+ */
+struct adi_axi_adc_conv {
+ const struct adi_axi_adc_chip_info *chip_info;
+
+ int (*preenable_setup)(struct adi_axi_adc_conv *conv);
+ int (*reg_access)(struct adi_axi_adc_conv *conv, unsigned int reg,
+ unsigned int writeval, unsigned int *readval);
+ int (*read_raw)(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask);
+ int (*write_raw)(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask);
+};
+
+struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
+ size_t sizeof_priv);
+
+void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv);
+
+#endif
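Editor's note: a registration sketch from a client ADC driver's perspective (state and table names hypothetical); @sizeof_priv reserves a private area retrievable via adi_axi_adc_conv_priv():

	struct adi_axi_adc_conv *conv;
	struct foo_adc_state *st;

	conv = devm_adi_axi_adc_conv_register(&spi->dev, sizeof(*st));
	if (IS_ERR(conv))
		return PTR_ERR(conv);

	conv->chip_info = &foo_chip_info;
	conv->read_raw = foo_adc_read_raw;
	st = adi_axi_adc_conv_priv(conv);	/* private area sized above */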
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index 016d8a068353..ff15c61bf319 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -11,7 +11,7 @@
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
-#include <linux/iio/buffer.h>
+#include <linux/iio/buffer_impl.h>
struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h
index b3a57444a886..0e503db71289 100644
--- a/include/linux/iio/buffer-dmaengine.h
+++ b/include/linux/iio/buffer-dmaengine.h
@@ -14,4 +14,7 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
const char *channel);
void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
+struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
+ const char *channel);
+
#endif
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
index a4d2d8061ef6..a63dc07b7350 100644
--- a/include/linux/iio/buffer_impl.h
+++ b/include/linux/iio/buffer_impl.h
@@ -94,12 +94,6 @@ struct iio_buffer {
unsigned int watermark;
/* private: */
- /*
- * @scan_el_attrs: Control of scan elements if that scan mode
- * control method is used.
- */
- struct attribute_group *scan_el_attrs;
-
/* @scan_timestamp: Does the scan mode include a timestamp. */
bool scan_timestamp;
@@ -115,9 +109,6 @@ struct iio_buffer {
*/
struct attribute_group scan_el_group;
- /* @stufftoread: Flag to indicate new data. */
- bool stufftoread;
-
/* @attrs: Standard attributes of the buffer. */
const struct attribute **attrs;
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 2bde8c912d4d..c4118dcb8e05 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -64,15 +64,6 @@ void iio_channel_release(struct iio_channel *chan);
struct iio_channel *devm_iio_channel_get(struct device *dev,
const char *consumer_channel);
/**
- * devm_iio_channel_release() - Resource managed version of
- * iio_channel_release().
- * @dev: Pointer to consumer device for which resource
- * is allocared.
- * @chan: The channel to be released.
- */
-void devm_iio_channel_release(struct device *dev, struct iio_channel *chan);
-
-/**
* iio_channel_get_all() - get all channels associated with a client
* @dev: Pointer to consumer device.
*
@@ -106,15 +97,6 @@ void iio_channel_release_all(struct iio_channel *chan);
*/
struct iio_channel *devm_iio_channel_get_all(struct device *dev);
-/**
- * devm_iio_channel_release_all() - Resource managed version of
- * iio_channel_release_all().
- * @dev: Pointer to consumer device for which resource
- * is allocared.
- * @chan: Array channel to be released.
- */
-void devm_iio_channel_release_all(struct device *dev, struct iio_channel *chan);
-
struct iio_cb_buffer;
/**
* iio_channel_get_all_cb() - register callback for triggered capture
diff --git a/include/linux/iio/hw-consumer.h b/include/linux/iio/hw-consumer.h
index 44d48bb1d39f..e8255c2e33bc 100644
--- a/include/linux/iio/hw-consumer.h
+++ b/include/linux/iio/hw-consumer.h
@@ -14,7 +14,6 @@ struct iio_hw_consumer;
struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev);
void iio_hw_consumer_free(struct iio_hw_consumer *hwc);
struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev);
-void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc);
int iio_hw_consumer_enable(struct iio_hw_consumer *hwc);
void iio_hw_consumer_disable(struct iio_hw_consumer *hwc);
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 25c87507a1fa..a1be82e74c93 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -492,7 +492,7 @@ struct iio_buffer_setup_ops {
* @buffer: [DRIVER] any buffer present
* @buffer_list: [INTERN] list of all buffers currently attached
* @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux
- * @mlock: [DRIVER] lock used to prevent simultaneous device state
+ * @mlock: [INTERN] lock used to prevent simultaneous device state
* changes
* @available_scan_masks: [DRIVER] optional array of allowed bitmasks
* @masklength: [INTERN] the length of the mask established from
@@ -593,9 +593,6 @@ void iio_device_unregister(struct iio_dev *indio_dev);
* calls iio_device_register() internally. Refer to that function for more
* information.
*
- * If an iio_dev registered with this function needs to be unregistered
- * separately, devm_iio_device_unregister() must be used.
- *
* RETURNS:
* 0 on success, negative error number on failure.
*/
@@ -603,7 +600,6 @@ void iio_device_unregister(struct iio_dev *indio_dev);
__devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
struct module *this_mod);
-void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
void iio_device_release_direct_mode(struct iio_dev *indio_dev);
@@ -694,13 +690,9 @@ static inline struct iio_dev *iio_priv_to_dev(void *priv)
}
void iio_device_free(struct iio_dev *indio_dev);
-int devm_iio_device_match(struct device *dev, void *res, void *data);
struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv);
-void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev);
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
const char *fmt, ...);
-void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig);
-
/**
* iio_buffer_enabled() - helper function to test if the buffer is enabled
* @indio_dev: IIO device structure for device
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index dd8219138c2e..2df67448f0d1 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -83,10 +83,13 @@ struct adis_data {
* @trig: IIO trigger object data
* @data: ADIS chip variant specific data
* @burst: ADIS burst transfer information
+ * @burst_extra_len: Burst extra length. Should only be used by devices that can
+ * dynamically change their burst mode length.
* @state_lock: Lock used by the device to protect state
* @msg: SPI message object
* @xfer: SPI transfer objects to be used for a @msg
 * @current_page: Some ADIS devices have paged registers; this selects the current page
+ * @irq_flag: IRQ handling flags as passed to request_irq()
* @buffer: Data buffer for information read from the device
* @tx: DMA safe TX buffer for SPI transfers
* @rx: DMA safe RX buffer for SPI transfers
@@ -97,7 +100,7 @@ struct adis {
const struct adis_data *data;
struct adis_burst *burst;
-
+ unsigned int burst_extra_len;
/**
* The state_lock is meant to be used during operations that require
* a sequence of SPI R/W in order to protect the SPI transfer
@@ -113,6 +116,7 @@ struct adis {
struct spi_message msg;
struct spi_transfer *xfer;
unsigned int current_page;
+ unsigned long irq_flag;
void *buffer;
uint8_t tx[10] ____cacheline_aligned;
@@ -331,6 +335,65 @@ static inline int adis_read_reg_32(struct adis *adis, unsigned int reg,
return ret;
}
+int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask,
+ const u32 val, u8 size);
+/**
+ * adis_update_bits_base() - ADIS Update bits function - Locked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ * @size: Size of the register to update
+ *
+ * Updates the desired bits of @reg in accordance with @mask and @val.
+ */
+static inline int adis_update_bits_base(struct adis *adis, unsigned int reg,
+ const u32 mask, const u32 val, u8 size)
+{
+ int ret;
+
+ mutex_lock(&adis->state_lock);
+ ret = __adis_update_bits_base(adis, reg, mask, val, size);
+ mutex_unlock(&adis->state_lock);
+ return ret;
+}
+
+/**
+ * adis_update_bits() - Wrapper macro for adis_update_bits_base - Locked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * This macro evaluates the size of @val at compile time and calls
+ * adis_update_bits_base() accordingly. Be aware that using macros/defines for
+ * @val can lead to undesired behavior if the register to update is 16 bits.
+ */
+#define adis_update_bits(adis, reg, mask, val) ({ \
+ BUILD_BUG_ON(sizeof(val) == 1 || sizeof(val) == 8); \
+ __builtin_choose_expr(sizeof(val) == 4, \
+ adis_update_bits_base(adis, reg, mask, val, 4), \
+ adis_update_bits_base(adis, reg, mask, val, 2)); \
+})
+
+/**
+ * __adis_update_bits() - Wrapper macro for __adis_update_bits_base - Unlocked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * This macro evaluates the size of @val at compile time and calls
+ * __adis_update_bits_base() accordingly. Be aware that using macros/defines for
+ * @val can lead to undesired behavior if the register to update is 16 bits.
+ */
+#define __adis_update_bits(adis, reg, mask, val) ({ \
+ BUILD_BUG_ON(sizeof(val) == 1 || sizeof(val) == 8); \
+ __builtin_choose_expr(sizeof(val) == 4, \
+ __adis_update_bits_base(adis, reg, mask, val, 4), \
+ __adis_update_bits_base(adis, reg, mask, val, 2)); \
+})
+
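For context, a minimal usage sketch of the new macro (the register macro, bit value, and helper name below are hypothetical, not part of this patch): because the macro dispatches on sizeof(val), the value must be typed to the register width.

/* Hypothetical driver helper; EX_REG_MSC_CTRL and EX_DR_POL are made up. */
static int ex_set_dr_polarity(struct adis *adis, bool active_high)
{
	u16 val = active_high ? EX_DR_POL : 0;

	/* sizeof(val) == 2, so this resolves to a 16-bit register update */
	return adis_update_bits(adis, EX_REG_MSC_CTRL, EX_DR_POL, val);
}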
int adis_enable_irq(struct adis *adis, bool enable);
int __adis_check_status(struct adis *adis);
int __adis_initial_startup(struct adis *adis);
@@ -441,18 +504,25 @@ int adis_single_conversion(struct iio_dev *indio_dev,
* @en burst mode enabled
* @reg_cmd register command that triggers burst
* @extra_len extra length to account in the SPI RX buffer
+ * @burst_max_len holds the maximum burst size when the device supports
+ * more than one burst mode with different sizes
*/
struct adis_burst {
bool en;
unsigned int reg_cmd;
- unsigned int extra_len;
+ const u32 extra_len;
+ const u32 burst_max_len;
};
+int
+devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler);
int adis_setup_buffer_and_trigger(struct adis *adis,
struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *));
void adis_cleanup_buffer_and_trigger(struct adis *adis,
struct iio_dev *indio_dev);
+int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
void adis_remove_trigger(struct adis *adis);
@@ -461,6 +531,13 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
#else /* CONFIG_IIO_BUFFER */
+static inline int
+devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler)
+{
+ return 0;
+}
+
static inline int adis_setup_buffer_and_trigger(struct adis *adis,
struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *))
{
@@ -472,6 +549,12 @@ static inline void adis_cleanup_buffer_and_trigger(struct adis *adis,
{
}
+static inline int devm_adis_probe_trigger(struct adis *adis,
+ struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
static inline int adis_probe_trigger(struct adis *adis,
struct iio_dev *indio_dev)
{
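To illustrate the new devres variant, a hedged probe-path sketch (the driver state type, trigger handler, and chip setup are assumptions, not from this patch); the point is that no remove-path call to adis_cleanup_buffer_and_trigger() is needed:

static int ex_adis_probe(struct spi_device *spi)
{
	struct ex_state *st;		/* hypothetical driver state holding a struct adis */
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;
	st = iio_priv(indio_dev);

	/* ... adis_init() and chip-specific setup elided ... */

	ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
						 ex_trigger_handler);
	if (ret)
		return ret;

	/* devres unwinds everything; no remove-path cleanup calls needed */
	return devm_iio_device_register(&spi->dev, indio_dev);
}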
diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h
index 764659e01b68..1fc1efa7799d 100644
--- a/include/linux/iio/kfifo_buf.h
+++ b/include/linux/iio/kfifo_buf.h
@@ -9,6 +9,5 @@ struct iio_buffer *iio_kfifo_allocate(void);
void iio_kfifo_free(struct iio_buffer *r);
struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev);
-void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r);
#endif
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 84995e2967ac..cad8325903f9 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -141,9 +141,6 @@ int __devm_iio_trigger_register(struct device *dev,
**/
void iio_trigger_unregister(struct iio_trigger *trig_info);
-void devm_iio_trigger_unregister(struct device *dev,
- struct iio_trigger *trig_info);
-
/**
* iio_trigger_set_immutable() - set an immutable trigger on destination
*
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
index 238ad30ce166..e99c91799359 100644
--- a/include/linux/iio/triggered_buffer.h
+++ b/include/linux/iio/triggered_buffer.h
@@ -18,7 +18,5 @@ int devm_iio_triggered_buffer_setup(struct device *dev,
irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
const struct iio_buffer_setup_ops *ops);
-void devm_iio_triggered_buffer_cleanup(struct device *dev,
- struct iio_dev *indio_dev);
#endif
diff --git a/include/linux/ima.h b/include/linux/ima.h
index aefe758f4466..9164e1534ec9 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -18,6 +18,7 @@ extern int ima_file_check(struct file *file, int mask);
extern void ima_post_create_tmpfile(struct inode *inode);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
+extern int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot);
extern int ima_load_data(enum kernel_load_data_id id);
extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
@@ -70,6 +71,12 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
return 0;
}
+static inline int ima_file_mprotect(struct vm_area_struct *vma,
+ unsigned long prot)
+{
+ return 0;
+}
+
static inline int ima_load_data(enum kernel_load_data_id id)
{
return 0;
diff --git a/include/linux/input/gp2ap002a00f.h b/include/linux/input/gp2ap002a00f.h
deleted file mode 100644
index 3614a13a8297..000000000000
--- a/include/linux/input/gp2ap002a00f.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _GP2AP002A00F_H_
-#define _GP2AP002A00F_H_
-
-#include <linux/i2c.h>
-
-#define GP2A_I2C_NAME "gp2ap002a00f"
-
-/**
- * struct gp2a_platform_data - Sharp gp2ap002a00f proximity platform data
- * @vout_gpio: The gpio connected to the object detected pin (VOUT)
- * @wakeup: Set to true if the proximity can wake the device from suspend
- * @hw_setup: Callback for setting up hardware such as gpios and vregs
- * @hw_shutdown: Callback for properly shutting down hardware
- */
-struct gp2a_platform_data {
- int vout_gpio;
- bool wakeup;
- int (*hw_setup)(struct i2c_client *client);
- int (*hw_shutdown)(struct i2c_client *client);
-};
-
-#endif
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index 9e409bb13642..3b8580bd33c1 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -100,6 +100,11 @@ static inline bool input_is_mt_axis(int axis)
bool input_mt_report_slot_state(struct input_dev *dev,
unsigned int tool_type, bool active);
+static inline void input_mt_report_slot_inactive(struct input_dev *dev)
+{
+ input_mt_report_slot_state(dev, 0, false);
+}
+
void input_mt_report_finger_count(struct input_dev *dev, int count);
void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count);
void input_mt_drop_unused(struct input_dev *dev);
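A brief usage sketch of the new helper in a slot-based (MT protocol B) driver, assuming slot holds the contact that was just lifted:

	input_mt_slot(dev, slot);
	input_mt_report_slot_inactive(dev);	/* was: input_mt_report_slot_state(dev, 0, false) */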
diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
new file mode 100644
index 000000000000..43e6ea591975
--- /dev/null
+++ b/include/linux/instrumented.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This header provides generic wrappers for the memory access instrumentation
+ * that the compiler cannot emit itself: KASAN and KCSAN checks.
+ */
+#ifndef _LINUX_INSTRUMENTED_H
+#define _LINUX_INSTRUMENTED_H
+
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+#include <linux/types.h>
+
+/**
+ * instrument_read - instrument regular read access
+ *
+ * Instrument a regular read access. The instrumentation should be inserted
+ * before the actual read happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_read(const volatile void *v, size_t size)
+{
+ kasan_check_read(v, size);
+ kcsan_check_read(v, size);
+}
+
+/**
+ * instrument_write - instrument regular write access
+ *
+ * Instrument a regular write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_write(v, size);
+}
+
+/**
+ * instrument_atomic_read - instrument atomic read access
+ *
+ * Instrument an atomic read access. The instrumentation should be inserted
+ * before the actual read happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
+{
+ kasan_check_read(v, size);
+ kcsan_check_atomic_read(v, size);
+}
+
+/**
+ * instrument_atomic_write - instrument atomic write access
+ *
+ * Instrument an atomic write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_atomic_write(v, size);
+}
+
+/**
+ * instrument_copy_to_user - instrument reads of copy_to_user
+ *
+ * Instrument reads from kernel memory that are due to copy_to_user() (and
+ * variants). The instrumentation must be inserted before the accesses.
+ *
+ * @to destination address
+ * @from source address
+ * @n number of bytes to copy
+ */
+static __always_inline void
+instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ kasan_check_read(from, n);
+ kcsan_check_read(from, n);
+}
+
+/**
+ * instrument_copy_from_user - instrument writes of copy_from_user
+ *
+ * Instrument writes to kernel memory that are due to copy_from_user() (and
+ * variants). The instrumentation should be inserted before the accesses.
+ *
+ * @to destination address
+ * @from source address
+ * @n number of bytes to copy
+ */
+static __always_inline void
+instrument_copy_from_user(const void *to, const void __user *from, unsigned long n)
+{
+ kasan_check_write(to, n);
+ kcsan_check_write(to, n);
+}
+
+#endif /* _LINUX_INSTRUMENTED_H */
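As a rough sketch of intended use, mirroring how an uninstrumented copy primitive might be wrapped (the wrapper name ex_copy_to_user is hypothetical; raw_copy_to_user() is the arch-provided uninstrumented primitive):

/* Hypothetical wrapper around an uninstrumented arch primitive. */
static inline unsigned long
ex_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	instrument_copy_to_user(to, from, n);	/* check the kernel-side read */
	return raw_copy_to_user(to, from, n);	/* raw, uninstrumented copy */
}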
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 980234ae0312..4100bd224f5c 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -19,6 +19,7 @@
#include <linux/iommu.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmar.h>
+#include <linux/ioasid.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -42,6 +43,9 @@
#define DMA_FL_PTE_PRESENT BIT_ULL(0)
#define DMA_FL_PTE_XD BIT_ULL(63)
+#define ADDR_WIDTH_5LEVEL (57)
+#define ADDR_WIDTH_4LEVEL (48)
+
#define CONTEXT_TT_MULTI_LEVEL 0
#define CONTEXT_TT_DEV_IOTLB 1
#define CONTEXT_TT_PASS_THROUGH 2
@@ -166,6 +170,7 @@
#define ecap_smpwc(e) (((e) >> 48) & 0x1)
#define ecap_flts(e) (((e) >> 47) & 0x1)
#define ecap_slts(e) (((e) >> 46) & 0x1)
+#define ecap_vcs(e) (((e) >> 44) & 0x1)
#define ecap_smts(e) (((e) >> 43) & 0x1)
#define ecap_dit(e) ((e >> 41) & 0x1)
#define ecap_pasid(e) ((e >> 40) & 0x1)
@@ -191,6 +196,9 @@
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
+/* Virtual command interface capability */
+#define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */
+
/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET 60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
@@ -284,6 +292,9 @@
/* PRS_REG */
#define DMA_PRS_PPR ((u32)1)
+#define DMA_PRS_PRO ((u32)2)
+
+#define DMA_VCS_PAS ((u64)1)
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
do { \
@@ -324,6 +335,8 @@ enum {
#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
+#define QI_IWD_FENCE (((u64)1) << 6)
+#define QI_IWD_PRQ_DRAIN (((u64)1) << 7)
#define QI_IOTLB_DID(did) (((u64)did) << 16)
#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
@@ -331,7 +344,7 @@ enum {
#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
-#define QI_IOTLB_AM(am) (((u8)am))
+#define QI_IOTLB_AM(am) (((u8)am) & 0x3f)
#define QI_CC_FM(fm) (((u64)fm) << 48)
#define QI_CC_SID(sid) (((u64)sid) << 32)
@@ -350,16 +363,21 @@ enum {
#define QI_PC_DID(did) (((u64)did) << 16)
#define QI_PC_GRAN(gran) (((u64)gran) << 4)
-#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0))
-#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1))
+/* PASID cache invalidation granu */
+#define QI_PC_ALL_PASIDS 0
+#define QI_PC_PASID_SEL 1
#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
-#define QI_EIOTLB_AM(am) (((u64)am))
+#define QI_EIOTLB_AM(am) (((u64)am) & 0x3f)
#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
#define QI_EIOTLB_DID(did) (((u64)did) << 16)
#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4)
+/* QI Dev-IOTLB inv granu */
+#define QI_DEV_IOTLB_GRAN_ALL 1
+#define QI_DEV_IOTLB_GRAN_PASID_SEL 0
+
#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
@@ -480,6 +498,23 @@ struct context_entry {
u64 hi;
};
+/* si_domain contains multiple devices */
+#define DOMAIN_FLAG_STATIC_IDENTITY BIT(0)
+
+/*
+ * When VT-d works in the scalable mode, it allows DMA translation to
+ * happen through either first level or second level page table. This
+ * bit marks that the DMA translation for the domain goes through the
+ * first level page table, otherwise, it goes through the second level.
+ */
+#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1)
+
+/*
+ * This domain represents a virtual machine that demands IOMMU nested
+ * translation mode support.
+ */
+#define DOMAIN_FLAG_NESTING_MODE BIT(2)
+
struct dmar_domain {
int nid; /* node id */
@@ -529,6 +564,7 @@ struct intel_iommu {
u64 reg_size; /* size of hw register set */
u64 cap;
u64 ecap;
+ u64 vccap;
u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
raw_spinlock_t register_lock; /* protect register handling */
int seq_id; /* sequence id of the iommu */
@@ -549,6 +585,8 @@ struct intel_iommu {
#ifdef CONFIG_INTEL_IOMMU_SVM
struct page_req_dsc *prq;
unsigned char prq_name[16]; /* Name for PRQ interrupt */
+ struct completion prq_complete;
+ struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
#endif
struct q_inval *qi; /* Queued invalidation info */
u32 *iommu_state; /* Store iommu states between suspend and resume.*/
@@ -571,6 +609,7 @@ struct device_domain_info {
struct list_head auxiliary_domains; /* auxiliary domains
* attached to this device
*/
+ u32 segment; /* PCI segment number */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
u16 pfsid; /* SRIOV physical function source ID */
@@ -595,6 +634,12 @@ static inline void __iommu_flush_cache(
clflush_cache_range(addr, size);
}
+/* Convert generic struct iommu_domain to private struct dmar_domain */
+static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct dmar_domain, domain);
+}
+
/*
* 0: readable
* 1: writable
@@ -653,9 +698,23 @@ extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type);
extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
u16 qdep, u64 addr, unsigned mask);
+
void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
unsigned long npages, bool ih);
-extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+
+void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u32 pasid, u16 qdep, u64 addr,
+ unsigned int size_order, u64 granu);
+void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
+ int pasid);
+
+int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
+ unsigned int count, unsigned long options);
+/*
+ * Options used in qi_submit_sync:
+ * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
+ */
+#define QI_OPT_WAIT_DRAIN BIT(0)
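For orientation, a sketch of the updated calling convention (the descriptor contents are illustrative only; iommu and did are assumed to be in scope):

	/* Submit one domain-selective IOTLB invalidation, draining the PRQ first. */
	struct qi_desc desc = {};

	desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_GRAN(DMA_TLB_DSI_FLUSH) | QI_IOTLB_TYPE;
	qi_submit_sync(iommu, &desc, 1, QI_OPT_WAIT_DRAIN);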
extern int dmar_ir_support(void);
@@ -667,12 +726,19 @@ int for_each_device_domain(int (*fn)(struct device_domain_info *info,
void iommu_flush_write_buffer(struct intel_iommu *iommu);
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
struct dmar_domain *find_domain(struct device *dev);
+struct device_domain_info *get_domain_info(struct device *dev);
#ifdef CONFIG_INTEL_IOMMU_SVM
extern void intel_svm_check(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);
-
+int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
+ struct iommu_gpasid_bind_data *data);
+int intel_svm_unbind_gpasid(struct device *dev, int pasid);
+struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
+ void *drvdata);
+void intel_svm_unbind(struct iommu_sva *handle);
+int intel_svm_get_pasid(struct iommu_sva *handle);
struct svm_dev_ops;
struct intel_svm_dev {
@@ -680,6 +746,8 @@ struct intel_svm_dev {
struct rcu_head rcu;
struct device *dev;
struct svm_dev_ops *ops;
+ struct iommu_sva sva;
+ int pasid;
int users;
u16 did;
u16 dev_iotlb:1;
@@ -689,9 +757,11 @@ struct intel_svm_dev {
struct intel_svm {
struct mmu_notifier notifier;
struct mm_struct *mm;
+
struct intel_iommu *iommu;
int flags;
int pasid;
+	int gpasid; /* Guest PASID, in case it differs from the host PASID */
struct list_head devs;
struct list_head list;
};
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
index d7c403d0dd27..c9e7e601950d 100644
--- a/include/linux/intel-svm.h
+++ b/include/linux/intel-svm.h
@@ -21,7 +21,6 @@ struct svm_dev_ops {
#define SVM_REQ_EXEC (1<<1)
#define SVM_REQ_PRIV (1<<0)
-
/*
* The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main"
* PASID for the current process. Even if a PASID already exists, a new one
@@ -44,90 +43,17 @@ struct svm_dev_ops {
* do such IOTLB flushes automatically.
*/
#define SVM_FLAG_SUPERVISOR_MODE (1<<1)
-
-#ifdef CONFIG_INTEL_IOMMU_SVM
-
-/**
- * intel_svm_bind_mm() - Bind the current process to a PASID
- * @dev: Device to be granted access
- * @pasid: Address for allocated PASID
- * @flags: Flags. Later for requesting supervisor mode, etc.
- * @ops: Callbacks to device driver
- *
- * This function attempts to enable PASID support for the given device.
- * If the @pasid argument is non-%NULL, a PASID is allocated for access
- * to the MM of the current process.
- *
- * By using a %NULL value for the @pasid argument, this function can
- * be used to simply validate that PASID support is available for the
- * given device — i.e. that it is behind an IOMMU which has the
- * requisite support, and is enabled.
- *
- * Page faults are handled transparently by the IOMMU code, and there
- * should be no need for the device driver to be involved. If a page
- * fault cannot be handled (i.e. is an invalid address rather than
- * just needs paging in), then the page request will be completed by
- * the core IOMMU code with appropriate status, and the device itself
- * can then report the resulting fault to its driver via whatever
- * mechanism is appropriate.
- *
- * Multiple calls from the same process may result in the same PASID
- * being re-used. A reference count is kept.
- */
-extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags,
- struct svm_dev_ops *ops);
-
-/**
- * intel_svm_unbind_mm() - Unbind a specified PASID
- * @dev: Device for which PASID was allocated
- * @pasid: PASID value to be unbound
- *
- * This function allows a PASID to be retired when the device no
- * longer requires access to the address space of a given process.
- *
- * If the use count for the PASID in question reaches zero, the
- * PASID is revoked and may no longer be used by hardware.
- *
- * Device drivers are required to ensure that no access (including
- * page requests) is currently outstanding for the PASID in question,
- * before calling this function.
+/*
+ * The SVM_FLAG_GUEST_MODE flag is used when a PASID bind is for guest
+ * processes. Compared to the host bind, the primary differences are:
+ * 1. mm life cycle management
+ * 2. fault reporting
*/
-extern int intel_svm_unbind_mm(struct device *dev, int pasid);
-
-/**
- * intel_svm_is_pasid_valid() - check if pasid is valid
- * @dev: Device for which PASID was allocated
- * @pasid: PASID value to be checked
- *
- * This function checks if the specified pasid is still valid. A
- * valid pasid means the backing mm is still having a valid user.
- * For kernel callers init_mm is always valid. for other mm, if mm->mm_users
- * is non-zero, it is valid.
- *
- * returns -EINVAL if invalid pasid, 0 if pasid ref count is invalid
- * 1 if pasid is valid.
+#define SVM_FLAG_GUEST_MODE (1<<2)
+/*
+ * The SVM_FLAG_GUEST_PASID flag is used when a guest has its own PASID space,
+ * which requires guest and host PASID translation in both directions.
*/
-extern int intel_svm_is_pasid_valid(struct device *dev, int pasid);
-
-#else /* CONFIG_INTEL_IOMMU_SVM */
-
-static inline int intel_svm_bind_mm(struct device *dev, int *pasid,
- int flags, struct svm_dev_ops *ops)
-{
- return -ENOSYS;
-}
-
-static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
-{
- BUG();
-}
-
-static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_INTEL_IOMMU_SVM */
-
-#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL))
+#define SVM_FLAG_GUEST_PASID (1<<3)
#endif /* __INTEL_SVM_H__ */
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
index d70a914cba11..3a63d98613fc 100644
--- a/include/linux/interconnect.h
+++ b/include/linux/interconnect.h
@@ -28,9 +28,14 @@ struct device;
struct icc_path *icc_get(struct device *dev, const int src_id,
const int dst_id);
struct icc_path *of_icc_get(struct device *dev, const char *name);
+struct icc_path *devm_of_icc_get(struct device *dev, const char *name);
+struct icc_path *of_icc_get_by_index(struct device *dev, int idx);
void icc_put(struct icc_path *path);
+int icc_enable(struct icc_path *path);
+int icc_disable(struct icc_path *path);
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
void icc_set_tag(struct icc_path *path, u32 tag);
+const char *icc_get_name(struct icc_path *path);
#else
@@ -46,10 +51,31 @@ static inline struct icc_path *of_icc_get(struct device *dev,
return NULL;
}
+static inline struct icc_path *devm_of_icc_get(struct device *dev,
+ const char *name)
+{
+ return NULL;
+}
+
+static inline struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
+{
+ return NULL;
+}
+
static inline void icc_put(struct icc_path *path)
{
}
+static inline int icc_enable(struct icc_path *path)
+{
+ return 0;
+}
+
+static inline int icc_disable(struct icc_path *path)
+{
+ return 0;
+}
+
static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
return 0;
@@ -59,6 +85,11 @@ static inline void icc_set_tag(struct icc_path *path, u32 tag)
{
}
+static inline const char *icc_get_name(struct icc_path *path)
+{
+ return NULL;
+}
+
#endif /* CONFIG_INTERCONNECT */
#endif /* __LINUX_INTERCONNECT_H */
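Putting the new helpers together, a hedged consumer sketch (the "cpu-mem" path name is hypothetical and would come from the device's DT binding):

	struct icc_path *path;
	int ret;

	path = devm_of_icc_get(dev, "cpu-mem");
	if (IS_ERR(path))
		return PTR_ERR(path);

	ret = icc_set_bw(path, 1000, 2000);	/* average/peak bandwidth in kBps */
	if (ret)
		return ret;

	icc_disable(path);	/* drop the request while keeping the path */
	icc_enable(path);	/* reassert the previously configured bandwidth */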
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 80f637c3a6f3..5db970b6615a 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -760,8 +760,10 @@ extern int arch_early_irq_init(void);
/*
* We want to know which function is an entrypoint of a hardirq or a softirq.
*/
-#define __irq_entry __attribute__((__section__(".irqentry.text")))
-#define __softirq_entry \
- __attribute__((__section__(".softirqentry.text")))
+#ifndef __irq_entry
+# define __irq_entry __attribute__((__section__(".irqentry.text")))
+#endif
+
+#define __softirq_entry __attribute__((__section__(".softirqentry.text")))
#endif
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index b336622612f3..0beaa3eba155 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/io.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
/*
@@ -99,7 +100,6 @@ io_mapping_unmap(void __iomem *vaddr)
#else
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
/* Create the io_mapping object*/
static inline struct io_mapping *
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index a5c219c29b10..4d1d3c3469e9 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -177,7 +177,7 @@ int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
const struct iomap_ops *ops);
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- loff_t start, loff_t len, const struct iomap_ops *ops);
+ u64 start, u64 len, const struct iomap_ops *ops);
loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
const struct iomap_ops *ops);
loff_t iomap_seek_data(struct inode *inode, loff_t offset,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7ef8b0bda695..5f0b7859d2eb 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -53,8 +53,6 @@ struct iommu_fault_event;
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
struct device *, unsigned long, int, void *);
-typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
- void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
struct iommu_domain_geometry {
@@ -171,25 +169,6 @@ enum iommu_dev_features {
#define IOMMU_PASID_INVALID (-1U)
-/**
- * struct iommu_sva_ops - device driver callbacks for an SVA context
- *
- * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
- * @mm_exit returns, the device must not issue any more transaction
- * with the PASID given as argument.
- *
- * The @mm_exit handler is allowed to sleep. Be careful about the
- * locks taken in @mm_exit, because they might lead to deadlocks if
- * they are also held when dropping references to the mm. Consider the
- * following call chain:
- * mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
- * Using mmput_async() prevents this scenario.
- *
- */
-struct iommu_sva_ops {
- iommu_mm_exit_handler_t mm_exit;
-};
-
#ifdef CONFIG_IOMMU_API
/**
@@ -223,8 +202,10 @@ struct iommu_iotlb_gather {
* @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
* queue
* @iova_to_phys: translate iova to physical address
- * @add_device: add device to iommu grouping
- * @remove_device: remove device from iommu grouping
+ * @probe_device: Add device to iommu driver handling
+ * @release_device: Remove device from iommu driver handling
+ * @probe_finalize: Do final setup work after the device is added to an IOMMU
+ * group and attached to the group's domain
* @device_group: find iommu group for a particular device
* @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes
@@ -248,6 +229,10 @@ struct iommu_iotlb_gather {
* @cache_invalidate: invalidate translation caches
* @sva_bind_gpasid: bind guest pasid and mm
* @sva_unbind_gpasid: unbind guest pasid and mm
+ * @def_domain_type: device default domain type, return value:
+ * - IOMMU_DOMAIN_IDENTITY: must use an identity domain
+ * - IOMMU_DOMAIN_DMA: must use a dma domain
+ * - 0: use the default setting
* @pgsize_bitmap: bitmap of all possible supported page sizes
* @owner: Driver module providing these ops
*/
@@ -269,8 +254,9 @@ struct iommu_ops {
void (*iotlb_sync)(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
- int (*add_device)(struct device *dev);
- void (*remove_device)(struct device *dev);
+ struct iommu_device *(*probe_device)(struct device *dev);
+ void (*release_device)(struct device *dev);
+ void (*probe_finalize)(struct device *dev);
struct iommu_group *(*device_group)(struct device *dev);
int (*domain_get_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);
@@ -318,6 +304,8 @@ struct iommu_ops {
int (*sva_unbind_gpasid)(struct device *dev, int pasid);
+ int (*def_domain_type)(struct device *dev);
+
unsigned long pgsize_bitmap;
struct module *owner;
};
@@ -369,6 +357,7 @@ struct iommu_fault_param {
*
* @fault_param: IOMMU detected device fault reporting data
* @fwspec: IOMMU fwspec data
+ * @iommu_dev: IOMMU device this device is linked to
* @priv: IOMMU Driver private data
*
* TODO: migrate other per device data pointers under iommu_dev_data, e.g.
@@ -378,6 +367,7 @@ struct dev_iommu {
struct mutex lock;
struct iommu_fault_param *fault_param;
struct iommu_fwspec *fwspec;
+ struct iommu_device *iommu_dev;
void *priv;
};
@@ -430,6 +420,7 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
+extern int bus_iommu_probe(struct bus_type *bus);
extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
@@ -466,12 +457,26 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
+/**
+ * iommu_map_sgtable - Map the given buffer to the IOMMU domain
+ * @domain: The IOMMU domain to perform the mapping
+ * @iova: The start address to map the buffer
+ * @sgt: The sg_table object describing the buffer
+ * @prot: IOMMU protection bits
+ *
+ * Creates a mapping at @iova for the buffer described by a scatterlist
+ * stored in the given sg_table object in the provided IOMMU domain.
+ */
+static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
+ unsigned long iova, struct sg_table *sgt, int prot)
+{
+ return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
+}
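A short usage sketch (domain, iova, sgt, and the total buffer size are assumed to be set up by the caller):

	size_t mapped;

	mapped = iommu_map_sgtable(domain, iova, sgt, IOMMU_READ | IOMMU_WRITE);
	if (mapped < size)	/* partial or failed map: undo what was mapped */
		iommu_unmap(domain, iova, mapped);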
+
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void generic_iommu_put_resv_regions(struct device *dev,
struct list_head *list);
-extern int iommu_request_dm_for_dev(struct device *dev);
-extern int iommu_request_dma_domain_for_dev(struct device *dev);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
@@ -515,7 +520,6 @@ extern int iommu_page_response(struct device *dev,
struct iommu_page_response *msg);
extern int iommu_group_id(struct iommu_group *group);
-extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
@@ -605,7 +609,6 @@ struct iommu_fwspec {
*/
struct iommu_sva {
struct device *dev;
- const struct iommu_sva_ops *ops;
};
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
@@ -653,8 +656,6 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev,
struct mm_struct *mm,
void *drvdata);
void iommu_sva_unbind_device(struct iommu_sva *handle);
-int iommu_sva_set_ops(struct iommu_sva *handle,
- const struct iommu_sva_ops *ops);
int iommu_sva_get_pasid(struct iommu_sva *handle);
#else /* CONFIG_IOMMU_API */
@@ -793,16 +794,6 @@ static inline int iommu_get_group_resv_regions(struct iommu_group *group,
return -ENODEV;
}
-static inline int iommu_request_dm_for_dev(struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline int iommu_request_dma_domain_for_dev(struct device *dev)
-{
- return -ENODEV;
-}
-
static inline void iommu_set_default_passthrough(bool cmd_line)
{
}
@@ -1058,12 +1049,6 @@ static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}
-static inline int iommu_sva_set_ops(struct iommu_sva *handle,
- const struct iommu_sva_ops *ops)
-{
- return -EINVAL;
-}
-
static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
{
return IOMMU_PASID_INVALID;
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index a9b9170b5dd2..6c2b06fe8beb 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -103,6 +103,7 @@ struct resource {
#define IORESOURCE_MEM_32BIT (3<<3)
#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
#define IORESOURCE_MEM_EXPANSIONROM (1<<6)
+#define IORESOURCE_MEM_DRIVER_MANAGED (1<<7)
/* PnP I/O specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IO_16BIT_ADDR (1<<0)
@@ -301,5 +302,11 @@ struct resource *devm_request_free_mem_region(struct device *dev,
struct resource *request_free_mem_region(struct resource *base,
unsigned long size, const char *name);
+#ifdef CONFIG_IO_STRICT_DEVMEM
+void revoke_devmem(struct resource *res);
+#else
+static inline void revoke_devmem(struct resource *res) { }
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_IOPORT_H */
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index c309f43bde45..a06a78c67f19 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -68,6 +68,8 @@ struct ipc_namespace {
struct user_namespace *user_ns;
struct ucounts *ucounts;
+ struct llist_node mnt_llist;
+
struct ns_common ns;
} __randomize_layout;
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index d7f7e436c3af..6384d2813ded 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -32,7 +32,7 @@
#ifdef CONFIG_TRACE_IRQFLAGS
extern void trace_hardirqs_on_prepare(void);
- extern void trace_hardirqs_off_prepare(void);
+ extern void trace_hardirqs_off_finish(void);
extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void);
# define lockdep_hardirq_context(p) ((p)->hardirq_context)
@@ -101,7 +101,7 @@ do { \
#else
# define trace_hardirqs_on_prepare() do { } while (0)
-# define trace_hardirqs_off_prepare() do { } while (0)
+# define trace_hardirqs_off_finish() do { } while (0)
# define trace_hardirqs_on() do { } while (0)
# define trace_hardirqs_off() do { } while (0)
# define lockdep_hardirq_context(p) 0
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index f613d8529863..d56128df2aff 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -766,6 +766,11 @@ struct journal_s
int j_errno;
/**
+ * @j_abort_mutex: Lock the whole aborting procedure.
+ */
+ struct mutex j_abort_mutex;
+
+ /**
* @j_sb_buffer: The first part of the superblock buffer.
*/
struct buffer_head *j_sb_buffer;
@@ -1247,7 +1252,6 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
#define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
* data write error in ordered
* mode */
-#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */
/*
* Function declarations for the journaling transaction and buffer
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 657a83b943f0..98338dc6b5d2 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -165,9 +165,9 @@ static inline int kallsyms_show_value(void)
#endif /*CONFIG_KALLSYMS*/
-static inline void print_ip_sym(unsigned long ip)
+static inline void print_ip_sym(const char *loglvl, unsigned long ip)
{
- printk("[<%px>] %pS\n", (void *) ip, (void *) ip);
+ printk("%s[<%px>] %pS\n", loglvl, (void *) ip, (void *) ip);
}
#endif /*_LINUX_KALLSYMS_H*/
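Call sites now choose the log level explicitly, for example (a representative call, assuming regs is in scope):

	print_ip_sym(KERN_INFO, instruction_pointer(regs));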
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 31314ca7c635..82522e996c76 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -11,8 +11,8 @@ struct task_struct;
#ifdef CONFIG_KASAN
+#include <linux/pgtable.h>
#include <asm/kasan.h>
-#include <asm/pgtable.h>
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index cc8fa109cfa3..9d12c970f18f 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -2,6 +2,8 @@
#ifndef __LINUX_KCONFIG_H
#define __LINUX_KCONFIG_H
+/* CONFIG_CC_VERSION_TEXT (Do not delete this comment. See help in Kconfig) */
+
#include <generated/autoconf.h>
#ifdef CONFIG_CPU_BIG_ENDIAN
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
new file mode 100644
index 000000000000..7b0b9c44f5f3
--- /dev/null
+++ b/include/linux/kcsan-checks.h
@@ -0,0 +1,430 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_KCSAN_CHECKS_H
+#define _LINUX_KCSAN_CHECKS_H
+
+/* Note: Only include what is already included by compiler.h. */
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+/*
+ * ACCESS TYPE MODIFIERS
+ *
+ * <none>: normal read access;
+ * WRITE : write access;
+ * ATOMIC: access is atomic;
+ * ASSERT: access is not a regular access, but an assertion;
+ * SCOPED: access is a scoped access;
+ */
+#define KCSAN_ACCESS_WRITE 0x1
+#define KCSAN_ACCESS_ATOMIC 0x2
+#define KCSAN_ACCESS_ASSERT 0x4
+#define KCSAN_ACCESS_SCOPED 0x8
+
+/*
+ * __kcsan_*: Always call into the runtime when KCSAN is enabled. These may be
+ * used even in compilation units that selectively disable KCSAN, but which must
+ * still use KCSAN to validate accesses to an address. Never use these in header
+ * files!
+ */
+#ifdef CONFIG_KCSAN
+/**
+ * __kcsan_check_access - check generic access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ * @type: access type modifier
+ */
+void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
+
+/**
+ * kcsan_disable_current - disable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_disable_current(void);
+
+/**
+ * kcsan_enable_current - re-enable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_enable_current(void);
+void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */
+
+/**
+ * kcsan_nestable_atomic_begin - begin nestable atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_nestable_atomic_begin(void);
+
+/**
+ * kcsan_nestable_atomic_end - end nestable atomic region
+ */
+void kcsan_nestable_atomic_end(void);
+
+/**
+ * kcsan_flat_atomic_begin - begin flat atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_flat_atomic_begin(void);
+
+/**
+ * kcsan_flat_atomic_end - end flat atomic region
+ */
+void kcsan_flat_atomic_end(void);
+
+/**
+ * kcsan_atomic_next - consider following accesses as atomic
+ *
+ * Force treating the next n memory accesses for the current context as atomic
+ * operations.
+ *
+ * @n: number of following memory accesses to treat as atomic.
+ */
+void kcsan_atomic_next(int n);
+
+/**
+ * kcsan_set_access_mask - set access mask
+ *
+ * Set the access mask for all accesses for the current context if non-zero.
+ * Only value changes to bits set in the mask will be reported.
+ *
+ * @mask: bitmask
+ */
+void kcsan_set_access_mask(unsigned long mask);
+
+/* Scoped access information. */
+struct kcsan_scoped_access {
+ struct list_head list;
+ const volatile void *ptr;
+ size_t size;
+ int type;
+};
+/*
+ * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
+ * out of scope; relies on attribute "cleanup", which is supported by all
+ * compilers that support KCSAN.
+ */
+#define __kcsan_cleanup_scoped \
+ __maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)))
+
+/**
+ * kcsan_begin_scoped_access - begin scoped access
+ *
+ * Begin scoped access and initialize @sa, which will cause KCSAN to
+ * continuously check the memory range in the current thread until
+ * kcsan_end_scoped_access() is called for @sa.
+ *
+ * Scoped accesses are implemented by appending @sa to an internal list for the
+ * current execution context, and then checked on every call into the KCSAN
+ * runtime.
+ *
+ * @ptr: address of access
+ * @size: size of access
+ * @type: access type modifier
+ * @sa: struct kcsan_scoped_access to use for the scope of the access
+ */
+struct kcsan_scoped_access *
+kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
+ struct kcsan_scoped_access *sa);
+
+/**
+ * kcsan_end_scoped_access - end scoped access
+ *
+ * End a scoped access, which will stop KCSAN checking the memory range.
+ * Requires that kcsan_begin_scoped_access() was previously called once for @sa.
+ *
+ * @sa: a previously initialized struct kcsan_scoped_access
+ */
+void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
+
+
+#else /* CONFIG_KCSAN */
+
+static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
+ int type) { }
+
+static inline void kcsan_disable_current(void) { }
+static inline void kcsan_enable_current(void) { }
+static inline void kcsan_enable_current_nowarn(void) { }
+static inline void kcsan_nestable_atomic_begin(void) { }
+static inline void kcsan_nestable_atomic_end(void) { }
+static inline void kcsan_flat_atomic_begin(void) { }
+static inline void kcsan_flat_atomic_end(void) { }
+static inline void kcsan_atomic_next(int n) { }
+static inline void kcsan_set_access_mask(unsigned long mask) { }
+
+struct kcsan_scoped_access { };
+#define __kcsan_cleanup_scoped __maybe_unused
+static inline struct kcsan_scoped_access *
+kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
+ struct kcsan_scoped_access *sa) { return sa; }
+static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }
+
+#endif /* CONFIG_KCSAN */
+
+#ifdef __SANITIZE_THREAD__
+/*
+ * Only calls into the runtime when the particular compilation unit has KCSAN
+ * instrumentation enabled. May be used in header files.
+ */
+#define kcsan_check_access __kcsan_check_access
+
+/*
+ * Only use these to disable KCSAN for accesses in the current compilation unit;
+ * calls into libraries may still perform KCSAN checks.
+ */
+#define __kcsan_disable_current kcsan_disable_current
+#define __kcsan_enable_current kcsan_enable_current_nowarn
+#else
+static inline void kcsan_check_access(const volatile void *ptr, size_t size,
+ int type) { }
+static inline void __kcsan_enable_current(void) { }
+static inline void __kcsan_disable_current(void) { }
+#endif
+
+/**
+ * __kcsan_check_read - check regular read access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)
+
+/**
+ * __kcsan_check_write - check regular write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_write(ptr, size) \
+ __kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/**
+ * kcsan_check_read - check regular read access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)
+
+/**
+ * kcsan_check_write - check regular write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/*
+ * Check for atomic accesses: if atomic accesses are not ignored, this simply
+ * aliases to kcsan_check_access(), otherwise becomes a no-op.
+ */
+#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
+#define kcsan_check_atomic_read(...) do { } while (0)
+#define kcsan_check_atomic_write(...) do { } while (0)
+#else
+#define kcsan_check_atomic_read(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
+#define kcsan_check_atomic_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
+#endif
+
+/**
+ * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
+ *
+ * Assert that there are no concurrent writes to @var; other readers are
+ * allowed. This assertion can be used to specify properties of concurrent code,
+ * where violation cannot be detected as a normal data race.
+ *
+ * For example, if we only have a single writer, but multiple concurrent
+ * readers, to avoid data races, all these accesses must be marked; even
+ * concurrent marked writes racing with the single writer are bugs.
+ * Unfortunately, due to being marked, they are no longer data races. For cases
+ * like these, we can use the macro as follows:
+ *
+ * .. code-block:: c
+ *
+ * void writer(void) {
+ * spin_lock(&update_foo_lock);
+ * ASSERT_EXCLUSIVE_WRITER(shared_foo);
+ * WRITE_ONCE(shared_foo, ...);
+ * spin_unlock(&update_foo_lock);
+ * }
+ * void reader(void) {
+ * // update_foo_lock does not need to be held!
+ * ... = READ_ONCE(shared_foo);
+ * }
+ *
+ * Note: ASSERT_EXCLUSIVE_WRITER_SCOPED(), if applicable, performs more thorough
+ * checking when a clear scope exists in which no concurrent writes are expected.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER(var) \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
+
+/*
+ * Helper macros for the implementation of ASSERT_EXCLUSIVE_*_SCOPED(). @id is
+ * expected to be unique for the scope in which instances of kcsan_scoped_access
+ * are declared.
+ */
+#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix
+#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id) \
+ struct kcsan_scoped_access __kcsan_scoped_name(id, _) \
+ __kcsan_cleanup_scoped; \
+ struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p) \
+ __maybe_unused = kcsan_begin_scoped_access( \
+ &(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type), \
+ &__kcsan_scoped_name(id, _))
+
+/**
+ * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope
+ *
+ * Scoped variant of ASSERT_EXCLUSIVE_WRITER().
+ *
+ * Assert that there are no concurrent writes to @var for the duration of the
+ * scope in which it is introduced. This provides a better way to fully cover
+ * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and
+ * increases the likelihood for KCSAN to detect racing accesses.
+ *
+ * For example, it allows finding race-condition bugs that only occur due to
+ * state changes within the scope itself:
+ *
+ * .. code-block:: c
+ *
+ * void writer(void) {
+ * spin_lock(&update_foo_lock);
+ * {
+ * ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo);
+ * WRITE_ONCE(shared_foo, 42);
+ * ...
+ * // shared_foo should still be 42 here!
+ * }
+ * spin_unlock(&update_foo_lock);
+ * }
+ * void buggy(void) {
+ * if (READ_ONCE(shared_foo) == 42)
+ * WRITE_ONCE(shared_foo, 1); // bug!
+ * }
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var) \
+ __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__)
+
+/**
+ * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
+ *
+ * Assert that there are no concurrent accesses to @var (no readers nor
+ * writers). This assertion can be used to specify properties of concurrent
+ * code, where violation cannot be detected as a normal data race.
+ *
+ * For example, where exclusive access is expected after determining no other
+ * users of an object are left, but the object is not actually freed. We can
+ * check that this property actually holds as follows:
+ *
+ * .. code-block:: c
+ *
+ * if (refcount_dec_and_test(&obj->refcnt)) {
+ * ASSERT_EXCLUSIVE_ACCESS(*obj);
+ * do_some_cleanup(obj);
+ * release_for_reuse(obj);
+ * }
+ *
+ * Note: ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough
+ * checking when a clear scope exists in which no concurrent accesses are expected.
+ *
+ * Note: For cases where the object is freed, `KASAN <kasan.html>`_ is a better
+ * fit to detect use-after-free bugs.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_ACCESS(var) \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
+
+/**
+ * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope
+ *
+ * Scoped variant of ASSERT_EXCLUSIVE_ACCESS().
+ *
+ * Assert that there are no concurrent accesses to @var (no readers nor writers)
+ * for the entire duration of the scope in which it is introduced. This provides
+ * a better way to fully cover the enclosing scope, compared to multiple
+ * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect
+ * racing accesses.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var) \
+ __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__)
+
+/**
+ * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
+ *
+ * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER().
+ *
+ * Assert that there are no concurrent writes to a subset of bits in @var;
+ * concurrent readers are permitted. This assertion captures more detailed
+ * bit-level properties, compared to the other (word granularity) assertions.
+ * Only the bits set in @mask are checked for concurrent modifications, while
+ * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits
+ * are ignored.
+ *
+ * Use this for variables where some bits must not be modified concurrently,
+ * yet other bits are expected to be modified concurrently.
+ *
+ * For example, variables where, after initialization, some bits are read-only,
+ * but other bits may still be modified concurrently. A reader may wish to
+ * assert that this is true as follows:
+ *
+ * .. code-block:: c
+ *
+ * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ * foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed
+ * to access the masked bits only, and KCSAN optimistically assumes it is
+ * therefore safe, even in the presence of data races, and marking it with
+ * READ_ONCE() is optional from KCSAN's point of view. We caution, however, that
+ * it may still be advisable to do so, since we cannot reason about all compiler
+ * optimizations when it comes to bit manipulations (on the reader and writer
+ * side). If we are sure nothing can go wrong, the above can be written
+ * simply as:
+ *
+ * .. code-block:: c
+ *
+ * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ * foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ * Another example, where this may be used, is when certain bits of @var may
+ * only be modified when holding the appropriate lock, but other bits may still
+ * be modified concurrently. Writers, where other bits may change concurrently,
+ * could use the assertion as follows:
+ *
+ * .. code-block:: c
+ *
+ * spin_lock(&foo_lock);
+ * ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
+ * old_flags = flags;
+ * new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
+ * if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
+ * spin_unlock(&foo_lock);
+ *
+ * @var: variable to assert on
+ * @mask: only check for modifications to bits set in @mask
+ */
+#define ASSERT_EXCLUSIVE_BITS(var, mask) \
+ do { \
+ kcsan_set_access_mask(mask); \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
+ kcsan_set_access_mask(0); \
+ kcsan_atomic_next(1); \
+ } while (0)
+
+#endif /* _LINUX_KCSAN_CHECKS_H */
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
new file mode 100644
index 000000000000..53340d8789f9
--- /dev/null
+++ b/include/linux/kcsan.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_KCSAN_H
+#define _LINUX_KCSAN_H
+
+#include <linux/kcsan-checks.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KCSAN
+
+/*
+ * Context for each thread of execution: for tasks, this is stored in
+ * task_struct, and interrupts access internal per-CPU storage.
+ */
+struct kcsan_ctx {
+ int disable_count; /* disable counter */
+ int atomic_next; /* number of following atomic ops */
+
+ /*
+ * We distinguish between: (a) nestable atomic regions that may contain
+ * other nestable regions; and (b) flat atomic regions that do not keep
+ * track of nesting. Both (a) and (b) are entirely independent of each
+ * other, and a flat region may be started in a nestable region or
+ * vice-versa.
+ *
+ * This is required because, for example, in the annotations for
+ * seqlocks, we declare seqlock writer critical sections as (a) nestable
+ * atomic regions, but reader critical sections as (b) flat atomic
+ * regions, but have encountered cases where seqlock reader critical
+ * sections are contained within writer critical sections (the opposite
+ * may be possible, too).
+ *
+ * To support these cases, we independently track the depth of nesting
+ * for (a), and whether the leaf level is flat for (b).
+ */
+ int atomic_nest_count;
+ bool in_flat_atomic;
+
+ /*
+ * Access mask for all accesses if non-zero.
+ */
+ unsigned long access_mask;
+
+ /* List of scoped accesses. */
+ struct list_head scoped_accesses;
+};
+
+/**
+ * kcsan_init - initialize KCSAN runtime
+ */
+void kcsan_init(void);
+
+#else /* CONFIG_KCSAN */
+
+static inline void kcsan_init(void) { }
+
+#endif /* CONFIG_KCSAN */
+
+#endif /* _LINUX_KCSAN_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 9b7a8d74a9d6..82d91547d122 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -520,6 +520,12 @@ static inline u32 int_sqrt64(u64 x)
}
#endif
+#ifdef CONFIG_SMP
+extern unsigned int sysctl_oops_all_cpu_backtrace;
+#else
+#define sysctl_oops_all_cpu_backtrace 0
+#endif /* CONFIG_SMP */
+
extern void bust_spinlocks(int yes);
extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
extern int panic_timeout;
@@ -528,6 +534,8 @@ extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
extern int panic_on_warn;
+extern unsigned long panic_on_taint;
+extern bool panic_on_taint_nousertaint;
extern int sysctl_panic_on_rcu_stall;
extern int sysctl_panic_on_stackoverflow;
@@ -596,6 +604,7 @@ extern enum system_states {
#define TAINT_AUX 16
#define TAINT_RANDSTRUCT 17
#define TAINT_FLAGS_COUNT 18
+#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1)
struct taint_flag {
char c_true; /* character printed when tainted */
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 1776eb2e43a4..ea67910ae6b7 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -208,7 +208,7 @@ struct crash_mem_range {
struct crash_mem {
unsigned int max_nr_ranges;
unsigned int nr_ranges;
- struct crash_mem_range ranges[0];
+ struct crash_mem_range ranges[];
};
extern int crash_exclude_mem_range(struct crash_mem *mem,
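With a true flexible array member, allocations can use struct_size() from <linux/overflow.h> instead of open-coded "+ nr * sizeof(elem)" arithmetic; a minimal sketch (nr is assumed to be in scope):

	struct crash_mem *mem;

	mem = kzalloc(struct_size(mem, ranges, nr), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	mem->max_nr_ranges = nr;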
diff --git a/include/linux/key.h b/include/linux/key.h
index 6cf8e71cf8b7..0f2e24f13c2b 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -71,6 +71,23 @@ struct net;
#define KEY_PERM_UNDEF 0xffffffff
+/*
+ * The permissions required on a key that we're looking up.
+ */
+enum key_need_perm {
+ KEY_NEED_UNSPECIFIED, /* Needed permission unspecified */
+ KEY_NEED_VIEW, /* Require permission to view attributes */
+ KEY_NEED_READ, /* Require permission to read content */
+ KEY_NEED_WRITE, /* Require permission to update / modify */
+ KEY_NEED_SEARCH, /* Require permission to search (keyring) or find (key) */
+ KEY_NEED_LINK, /* Require permission to link */
+ KEY_NEED_SETATTR, /* Require permission to change attributes */
+ KEY_NEED_UNLINK, /* Require permission to unlink key */
+ KEY_SYSADMIN_OVERRIDE, /* Special: override by CAP_SYS_ADMIN */
+ KEY_AUTHTOKEN_OVERRIDE, /* Special: override by possession of auth token */
+ KEY_DEFER_PERM_CHECK, /* Special: permission check is deferred */
+};
+
struct seq_file;
struct user_struct;
struct signal_struct;
@@ -176,6 +193,9 @@ struct key {
struct list_head graveyard_link;
struct rb_node serial_node;
};
+#ifdef CONFIG_KEY_NOTIFICATIONS
+ struct watch_list *watchers; /* Entities watching this key for changes */
+#endif
struct rw_semaphore sem; /* change vs change sem */
struct key_user *user; /* owner of this key */
void *security; /* security data for this key */
@@ -417,20 +437,9 @@ static inline key_serial_t key_serial(const struct key *key)
extern void key_set_timeout(struct key *, unsigned);
extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
- key_perm_t perm);
+ enum key_need_perm need_perm);
extern void key_free_user_ns(struct user_namespace *);
-/*
- * The permissions required on a key that we're looking up.
- */
-#define KEY_NEED_VIEW 0x01 /* Require permission to view attributes */
-#define KEY_NEED_READ 0x02 /* Require permission to read content */
-#define KEY_NEED_WRITE 0x04 /* Require permission to update / modify */
-#define KEY_NEED_SEARCH 0x08 /* Require permission to search (keyring) or find (key) */
-#define KEY_NEED_LINK 0x10 /* Require permission to link */
-#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */
-#define KEY_NEED_ALL 0x3f /* All the above permissions */
-
static inline short key_read_state(const struct key *key)
{
/* Barrier versus mark_key_instantiated(). */
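With the permission bitmask #defines replaced by enum key_need_perm (and KEY_NEED_ALL dropped), lookup_user_key() callers now name exactly one requirement instead of OR-ing bits. A sketch of the updated call shape; the lookup flags argument is shown as 0 for brevity:

	key_ref_t kref;

	kref = lookup_user_key(id, 0, KEY_NEED_READ);
	if (IS_ERR(kref))
		return PTR_ERR(kref);
	/* ... use key_ref_to_ptr(kref), then key_ref_put(kref) ... */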
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index fc8d83e91379..6cba088bee24 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -29,7 +29,7 @@
#include <linux/uidgid.h>
#define UEVENT_HELPER_PATH_LEN 256
-#define UEVENT_NUM_ENVP 32 /* number of env pointers */
+#define UEVENT_NUM_ENVP 64 /* number of env pointers */
#define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */
#ifdef CONFIG_UEVENT_HELPER
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 594265bfd390..e210af75ee38 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -161,7 +161,7 @@ struct kretprobe_instance {
kprobe_opcode_t *ret_addr;
struct task_struct *task;
void *fp;
- char data[0];
+ char data[];
};
struct kretprobe_blackpoint {
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 8bbcaad7ef0f..65b81e0c494d 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -5,6 +5,8 @@
#include <linux/err.h>
#include <linux/sched.h>
+struct mm_struct;
+
__printf(4, 5)
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
void *data,
@@ -57,6 +59,7 @@ bool kthread_should_stop(void);
bool kthread_should_park(void);
bool __kthread_should_park(struct task_struct *k);
bool kthread_freezable_should_stop(bool *was_frozen);
+void *kthread_func(struct task_struct *k);
void *kthread_data(struct task_struct *k);
void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
@@ -198,6 +201,9 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
void kthread_destroy_worker(struct kthread_worker *worker);
+void kthread_use_mm(struct mm_struct *mm);
+void kthread_unuse_mm(struct mm_struct *mm);
+
struct cgroup_subsys_state;
#ifdef CONFIG_BLK_CGROUP
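kthread_use_mm()/kthread_unuse_mm() are the kthread-prefixed successors of use_mm()/unuse_mm(). A sketch of the expected bracketing, assuming a hypothetical thread function that is handed an mm:

	static int my_thread_fn(void *data)
	{
		struct mm_struct *mm = data;

		kthread_use_mm(mm);
		/* copy_from_user()/copy_to_user() against mm work here. */
		kthread_unuse_mm(mm);
		return 0;
	}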
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f43b59b1294c..d564855243d8 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -206,6 +206,7 @@ struct kvm_async_pf {
unsigned long addr;
struct kvm_arch_async_pf arch;
bool wakeup_all;
+ bool notpresent_injected;
};
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
@@ -318,7 +319,6 @@ struct kvm_vcpu {
bool preempted;
bool ready;
struct kvm_vcpu_arch arch;
- struct dentry *debugfs_dentry;
};
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
@@ -409,7 +409,7 @@ struct kvm_irq_routing_table {
* Array indexed by gsi. Each entry contains list of irq chips
* the gsi is connected to.
*/
- struct hlist_head map[0];
+ struct hlist_head map[];
};
#endif
@@ -888,7 +888,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
-void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
+void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
#endif
int kvm_arch_hardware_enable(void);
@@ -1421,8 +1421,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
-int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end, bool blockable);
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end);
#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index af832852e620..8bf5e59a7859 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -609,7 +609,7 @@ struct ata_host {
struct task_struct *eh_owner;
struct ata_port *simplex_claimed; /* channel owning the DMA */
- struct ata_port *ports[0];
+ struct ata_port *ports[];
};
struct ata_queued_cmd {
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index e894e74905f3..2614247a9781 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -195,9 +195,6 @@ struct klp_patch {
int klp_enable_patch(struct klp_patch *);
-void arch_klp_init_object_loaded(struct klp_patch *patch,
- struct klp_object *obj);
-
/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);
@@ -234,6 +231,11 @@ void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id);
struct klp_state *klp_get_prev_state(unsigned long id);
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+ const char *shstrtab, const char *strtab,
+ unsigned int symindex, unsigned int secindex,
+ const char *objname);
+
#else /* !CONFIG_LIVEPATCH */
static inline int klp_module_coming(struct module *mod) { return 0; }
@@ -242,6 +244,15 @@ static inline bool klp_patch_pending(struct task_struct *task) { return false; }
static inline void klp_update_patch_state(struct task_struct *task) {}
static inline void klp_copy_process(struct task_struct *child) {}
+static inline
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+ const char *shstrtab, const char *strtab,
+ unsigned int symindex, unsigned int secindex,
+ const char *objname)
+{
+ return 0;
+}
+
#endif /* CONFIG_LIVEPATCH */
#endif /* _LINUX_LIVEPATCH_H_ */
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 99d629fd9944..28f23b341c1c 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -75,6 +75,7 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_IBPKEY 13
#define LSM_AUDIT_DATA_IBENDPORT 14
#define LSM_AUDIT_DATA_LOCKDOWN 15
+#define LSM_AUDIT_DATA_NOTIFICATION 16
union {
struct path path;
struct dentry *dentry;
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 5616b2567aa7..6791813cd439 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -49,7 +49,8 @@ LSM_HOOK(int, 0, syslog, int type)
LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
const struct timezone *tz)
LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
-LSM_HOOK(int, 0, bprm_set_creds, struct linux_binprm *bprm)
+LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
+LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file)
LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm)
LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm)
@@ -190,6 +191,8 @@ LSM_HOOK(int, 0, kernel_post_read_file, struct file *file, char *buf,
loff_t size, enum kernel_read_file_id id)
LSM_HOOK(int, 0, task_fix_setuid, struct cred *new, const struct cred *old,
int flags)
+LSM_HOOK(int, 0, task_fix_setgid, struct cred *new, const struct cred * old,
+ int flags)
LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid)
LSM_HOOK(int, 0, task_getpgid, struct task_struct *p)
LSM_HOOK(int, 0, task_getsid, struct task_struct *p)
@@ -253,6 +256,15 @@ LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx,
u32 *ctxlen)
+#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
+LSM_HOOK(int, 0, post_notification, const struct cred *w_cred,
+ const struct cred *cred, struct watch_notification *n)
+#endif /* CONFIG_SECURITY && CONFIG_WATCH_QUEUE */
+
+#if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS)
+LSM_HOOK(int, 0, watch_key, struct key *key)
+#endif /* CONFIG_SECURITY && CONFIG_KEY_NOTIFICATIONS */
+
#ifdef CONFIG_SECURITY_NETWORK
LSM_HOOK(int, 0, unix_stream_connect, struct sock *sock, struct sock *other,
struct sock *newsk)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 44d5422c18e4..95b7c1d32062 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -34,40 +34,48 @@
*
* Security hooks for program execution operations.
*
- * @bprm_set_creds:
- * Save security information in the bprm->security field, typically based
- * on information about the bprm->file, for later use by the apply_creds
- * hook. This hook may also optionally check permissions (e.g. for
- * transitions between security domains).
- * This hook may be called multiple times during a single execve, e.g. for
- * interpreters. The hook can tell whether it has already been called by
- * checking to see if @bprm->security is non-NULL. If so, then the hook
- * may decide either to retain the security information saved earlier or
- * to replace it. The hook must set @bprm->secureexec to 1 if a "secure
- * exec" has happened as a result of this hook call. The flag is used to
- * indicate the need for a sanitized execution environment, and is also
- * passed in the ELF auxiliary table on the initial stack to indicate
- * whether libc should enable secure mode.
+ * @bprm_creds_for_exec:
+ * If the setup in prepare_exec_creds did not set up @bprm->cred->security
+ * properly for executing @bprm->file, update the LSM's portion of
+ * @bprm->cred->security to be what commit_creds needs to install for the
+ * new program. This hook may also optionally check permissions
+ * (e.g. for transitions between security domains).
+ * The hook must set @bprm->secureexec to 1 if AT_SECURE should be set to
+ * request libc enable secure mode.
+ * @bprm contains the linux_binprm structure.
+ * Return 0 if the hook is successful and permission is granted.
+ * @bprm_creds_from_file:
+ * If @file is setpcap, suid, sgid or otherwise marked to change
+ * privilege upon exec, update @bprm->cred to reflect that change.
+ * This is called after finding the binary that will be executed
+ * without an interpreter. This ensures that the credentials will not
+ * be derived from a script that the binary will need to reopen, which
+ * when reopened may end up being a completely different file. This
+ * hook may also optionally check permissions (e.g. for transitions
+ * between security domains).
+ * The hook must set @bprm->secureexec to 1 if AT_SECURE should be set to
+ * request libc enable secure mode.
+ * The hook must add to @bprm->per_clear any personality flags that
+ * should be cleared from current->personality.
* @bprm contains the linux_binprm structure.
* Return 0 if the hook is successful and permission is granted.
* @bprm_check_security:
* This hook mediates the point when a search for a binary handler will
- * begin. It allows a check the @bprm->security value which is set in the
- * preceding set_creds call. The primary difference from set_creds is
- * that the argv list and envp list are reliably available in @bprm. This
- * hook may be called multiple times during a single execve; and in each
- * pass set_creds is called first.
+ * begin. It allows a check against the @bprm->cred->security value
+ * which was set in the preceding creds_for_exec call. The argv list and
+ * envp list are reliably available in @bprm. This hook may be called
+ * multiple times during a single execve.
* @bprm contains the linux_binprm structure.
* Return 0 if the hook is successful and permission is granted.
* @bprm_committing_creds:
* Prepare to install the new security attributes of a process being
* transformed by an execve operation, based on the old credentials
* pointed to by @current->cred and the information set in @bprm->cred by
- * the bprm_set_creds hook. @bprm points to the linux_binprm structure.
- * This hook is a good place to perform state changes on the process such
- * as closing open file descriptors to which access will no longer be
- * granted when the attributes are changed. This is called immediately
- * before commit_creds().
+ * the bprm_creds_for_exec hook. @bprm points to the linux_binprm
+ * structure. This hook is a good place to perform state changes on the
+ * process such as closing open file descriptors to which access will no
+ * longer be granted when the attributes are changed. This is called
+ * immediately before commit_creds().
* @bprm_committed_creds:
* Tidy up after the installation of the new security attributes of a
* process being transformed by an execve operation. The new credentials
@@ -651,6 +659,15 @@
 * @old is the set of credentials that are being replaced.
* @flags contains one of the LSM_SETID_* values.
* Return 0 on success.
+ * @task_fix_setgid:
+ * Update the module's state after setting one or more of the group
+ * identity attributes of the current process. The @flags parameter
+ * indicates which of the set*gid system calls invoked this hook.
+ * @new is the set of credentials that will be installed. Modifications
+ * should be made to this rather than to @current->cred.
+ * @old is the set of credentials that are being replaced.
+ * @flags contains one of the LSM_SETID_* values.
+ * Return 0 on success.
* @task_setpgid:
* Check permission before setting the process group identifier of the
* process @p to @pgid.
@@ -1437,6 +1454,20 @@
* @ctx is a pointer in which to place the allocated security context.
* @ctxlen points to the place to put the length of @ctx.
*
+ * Security hooks for the general notification queue:
+ *
+ * @post_notification:
+ * Check to see if a watch notification can be posted to a particular
+ * queue.
+ * @w_cred: The credentials of whoever set the watch.
+ * @cred: The credentials of the event triggerer.
+ * @n: The notification being posted.
+ *
+ * @watch_key:
+ * Check to see if a process is allowed to watch for event notifications
+ * from a key or keyring.
+ * @key: The key to watch.
+ *
* Security hooks for using the eBPF maps and programs functionalities through
* eBPF syscalls.
*
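For orientation, a security module wires the renamed hook through the usual hook table. A minimal sketch under the standard LSM registration conventions; the "demo" module and its empty hook body are hypothetical:

	#include <linux/lsm_hooks.h>
	#include <linux/binfmts.h>

	static int demo_bprm_creds_for_exec(struct linux_binprm *bprm)
	{
		/* Adjust bprm->cred->security for the new image if needed. */
		return 0;
	}

	static struct security_hook_list demo_hooks[] __lsm_ro_after_init = {
		LSM_HOOK_INIT(bprm_creds_for_exec, demo_bprm_creds_for_exec),
	};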
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 6bc37a731d27..017fae833d4a 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -41,7 +41,7 @@ enum memblock_flags {
/**
* struct memblock_region - represents a memory region
- * @base: physical address of the region
+ * @base: base address of the region
* @size: size of the region
* @flags: memory region attributes
* @nid: NUMA node id
@@ -50,7 +50,7 @@ struct memblock_region {
phys_addr_t base;
phys_addr_t size;
enum memblock_flags flags;
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+#ifdef CONFIG_NEED_MULTIPLE_NODES
int nid;
#endif
};
@@ -75,7 +75,7 @@ struct memblock_type {
* struct memblock - memblock allocator metadata
* @bottom_up: is bottom up direction?
* @current_limit: physical address of the current allocation limit
- * @memory: usabe memory regions
+ * @memory: usable memory regions
* @reserved: reserved memory regions
* @physmem: all physical memory
*/
@@ -215,7 +215,6 @@ static inline bool memblock_is_nomap(struct memblock_region *m)
return m->flags & MEMBLOCK_NOMAP;
}
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
@@ -234,7 +233,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
@@ -275,6 +273,9 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
for (; i != U64_MAX; \
__next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
+
+int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
+
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/**
@@ -310,10 +311,10 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
nid, flags, p_start, p_end, p_nid)
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
struct memblock_type *type, int nid);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
r->nid = nid;
@@ -332,7 +333,7 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
{
return 0;
}
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+#endif /* CONFIG_NEED_MULTIPLE_NODES */
/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
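With CONFIG_HAVE_MEMBLOCK_NODE_MAP gone, for_each_mem_pfn_range() is available unconditionally. A sketch of the common all-nodes iteration (MAX_NUMNODES acts as the "any node" wildcard for this iterator):

	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
		pr_info("node %d: pfn %lx-%lx\n", nid, start_pfn, end_pfn);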
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index bfe9533bb67e..e77197a62809 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -29,10 +29,7 @@ struct kmem_cache;
/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
- MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
- MEMCG_RSS,
- MEMCG_RSS_HUGE,
- MEMCG_SWAP,
+ MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
MEMCG_SOCK,
/* XXX: why are these zone and not node counters? */
MEMCG_KERNEL_STACK_KB,
@@ -358,16 +355,8 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
struct mem_cgroup *memcg);
-int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask, struct mem_cgroup **memcgp,
- bool compound);
-int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask, struct mem_cgroup **memcgp,
- bool compound);
-void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
- bool lrucare, bool compound);
-void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
- bool compound);
+int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
+
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);
@@ -568,7 +557,7 @@ struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
#ifdef CONFIG_MEMCG_SWAP
-extern int do_swap_account;
+extern bool cgroup_memory_noswap;
#endif
struct mem_cgroup *lock_page_memcg(struct page *page);
@@ -708,16 +697,17 @@ static inline void mod_lruvec_state(struct lruvec *lruvec,
static inline void __mod_lruvec_page_state(struct page *page,
enum node_stat_item idx, int val)
{
+ struct page *head = compound_head(page); /* rmap on tail pages */
pg_data_t *pgdat = page_pgdat(page);
struct lruvec *lruvec;
/* Untracked pages have no memcg, no lruvec. Update only the node */
- if (!page->mem_cgroup) {
+ if (!head->mem_cgroup) {
__mod_node_page_state(pgdat, idx, val);
return;
}
- lruvec = mem_cgroup_lruvec(page->mem_cgroup, pgdat);
+ lruvec = mem_cgroup_lruvec(head->mem_cgroup, pgdat);
__mod_lruvec_state(lruvec, idx, val);
}
@@ -847,37 +837,12 @@ static inline enum mem_cgroup_protection mem_cgroup_protected(
return MEMCG_PROT_NONE;
}
-static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask,
- struct mem_cgroup **memcgp,
- bool compound)
-{
- *memcgp = NULL;
- return 0;
-}
-
-static inline int mem_cgroup_try_charge_delay(struct page *page,
- struct mm_struct *mm,
- gfp_t gfp_mask,
- struct mem_cgroup **memcgp,
- bool compound)
+static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask)
{
- *memcgp = NULL;
return 0;
}
-static inline void mem_cgroup_commit_charge(struct page *page,
- struct mem_cgroup *memcg,
- bool lrucare, bool compound)
-{
-}
-
-static inline void mem_cgroup_cancel_charge(struct page *page,
- struct mem_cgroup *memcg,
- bool compound)
-{
-}
-
static inline void mem_cgroup_uncharge(struct page *page)
{
}
@@ -1277,6 +1242,19 @@ static inline void dec_lruvec_page_state(struct page *page,
mod_lruvec_page_state(page, idx, -1);
}
+static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
+{
+ struct mem_cgroup *memcg;
+
+ memcg = lruvec_memcg(lruvec);
+ if (!memcg)
+ return NULL;
+ memcg = parent_mem_cgroup(memcg);
+ if (!memcg)
+ return NULL;
+ return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
+}
+
#ifdef CONFIG_CGROUP_WRITEBACK
struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
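The new parent_lruvec() helper returns NULL above the root, so ancestor walks terminate naturally. A sketch, assuming a lruvec obtained elsewhere:

	struct lruvec *pos;

	/* Visit lruvec and every ancestor up to and including the root. */
	for (pos = lruvec; pos; pos = parent_lruvec(pos))
		; /* ... account against this level ... */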
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 93d9ada74ddd..375515803cd8 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -314,19 +314,13 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern int remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);
+extern int offline_and_remove_memory(int nid, u64 start, u64 size);
#else
-static inline bool is_mem_section_removable(unsigned long pfn,
- unsigned long nr_pages)
-{
- return false;
-}
-
static inline void try_offline_node(int nid) {}
static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
@@ -349,6 +343,8 @@ extern void __ref free_area_init_core_hotplug(int nid);
extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource);
+extern int add_memory_driver_managed(int nid, u64 start, u64 size,
+ const char *resource_name);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
extern void remove_pfn_range_from_zone(struct zone *zone,
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 8165278c348a..ea9c15b60a96 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -31,7 +31,7 @@ struct mm_struct;
 * Locking policy for interleave:
* In process context there is no locking because only the process accesses
* its own state. All vma manipulation is somewhat protected by a down_read on
- * mmap_sem.
+ * mmap_lock.
*
* Freeing policy:
* Mempolicy objects are reference counted. A mempolicy will be freed when
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index d01d1299e49d..ab76cdd06199 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -70,11 +70,11 @@ struct mfd_cell {
size_t pdata_size;
/* device properties passed to the sub devices drivers */
- struct property_entry *properties;
+ const struct property_entry *properties;
/*
* Device Tree compatible string
- * See: Documentation/devicetree/usage-model.txt Chapter 2.2 for details
+ * See: Documentation/devicetree/usage-model.rst Chapter 2.2 for details
*/
const char *of_compatible;
diff --git a/include/linux/mfd/mp2629.h b/include/linux/mfd/mp2629.h
new file mode 100644
index 000000000000..89b706900b57
--- /dev/null
+++ b/include/linux/mfd/mp2629.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2020 Monolithic Power Systems, Inc
+ */
+
+#ifndef __MP2629_H__
+#define __MP2629_H__
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+struct mp2629_data {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+enum mp2629_adc_chan {
+ MP2629_BATT_VOLT,
+ MP2629_SYSTEM_VOLT,
+ MP2629_INPUT_VOLT,
+ MP2629_BATT_CURRENT,
+ MP2629_INPUT_CURRENT,
+ MP2629_ADC_CHAN_END
+};
+
+#endif
diff --git a/include/linux/mfd/mt6358/core.h b/include/linux/mfd/mt6358/core.h
new file mode 100644
index 000000000000..c5a11b7458d4
--- /dev/null
+++ b/include/linux/mfd/mt6358/core.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6358_CORE_H__
+#define __MFD_MT6358_CORE_H__
+
+#define MT6358_REG_WIDTH 16
+
+struct irq_top_t {
+ int hwirq_base;
+ unsigned int num_int_regs;
+ unsigned int num_int_bits;
+ unsigned int en_reg;
+ unsigned int en_reg_shift;
+ unsigned int sta_reg;
+ unsigned int sta_reg_shift;
+ unsigned int top_offset;
+};
+
+struct pmic_irq_data {
+ unsigned int num_top;
+ unsigned int num_pmic_irqs;
+ unsigned short top_int_status_reg;
+ bool *enable_hwirq;
+ bool *cache_hwirq;
+};
+
+enum mt6358_irq_top_status_shift {
+ MT6358_BUCK_TOP = 0,
+ MT6358_LDO_TOP,
+ MT6358_PSC_TOP,
+ MT6358_SCK_TOP,
+ MT6358_BM_TOP,
+ MT6358_HK_TOP,
+ MT6358_AUD_TOP,
+ MT6358_MISC_TOP,
+};
+
+enum mt6358_irq_numbers {
+ MT6358_IRQ_VPROC11_OC = 0,
+ MT6358_IRQ_VPROC12_OC,
+ MT6358_IRQ_VCORE_OC,
+ MT6358_IRQ_VGPU_OC,
+ MT6358_IRQ_VMODEM_OC,
+ MT6358_IRQ_VDRAM1_OC,
+ MT6358_IRQ_VS1_OC,
+ MT6358_IRQ_VS2_OC,
+ MT6358_IRQ_VPA_OC,
+ MT6358_IRQ_VCORE_PREOC,
+ MT6358_IRQ_VFE28_OC = 16,
+ MT6358_IRQ_VXO22_OC,
+ MT6358_IRQ_VRF18_OC,
+ MT6358_IRQ_VRF12_OC,
+ MT6358_IRQ_VEFUSE_OC,
+ MT6358_IRQ_VCN33_OC,
+ MT6358_IRQ_VCN28_OC,
+ MT6358_IRQ_VCN18_OC,
+ MT6358_IRQ_VCAMA1_OC,
+ MT6358_IRQ_VCAMA2_OC,
+ MT6358_IRQ_VCAMD_OC,
+ MT6358_IRQ_VCAMIO_OC,
+ MT6358_IRQ_VLDO28_OC,
+ MT6358_IRQ_VA12_OC,
+ MT6358_IRQ_VAUX18_OC,
+ MT6358_IRQ_VAUD28_OC,
+ MT6358_IRQ_VIO28_OC,
+ MT6358_IRQ_VIO18_OC,
+ MT6358_IRQ_VSRAM_PROC11_OC,
+ MT6358_IRQ_VSRAM_PROC12_OC,
+ MT6358_IRQ_VSRAM_OTHERS_OC,
+ MT6358_IRQ_VSRAM_GPU_OC,
+ MT6358_IRQ_VDRAM2_OC,
+ MT6358_IRQ_VMC_OC,
+ MT6358_IRQ_VMCH_OC,
+ MT6358_IRQ_VEMC_OC,
+ MT6358_IRQ_VSIM1_OC,
+ MT6358_IRQ_VSIM2_OC,
+ MT6358_IRQ_VIBR_OC,
+ MT6358_IRQ_VUSB_OC,
+ MT6358_IRQ_VBIF28_OC,
+ MT6358_IRQ_PWRKEY = 48,
+ MT6358_IRQ_HOMEKEY,
+ MT6358_IRQ_PWRKEY_R,
+ MT6358_IRQ_HOMEKEY_R,
+ MT6358_IRQ_NI_LBAT_INT,
+ MT6358_IRQ_CHRDET,
+ MT6358_IRQ_CHRDET_EDGE,
+ MT6358_IRQ_VCDT_HV_DET,
+ MT6358_IRQ_RTC = 64,
+ MT6358_IRQ_FG_BAT0_H = 80,
+ MT6358_IRQ_FG_BAT0_L,
+ MT6358_IRQ_FG_CUR_H,
+ MT6358_IRQ_FG_CUR_L,
+ MT6358_IRQ_FG_ZCV,
+ MT6358_IRQ_FG_BAT1_H,
+ MT6358_IRQ_FG_BAT1_L,
+ MT6358_IRQ_FG_N_CHARGE_L,
+ MT6358_IRQ_FG_IAVG_H,
+ MT6358_IRQ_FG_IAVG_L,
+ MT6358_IRQ_FG_TIME_H,
+ MT6358_IRQ_FG_DISCHARGE,
+ MT6358_IRQ_FG_CHARGE,
+ MT6358_IRQ_BATON_LV = 96,
+ MT6358_IRQ_BATON_HT,
+ MT6358_IRQ_BATON_BAT_IN,
+ MT6358_IRQ_BATON_BAT_OUT,
+ MT6358_IRQ_BIF,
+ MT6358_IRQ_BAT_H = 112,
+ MT6358_IRQ_BAT_L,
+ MT6358_IRQ_BAT2_H,
+ MT6358_IRQ_BAT2_L,
+ MT6358_IRQ_BAT_TEMP_H,
+ MT6358_IRQ_BAT_TEMP_L,
+ MT6358_IRQ_AUXADC_IMP,
+ MT6358_IRQ_NAG_C_DLTV,
+ MT6358_IRQ_AUDIO = 128,
+ MT6358_IRQ_ACCDET = 133,
+ MT6358_IRQ_ACCDET_EINT0,
+ MT6358_IRQ_ACCDET_EINT1,
+ MT6358_IRQ_SPI_CMD_ALERT = 144,
+ MT6358_IRQ_NR,
+};
+
+#define MT6358_IRQ_BUCK_BASE MT6358_IRQ_VPROC11_OC
+#define MT6358_IRQ_LDO_BASE MT6358_IRQ_VFE28_OC
+#define MT6358_IRQ_PSC_BASE MT6358_IRQ_PWRKEY
+#define MT6358_IRQ_SCK_BASE MT6358_IRQ_RTC
+#define MT6358_IRQ_BM_BASE MT6358_IRQ_FG_BAT0_H
+#define MT6358_IRQ_HK_BASE MT6358_IRQ_BAT_H
+#define MT6358_IRQ_AUD_BASE MT6358_IRQ_AUDIO
+#define MT6358_IRQ_MISC_BASE MT6358_IRQ_SPI_CMD_ALERT
+
+#define MT6358_IRQ_BUCK_BITS (MT6358_IRQ_VCORE_PREOC - MT6358_IRQ_BUCK_BASE + 1)
+#define MT6358_IRQ_LDO_BITS (MT6358_IRQ_VBIF28_OC - MT6358_IRQ_LDO_BASE + 1)
+#define MT6358_IRQ_PSC_BITS (MT6358_IRQ_VCDT_HV_DET - MT6358_IRQ_PSC_BASE + 1)
+#define MT6358_IRQ_SCK_BITS (MT6358_IRQ_RTC - MT6358_IRQ_SCK_BASE + 1)
+#define MT6358_IRQ_BM_BITS (MT6358_IRQ_BIF - MT6358_IRQ_BM_BASE + 1)
+#define MT6358_IRQ_HK_BITS (MT6358_IRQ_NAG_C_DLTV - MT6358_IRQ_HK_BASE + 1)
+#define MT6358_IRQ_AUD_BITS (MT6358_IRQ_ACCDET_EINT1 - MT6358_IRQ_AUD_BASE + 1)
+#define MT6358_IRQ_MISC_BITS \
+ (MT6358_IRQ_SPI_CMD_ALERT - MT6358_IRQ_MISC_BASE + 1)
+
+#define MT6358_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6358_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6358_IRQ_##sp##_BITS - 1) / MT6358_REG_WIDTH) + 1, \
+ .num_int_bits = MT6358_IRQ_##sp##_BITS, \
+ .en_reg = MT6358_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6358_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6358_##sp##_TOP, \
+}
+
+#endif /* __MFD_MT6358_CORE_H__ */
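MT6358_TOP_GEN() token-pastes a top-domain name into the matching *_BASE/*_BITS and register macros, so the consuming interrupt driver can build its table one entry per domain. An illustrative sketch (the table name is hypothetical; the register macros come from mt6358/registers.h below):

	static const struct irq_top_t mt6358_ints[] = {
		MT6358_TOP_GEN(BUCK),
		MT6358_TOP_GEN(LDO),
		MT6358_TOP_GEN(PSC),
		/* ... one entry per mt6358_irq_top_status_shift domain ... */
	};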
diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h
new file mode 100644
index 000000000000..2ad0b312aa28
--- /dev/null
+++ b/include/linux/mfd/mt6358/registers.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6358_REGISTERS_H__
+#define __MFD_MT6358_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6358_SWCID 0xa
+#define MT6358_MISC_TOP_INT_CON0 0x188
+#define MT6358_MISC_TOP_INT_STATUS0 0x194
+#define MT6358_TOP_INT_STATUS0 0x19e
+#define MT6358_SCK_TOP_INT_CON0 0x52e
+#define MT6358_SCK_TOP_INT_STATUS0 0x53a
+#define MT6358_EOSC_CALI_CON0 0x540
+#define MT6358_EOSC_CALI_CON1 0x542
+#define MT6358_RTC_MIX_CON0 0x544
+#define MT6358_RTC_MIX_CON1 0x546
+#define MT6358_RTC_MIX_CON2 0x548
+#define MT6358_RTC_DSN_ID 0x580
+#define MT6358_RTC_DSN_REV0 0x582
+#define MT6358_RTC_DBI 0x584
+#define MT6358_RTC_DXI 0x586
+#define MT6358_RTC_BBPU 0x588
+#define MT6358_RTC_IRQ_STA 0x58a
+#define MT6358_RTC_IRQ_EN 0x58c
+#define MT6358_RTC_CII_EN 0x58e
+#define MT6358_RTC_AL_MASK 0x590
+#define MT6358_RTC_TC_SEC 0x592
+#define MT6358_RTC_TC_MIN 0x594
+#define MT6358_RTC_TC_HOU 0x596
+#define MT6358_RTC_TC_DOM 0x598
+#define MT6358_RTC_TC_DOW 0x59a
+#define MT6358_RTC_TC_MTH 0x59c
+#define MT6358_RTC_TC_YEA 0x59e
+#define MT6358_RTC_AL_SEC 0x5a0
+#define MT6358_RTC_AL_MIN 0x5a2
+#define MT6358_RTC_AL_HOU 0x5a4
+#define MT6358_RTC_AL_DOM 0x5a6
+#define MT6358_RTC_AL_DOW 0x5a8
+#define MT6358_RTC_AL_MTH 0x5aa
+#define MT6358_RTC_AL_YEA 0x5ac
+#define MT6358_RTC_OSC32CON 0x5ae
+#define MT6358_RTC_POWERKEY1 0x5b0
+#define MT6358_RTC_POWERKEY2 0x5b2
+#define MT6358_RTC_PDN1 0x5b4
+#define MT6358_RTC_PDN2 0x5b6
+#define MT6358_RTC_SPAR0 0x5b8
+#define MT6358_RTC_SPAR1 0x5ba
+#define MT6358_RTC_PROT 0x5bc
+#define MT6358_RTC_DIFF 0x5be
+#define MT6358_RTC_CALI 0x5c0
+#define MT6358_RTC_WRTGR 0x5c2
+#define MT6358_RTC_CON 0x5c4
+#define MT6358_RTC_SEC_CTRL 0x5c6
+#define MT6358_RTC_INT_CNT 0x5c8
+#define MT6358_RTC_SEC_DAT0 0x5ca
+#define MT6358_RTC_SEC_DAT1 0x5cc
+#define MT6358_RTC_SEC_DAT2 0x5ce
+#define MT6358_RTC_SEC_DSN_ID 0x600
+#define MT6358_RTC_SEC_DSN_REV0 0x602
+#define MT6358_RTC_SEC_DBI 0x604
+#define MT6358_RTC_SEC_DXI 0x606
+#define MT6358_RTC_TC_SEC_SEC 0x608
+#define MT6358_RTC_TC_MIN_SEC 0x60a
+#define MT6358_RTC_TC_HOU_SEC 0x60c
+#define MT6358_RTC_TC_DOM_SEC 0x60e
+#define MT6358_RTC_TC_DOW_SEC 0x610
+#define MT6358_RTC_TC_MTH_SEC 0x612
+#define MT6358_RTC_TC_YEA_SEC 0x614
+#define MT6358_RTC_SEC_CK_PDN 0x616
+#define MT6358_RTC_SEC_WRTGR 0x618
+#define MT6358_PSC_TOP_INT_CON0 0x910
+#define MT6358_PSC_TOP_INT_STATUS0 0x91c
+#define MT6358_BM_TOP_INT_CON0 0xc32
+#define MT6358_BM_TOP_INT_CON1 0xc38
+#define MT6358_BM_TOP_INT_STATUS0 0xc4a
+#define MT6358_BM_TOP_INT_STATUS1 0xc4c
+#define MT6358_HK_TOP_INT_CON0 0xf92
+#define MT6358_HK_TOP_INT_STATUS0 0xf9e
+#define MT6358_BUCK_TOP_INT_CON0 0x1318
+#define MT6358_BUCK_TOP_INT_STATUS0 0x1324
+#define MT6358_BUCK_VPROC11_CON0 0x1388
+#define MT6358_BUCK_VPROC11_DBG0 0x139e
+#define MT6358_BUCK_VPROC11_DBG1 0x13a0
+#define MT6358_BUCK_VPROC11_ELR0 0x13a6
+#define MT6358_BUCK_VPROC12_CON0 0x1408
+#define MT6358_BUCK_VPROC12_DBG0 0x141e
+#define MT6358_BUCK_VPROC12_DBG1 0x1420
+#define MT6358_BUCK_VPROC12_ELR0 0x1426
+#define MT6358_BUCK_VCORE_CON0 0x1488
+#define MT6358_BUCK_VCORE_DBG0 0x149e
+#define MT6358_BUCK_VCORE_DBG1 0x14a0
+#define MT6358_BUCK_VCORE_ELR0 0x14aa
+#define MT6358_BUCK_VGPU_CON0 0x1508
+#define MT6358_BUCK_VGPU_DBG0 0x151e
+#define MT6358_BUCK_VGPU_DBG1 0x1520
+#define MT6358_BUCK_VGPU_ELR0 0x1526
+#define MT6358_BUCK_VMODEM_CON0 0x1588
+#define MT6358_BUCK_VMODEM_DBG0 0x159e
+#define MT6358_BUCK_VMODEM_DBG1 0x15a0
+#define MT6358_BUCK_VMODEM_ELR0 0x15a6
+#define MT6358_BUCK_VDRAM1_CON0 0x1608
+#define MT6358_BUCK_VDRAM1_DBG0 0x161e
+#define MT6358_BUCK_VDRAM1_DBG1 0x1620
+#define MT6358_BUCK_VDRAM1_ELR0 0x1626
+#define MT6358_BUCK_VS1_CON0 0x1688
+#define MT6358_BUCK_VS1_DBG0 0x169e
+#define MT6358_BUCK_VS1_DBG1 0x16a0
+#define MT6358_BUCK_VS1_ELR0 0x16ae
+#define MT6358_BUCK_VS2_CON0 0x1708
+#define MT6358_BUCK_VS2_DBG0 0x171e
+#define MT6358_BUCK_VS2_DBG1 0x1720
+#define MT6358_BUCK_VS2_ELR0 0x172e
+#define MT6358_BUCK_VPA_CON0 0x1788
+#define MT6358_BUCK_VPA_CON1 0x178a
+#define MT6358_BUCK_VPA_ELR0 MT6358_BUCK_VPA_CON1
+#define MT6358_BUCK_VPA_DBG0 0x1792
+#define MT6358_BUCK_VPA_DBG1 0x1794
+#define MT6358_VPROC_ANA_CON0 0x180c
+#define MT6358_VCORE_VGPU_ANA_CON0 0x1828
+#define MT6358_VMODEM_ANA_CON0 0x1888
+#define MT6358_VDRAM1_ANA_CON0 0x1896
+#define MT6358_VS1_ANA_CON0 0x18a2
+#define MT6358_VS2_ANA_CON0 0x18ae
+#define MT6358_VPA_ANA_CON0 0x18ba
+#define MT6358_LDO_TOP_INT_CON0 0x1a50
+#define MT6358_LDO_TOP_INT_CON1 0x1a56
+#define MT6358_LDO_TOP_INT_STATUS0 0x1a68
+#define MT6358_LDO_TOP_INT_STATUS1 0x1a6a
+#define MT6358_LDO_VXO22_CON0 0x1a88
+#define MT6358_LDO_VXO22_CON1 0x1a96
+#define MT6358_LDO_VA12_CON0 0x1a9c
+#define MT6358_LDO_VA12_CON1 0x1aaa
+#define MT6358_LDO_VAUX18_CON0 0x1ab0
+#define MT6358_LDO_VAUX18_CON1 0x1abe
+#define MT6358_LDO_VAUD28_CON0 0x1ac4
+#define MT6358_LDO_VAUD28_CON1 0x1ad2
+#define MT6358_LDO_VIO28_CON0 0x1ad8
+#define MT6358_LDO_VIO28_CON1 0x1ae6
+#define MT6358_LDO_VIO18_CON0 0x1aec
+#define MT6358_LDO_VIO18_CON1 0x1afa
+#define MT6358_LDO_VDRAM2_CON0 0x1b08
+#define MT6358_LDO_VDRAM2_CON1 0x1b16
+#define MT6358_LDO_VEMC_CON0 0x1b1c
+#define MT6358_LDO_VEMC_CON1 0x1b2a
+#define MT6358_LDO_VUSB_CON0_0 0x1b30
+#define MT6358_LDO_VUSB_CON1 0x1b40
+#define MT6358_LDO_VSRAM_PROC11_CON0 0x1b46
+#define MT6358_LDO_VSRAM_PROC11_DBG0 0x1b60
+#define MT6358_LDO_VSRAM_PROC11_DBG1 0x1b62
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON0 0x1b64
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON1 0x1b66
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON2 0x1b68
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON3 0x1b6a
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON0 0x1b6c
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON1 0x1b6e
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON2 0x1b70
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON3 0x1b72
+#define MT6358_LDO_VSRAM_WAKEUP_CON0 0x1b74
+#define MT6358_LDO_GON1_ELR_NUM 0x1b76
+#define MT6358_LDO_VDRAM2_ELR0 0x1b78
+#define MT6358_LDO_VSRAM_PROC12_CON0 0x1b88
+#define MT6358_LDO_VSRAM_PROC12_DBG0 0x1ba2
+#define MT6358_LDO_VSRAM_PROC12_DBG1 0x1ba4
+#define MT6358_LDO_VSRAM_OTHERS_CON0 0x1ba6
+#define MT6358_LDO_VSRAM_OTHERS_DBG0 0x1bc0
+#define MT6358_LDO_VSRAM_OTHERS_DBG1 0x1bc2
+#define MT6358_LDO_VSRAM_GPU_CON0 0x1bc8
+#define MT6358_LDO_VSRAM_GPU_DBG0 0x1be2
+#define MT6358_LDO_VSRAM_GPU_DBG1 0x1be4
+#define MT6358_LDO_VSRAM_CON0 0x1bee
+#define MT6358_LDO_VSRAM_CON1 0x1bf0
+#define MT6358_LDO_VSRAM_CON2 0x1bf2
+#define MT6358_LDO_VSRAM_CON3 0x1bf4
+#define MT6358_LDO_VFE28_CON0 0x1c08
+#define MT6358_LDO_VFE28_CON1 0x1c16
+#define MT6358_LDO_VFE28_CON2 0x1c18
+#define MT6358_LDO_VFE28_CON3 0x1c1a
+#define MT6358_LDO_VRF18_CON0 0x1c1c
+#define MT6358_LDO_VRF18_CON1 0x1c2a
+#define MT6358_LDO_VRF18_CON2 0x1c2c
+#define MT6358_LDO_VRF18_CON3 0x1c2e
+#define MT6358_LDO_VRF12_CON0 0x1c30
+#define MT6358_LDO_VRF12_CON1 0x1c3e
+#define MT6358_LDO_VRF12_CON2 0x1c40
+#define MT6358_LDO_VRF12_CON3 0x1c42
+#define MT6358_LDO_VEFUSE_CON0 0x1c44
+#define MT6358_LDO_VEFUSE_CON1 0x1c52
+#define MT6358_LDO_VEFUSE_CON2 0x1c54
+#define MT6358_LDO_VEFUSE_CON3 0x1c56
+#define MT6358_LDO_VCN18_CON0 0x1c58
+#define MT6358_LDO_VCN18_CON1 0x1c66
+#define MT6358_LDO_VCN18_CON2 0x1c68
+#define MT6358_LDO_VCN18_CON3 0x1c6a
+#define MT6358_LDO_VCAMA1_CON0 0x1c6c
+#define MT6358_LDO_VCAMA1_CON1 0x1c7a
+#define MT6358_LDO_VCAMA1_CON2 0x1c7c
+#define MT6358_LDO_VCAMA1_CON3 0x1c7e
+#define MT6358_LDO_VCAMA2_CON0 0x1c88
+#define MT6358_LDO_VCAMA2_CON1 0x1c96
+#define MT6358_LDO_VCAMA2_CON2 0x1c98
+#define MT6358_LDO_VCAMA2_CON3 0x1c9a
+#define MT6358_LDO_VCAMD_CON0 0x1c9c
+#define MT6358_LDO_VCAMD_CON1 0x1caa
+#define MT6358_LDO_VCAMD_CON2 0x1cac
+#define MT6358_LDO_VCAMD_CON3 0x1cae
+#define MT6358_LDO_VCAMIO_CON0 0x1cb0
+#define MT6358_LDO_VCAMIO_CON1 0x1cbe
+#define MT6358_LDO_VCAMIO_CON2 0x1cc0
+#define MT6358_LDO_VCAMIO_CON3 0x1cc2
+#define MT6358_LDO_VMC_CON0 0x1cc4
+#define MT6358_LDO_VMC_CON1 0x1cd2
+#define MT6358_LDO_VMC_CON2 0x1cd4
+#define MT6358_LDO_VMC_CON3 0x1cd6
+#define MT6358_LDO_VMCH_CON0 0x1cd8
+#define MT6358_LDO_VMCH_CON1 0x1ce6
+#define MT6358_LDO_VMCH_CON2 0x1ce8
+#define MT6358_LDO_VMCH_CON3 0x1cea
+#define MT6358_LDO_VIBR_CON0 0x1d08
+#define MT6358_LDO_VIBR_CON1 0x1d16
+#define MT6358_LDO_VIBR_CON2 0x1d18
+#define MT6358_LDO_VIBR_CON3 0x1d1a
+#define MT6358_LDO_VCN33_CON0_0 0x1d1c
+#define MT6358_LDO_VCN33_CON0_1 0x1d2a
+#define MT6358_LDO_VCN33_CON1 0x1d2c
+#define MT6358_LDO_VCN33_BT_CON1 MT6358_LDO_VCN33_CON1
+#define MT6358_LDO_VCN33_WIFI_CON1 MT6358_LDO_VCN33_CON1
+#define MT6358_LDO_VCN33_CON2 0x1d2e
+#define MT6358_LDO_VCN33_CON3 0x1d30
+#define MT6358_LDO_VLDO28_CON0_0 0x1d32
+#define MT6358_LDO_VLDO28_CON0_1 0x1d40
+#define MT6358_LDO_VLDO28_CON1 0x1d42
+#define MT6358_LDO_VLDO28_CON2 0x1d44
+#define MT6358_LDO_VLDO28_CON3 0x1d46
+#define MT6358_LDO_VSIM1_CON0 0x1d48
+#define MT6358_LDO_VSIM1_CON1 0x1d56
+#define MT6358_LDO_VSIM1_CON2 0x1d58
+#define MT6358_LDO_VSIM1_CON3 0x1d5a
+#define MT6358_LDO_VSIM2_CON0 0x1d5c
+#define MT6358_LDO_VSIM2_CON1 0x1d6a
+#define MT6358_LDO_VSIM2_CON2 0x1d6c
+#define MT6358_LDO_VSIM2_CON3 0x1d6e
+#define MT6358_LDO_VCN28_CON0 0x1d88
+#define MT6358_LDO_VCN28_CON1 0x1d96
+#define MT6358_LDO_VCN28_CON2 0x1d98
+#define MT6358_LDO_VCN28_CON3 0x1d9a
+#define MT6358_VRTC28_CON0 0x1d9c
+#define MT6358_LDO_VBIF28_CON0 0x1d9e
+#define MT6358_LDO_VBIF28_CON1 0x1dac
+#define MT6358_LDO_VBIF28_CON2 0x1dae
+#define MT6358_LDO_VBIF28_CON3 0x1db0
+#define MT6358_VCAMA1_ANA_CON0 0x1e08
+#define MT6358_VCAMA2_ANA_CON0 0x1e0c
+#define MT6358_VCN33_ANA_CON0 0x1e28
+#define MT6358_VSIM1_ANA_CON0 0x1e2c
+#define MT6358_VSIM2_ANA_CON0 0x1e30
+#define MT6358_VUSB_ANA_CON0 0x1e34
+#define MT6358_VEMC_ANA_CON0 0x1e38
+#define MT6358_VLDO28_ANA_CON0 0x1e3c
+#define MT6358_VIO28_ANA_CON0 0x1e40
+#define MT6358_VIBR_ANA_CON0 0x1e44
+#define MT6358_VMCH_ANA_CON0 0x1e48
+#define MT6358_VMC_ANA_CON0 0x1e4c
+#define MT6358_VRF18_ANA_CON0 0x1e88
+#define MT6358_VCN18_ANA_CON0 0x1e8c
+#define MT6358_VCAMIO_ANA_CON0 0x1e90
+#define MT6358_VIO18_ANA_CON0 0x1e94
+#define MT6358_VEFUSE_ANA_CON0 0x1e98
+#define MT6358_VRF12_ANA_CON0 0x1e9c
+#define MT6358_VSRAM_PROC11_ANA_CON0 0x1ea0
+#define MT6358_VSRAM_PROC12_ANA_CON0 0x1ea4
+#define MT6358_VSRAM_OTHERS_ANA_CON0 0x1ea6
+#define MT6358_VSRAM_GPU_ANA_CON0 0x1ea8
+#define MT6358_VDRAM2_ANA_CON0 0x1eaa
+#define MT6358_VCAMD_ANA_CON0 0x1eae
+#define MT6358_VA12_ANA_CON0 0x1eb2
+#define MT6358_AUD_TOP_INT_CON0 0x2228
+#define MT6358_AUD_TOP_INT_STATUS0 0x2234
+
+#endif /* __MFD_MT6358_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6360.h b/include/linux/mfd/mt6360.h
new file mode 100644
index 000000000000..ea1304035d4d
--- /dev/null
+++ b/include/linux/mfd/mt6360.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef __MT6360_H__
+#define __MT6360_H__
+
+#include <linux/regmap.h>
+
+enum {
+ MT6360_SLAVE_PMU = 0,
+ MT6360_SLAVE_PMIC,
+ MT6360_SLAVE_LDO,
+ MT6360_SLAVE_TCPC,
+ MT6360_SLAVE_MAX,
+};
+
+#define MT6360_PMU_SLAVEID (0x34)
+#define MT6360_PMIC_SLAVEID (0x1A)
+#define MT6360_LDO_SLAVEID (0x64)
+#define MT6360_TCPC_SLAVEID (0x4E)
+
+struct mt6360_pmu_data {
+ struct i2c_client *i2c[MT6360_SLAVE_MAX];
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_data;
+ unsigned int chip_rev;
+};
+
+/* PMU register definitions */
+#define MT6360_PMU_DEV_INFO (0x00)
+#define MT6360_PMU_CORE_CTRL1 (0x01)
+#define MT6360_PMU_RST1 (0x02)
+#define MT6360_PMU_CRCEN (0x03)
+#define MT6360_PMU_RST_PAS_CODE1 (0x04)
+#define MT6360_PMU_RST_PAS_CODE2 (0x05)
+#define MT6360_PMU_CORE_CTRL2 (0x06)
+#define MT6360_PMU_TM_PAS_CODE1 (0x07)
+#define MT6360_PMU_TM_PAS_CODE2 (0x08)
+#define MT6360_PMU_TM_PAS_CODE3 (0x09)
+#define MT6360_PMU_TM_PAS_CODE4 (0x0A)
+#define MT6360_PMU_IRQ_IND (0x0B)
+#define MT6360_PMU_IRQ_MASK (0x0C)
+#define MT6360_PMU_IRQ_SET (0x0D)
+#define MT6360_PMU_SHDN_CTRL (0x0E)
+#define MT6360_PMU_TM_INF (0x0F)
+#define MT6360_PMU_I2C_CTRL (0x10)
+#define MT6360_PMU_CHG_CTRL1 (0x11)
+#define MT6360_PMU_CHG_CTRL2 (0x12)
+#define MT6360_PMU_CHG_CTRL3 (0x13)
+#define MT6360_PMU_CHG_CTRL4 (0x14)
+#define MT6360_PMU_CHG_CTRL5 (0x15)
+#define MT6360_PMU_CHG_CTRL6 (0x16)
+#define MT6360_PMU_CHG_CTRL7 (0x17)
+#define MT6360_PMU_CHG_CTRL8 (0x18)
+#define MT6360_PMU_CHG_CTRL9 (0x19)
+#define MT6360_PMU_CHG_CTRL10 (0x1A)
+#define MT6360_PMU_CHG_CTRL11 (0x1B)
+#define MT6360_PMU_CHG_CTRL12 (0x1C)
+#define MT6360_PMU_CHG_CTRL13 (0x1D)
+#define MT6360_PMU_CHG_CTRL14 (0x1E)
+#define MT6360_PMU_CHG_CTRL15 (0x1F)
+#define MT6360_PMU_CHG_CTRL16 (0x20)
+#define MT6360_PMU_CHG_AICC_RESULT (0x21)
+#define MT6360_PMU_DEVICE_TYPE (0x22)
+#define MT6360_PMU_QC_CONTROL1 (0x23)
+#define MT6360_PMU_QC_CONTROL2 (0x24)
+#define MT6360_PMU_QC30_CONTROL1 (0x25)
+#define MT6360_PMU_QC30_CONTROL2 (0x26)
+#define MT6360_PMU_USB_STATUS1 (0x27)
+#define MT6360_PMU_QC_STATUS1 (0x28)
+#define MT6360_PMU_QC_STATUS2 (0x29)
+#define MT6360_PMU_CHG_PUMP (0x2A)
+#define MT6360_PMU_CHG_CTRL17 (0x2B)
+#define MT6360_PMU_CHG_CTRL18 (0x2C)
+#define MT6360_PMU_CHRDET_CTRL1 (0x2D)
+#define MT6360_PMU_CHRDET_CTRL2 (0x2E)
+#define MT6360_PMU_DPDN_CTRL (0x2F)
+#define MT6360_PMU_CHG_HIDDEN_CTRL1 (0x30)
+#define MT6360_PMU_CHG_HIDDEN_CTRL2 (0x31)
+#define MT6360_PMU_CHG_HIDDEN_CTRL3 (0x32)
+#define MT6360_PMU_CHG_HIDDEN_CTRL4 (0x33)
+#define MT6360_PMU_CHG_HIDDEN_CTRL5 (0x34)
+#define MT6360_PMU_CHG_HIDDEN_CTRL6 (0x35)
+#define MT6360_PMU_CHG_HIDDEN_CTRL7 (0x36)
+#define MT6360_PMU_CHG_HIDDEN_CTRL8 (0x37)
+#define MT6360_PMU_CHG_HIDDEN_CTRL9 (0x38)
+#define MT6360_PMU_CHG_HIDDEN_CTRL10 (0x39)
+#define MT6360_PMU_CHG_HIDDEN_CTRL11 (0x3A)
+#define MT6360_PMU_CHG_HIDDEN_CTRL12 (0x3B)
+#define MT6360_PMU_CHG_HIDDEN_CTRL13 (0x3C)
+#define MT6360_PMU_CHG_HIDDEN_CTRL14 (0x3D)
+#define MT6360_PMU_CHG_HIDDEN_CTRL15 (0x3E)
+#define MT6360_PMU_CHG_HIDDEN_CTRL16 (0x3F)
+#define MT6360_PMU_CHG_HIDDEN_CTRL17 (0x40)
+#define MT6360_PMU_CHG_HIDDEN_CTRL18 (0x41)
+#define MT6360_PMU_CHG_HIDDEN_CTRL19 (0x42)
+#define MT6360_PMU_CHG_HIDDEN_CTRL20 (0x43)
+#define MT6360_PMU_CHG_HIDDEN_CTRL21 (0x44)
+#define MT6360_PMU_CHG_HIDDEN_CTRL22 (0x45)
+#define MT6360_PMU_CHG_HIDDEN_CTRL23 (0x46)
+#define MT6360_PMU_CHG_HIDDEN_CTRL24 (0x47)
+#define MT6360_PMU_CHG_HIDDEN_CTRL25 (0x48)
+#define MT6360_PMU_BC12_CTRL (0x49)
+#define MT6360_PMU_CHG_STAT (0x4A)
+#define MT6360_PMU_RESV1 (0x4B)
+#define MT6360_PMU_TYPEC_OTP_TH_SEL_CODEH (0x4E)
+#define MT6360_PMU_TYPEC_OTP_TH_SEL_CODEL (0x4F)
+#define MT6360_PMU_TYPEC_OTP_HYST_TH (0x50)
+#define MT6360_PMU_TYPEC_OTP_CTRL (0x51)
+#define MT6360_PMU_ADC_BAT_DATA_H (0x52)
+#define MT6360_PMU_ADC_BAT_DATA_L (0x53)
+#define MT6360_PMU_IMID_BACKBST_ON (0x54)
+#define MT6360_PMU_IMID_BACKBST_OFF (0x55)
+#define MT6360_PMU_ADC_CONFIG (0x56)
+#define MT6360_PMU_ADC_EN2 (0x57)
+#define MT6360_PMU_ADC_IDLE_T (0x58)
+#define MT6360_PMU_ADC_RPT_1 (0x5A)
+#define MT6360_PMU_ADC_RPT_2 (0x5B)
+#define MT6360_PMU_ADC_RPT_3 (0x5C)
+#define MT6360_PMU_ADC_RPT_ORG1 (0x5D)
+#define MT6360_PMU_ADC_RPT_ORG2 (0x5E)
+#define MT6360_PMU_BAT_OVP_TH_SEL_CODEH (0x5F)
+#define MT6360_PMU_BAT_OVP_TH_SEL_CODEL (0x60)
+#define MT6360_PMU_CHG_CTRL19 (0x61)
+#define MT6360_PMU_VDDASUPPLY (0x62)
+#define MT6360_PMU_BC12_MANUAL (0x63)
+#define MT6360_PMU_CHGDET_FUNC (0x64)
+#define MT6360_PMU_FOD_CTRL (0x65)
+#define MT6360_PMU_CHG_CTRL20 (0x66)
+#define MT6360_PMU_CHG_HIDDEN_CTRL26 (0x67)
+#define MT6360_PMU_CHG_HIDDEN_CTRL27 (0x68)
+#define MT6360_PMU_RESV2 (0x69)
+#define MT6360_PMU_USBID_CTRL1 (0x6D)
+#define MT6360_PMU_USBID_CTRL2 (0x6E)
+#define MT6360_PMU_USBID_CTRL3 (0x6F)
+#define MT6360_PMU_FLED_CFG (0x70)
+#define MT6360_PMU_RESV3 (0x71)
+#define MT6360_PMU_FLED1_CTRL (0x72)
+#define MT6360_PMU_FLED_STRB_CTRL (0x73)
+#define MT6360_PMU_FLED1_STRB_CTRL2 (0x74)
+#define MT6360_PMU_FLED1_TOR_CTRL (0x75)
+#define MT6360_PMU_FLED2_CTRL (0x76)
+#define MT6360_PMU_RESV4 (0x77)
+#define MT6360_PMU_FLED2_STRB_CTRL2 (0x78)
+#define MT6360_PMU_FLED2_TOR_CTRL (0x79)
+#define MT6360_PMU_FLED_VMIDTRK_CTRL1 (0x7A)
+#define MT6360_PMU_FLED_VMID_RTM (0x7B)
+#define MT6360_PMU_FLED_VMIDTRK_CTRL2 (0x7C)
+#define MT6360_PMU_FLED_PWSEL (0x7D)
+#define MT6360_PMU_FLED_EN (0x7E)
+#define MT6360_PMU_FLED_Hidden1 (0x7F)
+#define MT6360_PMU_RGB_EN (0x80)
+#define MT6360_PMU_RGB1_ISNK (0x81)
+#define MT6360_PMU_RGB2_ISNK (0x82)
+#define MT6360_PMU_RGB3_ISNK (0x83)
+#define MT6360_PMU_RGB_ML_ISNK (0x84)
+#define MT6360_PMU_RGB1_DIM (0x85)
+#define MT6360_PMU_RGB2_DIM (0x86)
+#define MT6360_PMU_RGB3_DIM (0x87)
+#define MT6360_PMU_RESV5 (0x88)
+#define MT6360_PMU_RGB12_Freq (0x89)
+#define MT6360_PMU_RGB34_Freq (0x8A)
+#define MT6360_PMU_RGB1_Tr (0x8B)
+#define MT6360_PMU_RGB1_Tf (0x8C)
+#define MT6360_PMU_RGB1_TON_TOFF (0x8D)
+#define MT6360_PMU_RGB2_Tr (0x8E)
+#define MT6360_PMU_RGB2_Tf (0x8F)
+#define MT6360_PMU_RGB2_TON_TOFF (0x90)
+#define MT6360_PMU_RGB3_Tr (0x91)
+#define MT6360_PMU_RGB3_Tf (0x92)
+#define MT6360_PMU_RGB3_TON_TOFF (0x93)
+#define MT6360_PMU_RGB_Hidden_CTRL1 (0x94)
+#define MT6360_PMU_RGB_Hidden_CTRL2 (0x95)
+#define MT6360_PMU_RESV6 (0x97)
+#define MT6360_PMU_SPARE1 (0x9A)
+#define MT6360_PMU_SPARE2 (0xA0)
+#define MT6360_PMU_SPARE3 (0xB0)
+#define MT6360_PMU_SPARE4 (0xC0)
+#define MT6360_PMU_CHG_IRQ1 (0xD0)
+#define MT6360_PMU_CHG_IRQ2 (0xD1)
+#define MT6360_PMU_CHG_IRQ3 (0xD2)
+#define MT6360_PMU_CHG_IRQ4 (0xD3)
+#define MT6360_PMU_CHG_IRQ5 (0xD4)
+#define MT6360_PMU_CHG_IRQ6 (0xD5)
+#define MT6360_PMU_QC_IRQ (0xD6)
+#define MT6360_PMU_FOD_IRQ (0xD7)
+#define MT6360_PMU_BASE_IRQ (0xD8)
+#define MT6360_PMU_FLED_IRQ1 (0xD9)
+#define MT6360_PMU_FLED_IRQ2 (0xDA)
+#define MT6360_PMU_RGB_IRQ (0xDB)
+#define MT6360_PMU_BUCK1_IRQ (0xDC)
+#define MT6360_PMU_BUCK2_IRQ (0xDD)
+#define MT6360_PMU_LDO_IRQ1 (0xDE)
+#define MT6360_PMU_LDO_IRQ2 (0xDF)
+#define MT6360_PMU_CHG_STAT1 (0xE0)
+#define MT6360_PMU_CHG_STAT2 (0xE1)
+#define MT6360_PMU_CHG_STAT3 (0xE2)
+#define MT6360_PMU_CHG_STAT4 (0xE3)
+#define MT6360_PMU_CHG_STAT5 (0xE4)
+#define MT6360_PMU_CHG_STAT6 (0xE5)
+#define MT6360_PMU_QC_STAT (0xE6)
+#define MT6360_PMU_FOD_STAT (0xE7)
+#define MT6360_PMU_BASE_STAT (0xE8)
+#define MT6360_PMU_FLED_STAT1 (0xE9)
+#define MT6360_PMU_FLED_STAT2 (0xEA)
+#define MT6360_PMU_RGB_STAT (0xEB)
+#define MT6360_PMU_BUCK1_STAT (0xEC)
+#define MT6360_PMU_BUCK2_STAT (0xED)
+#define MT6360_PMU_LDO_STAT1 (0xEE)
+#define MT6360_PMU_LDO_STAT2 (0xEF)
+#define MT6360_PMU_CHG_MASK1 (0xF0)
+#define MT6360_PMU_CHG_MASK2 (0xF1)
+#define MT6360_PMU_CHG_MASK3 (0xF2)
+#define MT6360_PMU_CHG_MASK4 (0xF3)
+#define MT6360_PMU_CHG_MASK5 (0xF4)
+#define MT6360_PMU_CHG_MASK6 (0xF5)
+#define MT6360_PMU_QC_MASK (0xF6)
+#define MT6360_PMU_FOD_MASK (0xF7)
+#define MT6360_PMU_BASE_MASK (0xF8)
+#define MT6360_PMU_FLED_MASK1 (0xF9)
+#define MT6360_PMU_FLED_MASK2 (0xFA)
+#define MT6360_PMU_FAULTB_MASK (0xFB)
+#define MT6360_PMU_BUCK1_MASK (0xFC)
+#define MT6360_PMU_BUCK2_MASK (0xFD)
+#define MT6360_PMU_LDO_MASK1 (0xFE)
+#define MT6360_PMU_LDO_MASK2 (0xFF)
+#define MT6360_PMU_MAXREG (MT6360_PMU_LDO_MASK2)
+
+/* MT6360_PMU_IRQ_SET */
+#define MT6360_PMU_IRQ_REGNUM (MT6360_PMU_LDO_IRQ2 - MT6360_PMU_CHG_IRQ1 + 1)
+#define MT6360_IRQ_RETRIG BIT(2)
+
+#define CHIP_VEN_MASK (0xF0)
+#define CHIP_VEN_MT6360 (0x50)
+#define CHIP_REV_MASK (0x0F)
+
+#endif /* __MT6360_H__ */
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index fc88d315bdde..949268581b36 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -8,9 +8,11 @@
#define __MFD_MT6397_CORE_H__
#include <linux/mutex.h>
+#include <linux/notifier.h>
enum chip_id {
MT6323_CHIP_ID = 0x23,
+ MT6358_CHIP_ID = 0x58,
MT6391_CHIP_ID = 0x91,
MT6397_CHIP_ID = 0x97,
};
@@ -54,6 +56,7 @@ enum mt6397_irq_numbers {
struct mt6397_chip {
struct device *dev;
struct regmap *regmap;
+ struct notifier_block pm_nb;
int irq;
struct irq_domain *irq_domain;
struct mutex irqlock;
@@ -63,8 +66,10 @@ struct mt6397_chip {
u16 int_con[2];
u16 int_status[2];
u16 chip_id;
+ void *irq_data;
};
+int mt6358_irq_init(struct mt6397_chip *chip);
int mt6397_irq_init(struct mt6397_chip *chip);
#endif /* __MFD_MT6397_CORE_H__ */
diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h
index 7dfb63b81373..66989a16221a 100644
--- a/include/linux/mfd/mt6397/rtc.h
+++ b/include/linux/mfd/mt6397/rtc.h
@@ -18,7 +18,9 @@
#define RTC_BBPU_CBUSY BIT(6)
#define RTC_BBPU_KEY (0x43 << 8)
-#define RTC_WRTGR 0x003c
+#define RTC_WRTGR_MT6358 0x003a
+#define RTC_WRTGR_MT6397 0x003c
+#define RTC_WRTGR_MT6323 RTC_WRTGR_MT6397
#define RTC_IRQ_STA 0x0002
#define RTC_IRQ_STA_AL BIT(0)
@@ -65,6 +67,10 @@
#define MTK_RTC_POLL_DELAY_US 10
#define MTK_RTC_POLL_TIMEOUT (jiffies_to_usecs(HZ))
+struct mtk_rtc_data {
+ u32 wrtgr;
+};
+
struct mt6397_rtc {
struct device *dev;
struct rtc_device *rtc_dev;
@@ -74,6 +80,7 @@ struct mt6397_rtc {
struct regmap *regmap;
int irq;
u32 addr_base;
+ const struct mtk_rtc_data *data;
};
#endif /* _LINUX_MFD_MT6397_RTC_H_ */
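The new mtk_rtc_data indirection lets one driver serve chips whose write-trigger register moved (0x3a on MT6358 vs 0x3c on MT6397/MT6323). A sketch of the per-chip match data this enables; the variable names are illustrative:

	static const struct mtk_rtc_data mt6358_rtc_data = {
		.wrtgr = RTC_WRTGR_MT6358,
	};

	static const struct mtk_rtc_data mt6397_rtc_data = {
		.wrtgr = RTC_WRTGR_MT6397,
	};

	/* Committing a write then uses the chip-specific offset, e.g.:
	 * regmap_write(rtc->regmap, rtc->addr_base + rtc->data->wrtgr, 1);
	 */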
diff --git a/include/linux/mfd/stmfx.h b/include/linux/mfd/stmfx.h
index 3c67983678ec..744dce63946e 100644
--- a/include/linux/mfd/stmfx.h
+++ b/include/linux/mfd/stmfx.h
@@ -109,6 +109,7 @@ struct stmfx {
struct device *dev;
struct regmap *map;
struct regulator *vdd;
+ int irq;
struct irq_domain *irq_domain;
struct mutex lock; /* IRQ bus lock */
u8 irq_src;
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 3d7c3c26eeb9..c4a940d98912 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -331,8 +331,6 @@ struct mhi_controller_config {
* @wlock: Lock for protecting device wakeup
* @mhi_link_info: Device bandwidth info
* @st_worker: State transition worker
- * @fw_worker: Firmware download worker
- * @syserr_worker: System error worker
* @state_event: State change event
* @status_cb: CB function to notify power states of the device (required)
* @wake_get: CB function to assert device wake (optional)
@@ -412,8 +410,6 @@ struct mhi_controller {
spinlock_t wlock;
struct mhi_link_info mhi_link_info;
struct work_struct st_worker;
- struct work_struct fw_worker;
- struct work_struct syserr_worker;
wait_queue_head_t state_event;
void (*status_cb)(struct mhi_controller *mhi_cntrl,
@@ -573,6 +569,13 @@ void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
enum mhi_state state);
/**
+ * mhi_notify - Notify the MHI client driver about client device status
+ * @mhi_dev: MHI device instance
+ * @cb_reason: MHI callback reason
+ */
+void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason);
+
+/**
* mhi_prepare_for_power_up - Do pre-initialization before power up.
* This is optional, call this before power up if
* the controller does not want bus framework to
@@ -609,6 +612,18 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
/**
+ * mhi_pm_suspend - Move MHI into a suspended state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_resume - Resume MHI from suspended state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
+
+/**
* mhi_download_rddm_img - Download ramdump image from device for
* debugging purpose.
* @mhi_cntrl: MHI controller
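The newly exported mhi_pm_suspend()/mhi_pm_resume() let a controller driver tie MHI state to system PM. A sketch of plausible dev_pm_ops glue; the callback and drvdata layout are assumptions, not part of this patch:

	static int my_ctrl_suspend(struct device *dev)
	{
		struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);

		return mhi_pm_suspend(mhi_cntrl);
	}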
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 20372de0b587..06e066e04a4b 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -573,7 +573,6 @@ struct mlx4_caps {
int reserved_eqs;
int num_comp_vectors;
int num_mpts;
- int max_fmr_maps;
int num_mtts;
int fmr_reserved_mtts;
int reserved_mtts;
@@ -707,17 +706,6 @@ struct mlx4_mw {
int enabled;
};
-struct mlx4_fmr {
- struct mlx4_mr mr;
- struct mlx4_mpt_entry *mpt;
- __be64 *mtts;
- dma_addr_t dma_handle;
- int max_pages;
- int max_maps;
- int maps;
- u8 page_shift;
-};
-
struct mlx4_uar {
unsigned long pfn;
int index;
@@ -1412,14 +1400,6 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
-int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
- int npages, u64 iova, u32 *lkey, u32 *rkey);
-int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
- int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
-int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
-void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
- u32 *lkey, u32 *rkey);
-int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
int mlx4_test_async(struct mlx4_dev *dev);
@@ -1522,6 +1502,8 @@ int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
int enable);
+
+struct mlx4_mpt_entry;
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
struct mlx4_mpt_entry ***mpt_entry);
int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 1a56dc079c32..116bd9bb347f 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1321,7 +1321,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 stat_rate_support[0x10];
u8 reserved_at_1f0[0x1];
u8 pci_sync_for_fw_update_event[0x1];
- u8 reserved_at_1f2[0xa];
+ u8 reserved_at_1f2[0x6];
+ u8 init2_lag_tx_port_affinity[0x1];
+ u8 reserved_at_1fa[0x3];
u8 cqe_version[0x4];
u8 compact_address_vector[0x1];
@@ -3688,7 +3690,8 @@ struct mlx5_ifc_dctc_bits {
u8 ecn[0x2];
u8 dscp[0x6];
- u8 reserved_at_1c0[0x40];
+ u8 reserved_at_1c0[0x20];
+ u8 ece[0x20];
};
enum {
@@ -7938,7 +7941,7 @@ struct mlx5_ifc_create_dct_out_bits {
u8 reserved_at_40[0x8];
u8 dctn[0x18];
- u8 reserved_at_60[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_create_dct_in_bits {
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 1af5e460b5f6..b8992b861ae6 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -66,6 +66,7 @@ enum mlx5_qp_optpar {
MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12,
MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13,
MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
+ MLX5_QP_OPTPAR_LAG_TX_AFF = 1 << 15,
MLX5_QP_OPTPAR_PRI_PORT = 1 << 16,
MLX5_QP_OPTPAR_SRQN = 1 << 18,
MLX5_QP_OPTPAR_CQN_RCV = 1 << 19,
@@ -321,6 +322,7 @@ struct mlx5_av {
struct mlx5_ib_ah {
struct ib_ah ibah;
struct mlx5_av av;
+ u8 xmit_port;
};
static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
@@ -493,72 +495,6 @@ struct mlx5_core_dct {
struct completion drained;
};
-struct mlx5_qp_path {
- u8 fl_free_ar;
- u8 rsvd3;
- __be16 pkey_index;
- u8 rsvd0;
- u8 grh_mlid;
- __be16 rlid;
- u8 ackto_lt;
- u8 mgid_index;
- u8 static_rate;
- u8 hop_limit;
- __be32 tclass_flowlabel;
- union {
- u8 rgid[16];
- u8 rip[16];
- };
- u8 f_dscp_ecn_prio;
- u8 ecn_dscp;
- __be16 udp_sport;
- u8 dci_cfi_prio_sl;
- u8 port;
- u8 rmac[6];
-};
-
-/* FIXME: use mlx5_ifc.h qpc */
-struct mlx5_qp_context {
- __be32 flags;
- __be32 flags_pd;
- u8 mtu_msgmax;
- u8 rq_size_stride;
- __be16 sq_crq_size;
- __be32 qp_counter_set_usr_page;
- __be32 wire_qpn;
- __be32 log_pg_sz_remote_qpn;
- struct mlx5_qp_path pri_path;
- struct mlx5_qp_path alt_path;
- __be32 params1;
- u8 reserved2[4];
- __be32 next_send_psn;
- __be32 cqn_send;
- __be32 deth_sqpn;
- u8 reserved3[4];
- __be32 last_acked_psn;
- __be32 ssn;
- __be32 params2;
- __be32 rnr_nextrecvpsn;
- __be32 xrcd;
- __be32 cqn_recv;
- __be64 db_rec_addr;
- __be32 qkey;
- __be32 rq_type_srqn;
- __be32 rmsn;
- __be16 hw_sq_wqe_counter;
- __be16 sw_sq_wqe_counter;
- __be16 hw_rcyclic_byte_counter;
- __be16 hw_rq_counter;
- __be16 sw_rcyclic_byte_counter;
- __be16 sw_rq_counter;
- u8 rsvd0[5];
- u8 cgs;
- u8 cs_req;
- u8 cs_res;
- __be64 dc_access_key;
- u8 rsvd1[24];
-};
-
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 59adb47efc55..dc7b87310c10 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -15,6 +15,7 @@
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
@@ -28,6 +29,7 @@
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/sched.h>
+#include <linux/pgtable.h>
struct mempolicy;
struct anon_vma;
@@ -92,7 +94,6 @@ extern int mmap_rnd_compat_bits __read_mostly;
#endif
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
/*
@@ -401,7 +402,7 @@ extern pgprot_t protection_map[16];
* @FAULT_FLAG_WRITE: Fault was a write fault.
* @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
* @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
- * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_sem and wait when retrying.
+ * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
* @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
* @FAULT_FLAG_TRIED: The fault has been tried once.
* @FAULT_FLAG_USER: The fault originated in userspace.
@@ -451,10 +452,10 @@ extern pgprot_t protection_map[16];
* fault_flag_allow_retry_first - check ALLOW_RETRY the first time
*
* This is mostly used for places where we want to try to avoid taking
- * the mmap_sem for too long a time when waiting for another condition
+ * the mmap_lock for too long a time when waiting for another condition
* to change, in which case we can try to be polite to release the
- * mmap_sem in the first round to avoid potential starvation of other
- * processes that would also want the mmap_sem.
+ * mmap_lock in the first round to avoid potential starvation of other
+ * processes that would also want the mmap_lock.
*
* Return: true if the page fault allows retry and this is the first
* attempt of the fault handling; false otherwise.
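
A sketch of the documented pattern, assuming the caller holds mmap_lock for read and returns VM_FAULT_RETRY after dropping it:

	if (fault_flag_allow_retry_first(vmf->flags) &&
	    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
		mmap_read_unlock(vmf->vma->vm_mm);	/* release early */
		/* ... wait for the condition ... */
		return VM_FAULT_RETRY;
	}
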
@@ -501,7 +502,6 @@ struct vm_fault {
pte_t orig_pte; /* Value of PTE at the time of fault */
struct page *cow_page; /* Page handler may use for COW fault */
- struct mem_cgroup *memcg; /* Cgroup cow_page belongs to */
struct page *page; /* ->fault handlers should return a
* page here, unless VM_FAULT_NOPAGE
* is set (which is also implied by
@@ -582,7 +582,7 @@ struct vm_operations_struct {
* (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
* in mm/mempolicy.c will do this automatically.
* get_policy() must NOT add a ref if the policy at (vma,addr) is not
- * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
+ * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
* If no [shared/vma] mempolicy exists at the addr, get_policy() op
* must return NULL--i.e., do not "fallback" to task or system default
* policy.
@@ -777,6 +777,7 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
}
extern void kvfree(const void *addr);
+extern void kvfree_sensitive(const void *addr, size_t len);
/*
* Mapcount of compound page as a whole, does not include mapped sub-pages.
@@ -867,7 +868,7 @@ enum compound_dtor_id {
#endif
NR_COMPOUND_DTORS,
};
-extern compound_page_dtor * const compound_page_dtors[];
+extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS];
static inline void set_compound_page_dtor(struct page *page,
enum compound_dtor_id compound_dtor)
@@ -876,10 +877,10 @@ static inline void set_compound_page_dtor(struct page *page,
page[1].compound_dtor = compound_dtor;
}
-static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
+static inline void destroy_compound_page(struct page *page)
{
VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
- return compound_page_dtors[page[1].compound_dtor];
+ compound_page_dtors[page[1].compound_dtor](page);
}
static inline unsigned int compound_order(struct page *page)
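
The call-site conversion implied by this change, sketched:

	/* before: the destructor leaked out to the caller */
	compound_page_dtor *dtor = get_compound_page_dtor(page);
	dtor(page);

	/* after: lookup and invocation stay behind one helper */
	destroy_compound_page(page);
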
@@ -946,8 +947,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
return pte;
}
-vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
- struct page *page);
+vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page);
vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif
@@ -1374,7 +1374,7 @@ static inline int cpu_pid_to_cpupid(int nid, int pid)
static inline bool cpupid_pid_unset(int cpupid)
{
- return 1;
+ return true;
}
static inline void page_cpupid_reset_last(struct page *page)
@@ -1707,6 +1707,8 @@ long pin_user_pages(unsigned long start, unsigned long nr_pages,
struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages, int *locked);
+long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
@@ -1727,7 +1729,7 @@ struct frame_vector {
unsigned int nr_frames; /* Number of frames stored in ptrs array */
bool got_ref; /* Did we pin pages by getting page ref? */
bool is_pfns; /* Does array contain pages or pfns? */
- void *ptrs[0]; /* Array of pinned pfns / pages. Use
+ void *ptrs[]; /* Array of pinned pfns / pages. Use
* pfns_vector_pages() or pfns_vector_pfns()
* for access */
};
@@ -1825,8 +1827,16 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
/*
 * These calls don't attempt to fault pages in and may return short
 * (fewer pages pinned than requested).
 */
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages);
+int get_user_pages_fast_only(unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages);
+int pin_user_pages_fast_only(unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages);
+
+static inline bool get_user_page_fast_only(unsigned long addr,
+ unsigned int gup_flags, struct page **pagep)
+{
+ return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
+}
/*
* per-process (per-mm_struct) statistics.
*/
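
A hedged sketch of the single-page helper from a context that must not sleep; addr is assumed to be a user virtual address, and the sleeping fallback path is assumed to exist elsewhere:

	struct page *page;

	if (!get_user_page_fast_only(addr, FOLL_WRITE, &page))
		return -EAGAIN;		/* retry via a path that may fault */
	/* ... use page, then put_page(page) ... */
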
@@ -2069,11 +2079,6 @@ int __pte_alloc_kernel(pmd_t *pmd);
#if defined(CONFIG_MMU)
-/*
- * The following ifdef needed to get the 5level-fixup.h header to work.
- * Remove it when 5level-fixup.h has been removed.
- */
-#ifndef __ARCH_HAS_5LEVEL_HACK
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
unsigned long address)
{
@@ -2102,8 +2107,6 @@ static inline p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd,
return p4d_offset(pgd, address);
}
-#endif /* !__ARCH_HAS_5LEVEL_HACK */
-
static inline pud_t *pud_alloc_track(struct mm_struct *mm, p4d_t *p4d,
unsigned long address,
pgtbl_mod_mask *mod_mask)
@@ -2327,9 +2330,7 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
}
extern void __init pagecache_init(void);
-extern void free_area_init(unsigned long * zones_size);
-extern void __init free_area_init_node(int nid, unsigned long * zones_size,
- unsigned long zone_start_pfn, unsigned long *zholes_size);
+extern void __init free_area_init_memoryless_node(int nid);
extern void free_initmem(void);
/*
@@ -2399,34 +2400,26 @@ static inline unsigned long get_num_physpages(void)
return phys_pages;
}
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
- * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
- * zones, allocate the backing mem_map and account for memory holes in a more
- * architecture independent manner. This is a substitute for creating the
- * zone_sizes[] and zholes_size[] arrays and passing them to
- * free_area_init_node()
+ * Using memblock node mappings, an architecture may initialise its
+ * zones, allocate the backing mem_map and account for memory holes in an
+ * architecture independent manner.
*
* An architecture is expected to register ranges of page frames backed by
* physical memory with memblock_add[_node]() before calling
- * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
+ * free_area_init() passing in the PFN each zone ends at. At a basic
* usage, an architecture is expected to do something like
*
* unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
* max_highmem_pfn};
* for_each_valid_physical_page_range()
* memblock_add_node(base, size, nid)
- * free_area_init_nodes(max_zone_pfns);
+ * free_area_init(max_zone_pfns);
*
- * free_bootmem_with_active_regions() calls free_bootmem_node() for each
- * registered physical page range. Similarly
* sparse_memory_present_with_active_regions() calls memory_present() for
* each range when SPARSEMEM is enabled.
- *
- * See mm/page_alloc.c for more information on each function exposed by
- * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
*/
-extern void free_area_init_nodes(unsigned long *max_zone_pfn);
+void free_area_init(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
unsigned long end_pfn);
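
A sketch of the documented sequence for a hypothetical flat architecture; ram_base, ram_size, and the zone PFN limits are placeholders:

static void __init foo_zones_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };

	memblock_add_node(ram_base, ram_size, 0);	/* register RAM */

	max_zone_pfns[ZONE_DMA]    = max_dma_pfn;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);	/* was free_area_init_nodes() */
}
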
@@ -2435,16 +2428,10 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn,
extern void get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
-extern void free_bootmem_with_active_regions(int nid,
- unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-
-#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
- !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
-static inline int __early_pfn_to_nid(unsigned long pfn,
- struct mminit_pfnnid_cache *state)
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+static inline int early_pfn_to_nid(unsigned long pfn)
{
return 0;
}
@@ -2480,6 +2467,7 @@ extern void setup_per_cpu_pageset(void);
extern int min_free_kbytes;
extern int watermark_boost_factor;
extern int watermark_scale_factor;
+extern bool arch_has_descending_max_zone_pfns(void);
/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
@@ -2816,6 +2804,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */
#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
#define FOLL_PIN 0x40000 /* pages must be released via unpin_user_page */
+#define FOLL_FAST_ONLY 0x80000 /* gup_fast: prevent fall-back to slow gup */
/*
* FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index ef6d3aface8a..64ede5f150dc 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -344,7 +344,7 @@ struct vm_area_struct {
* can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
* or brk vma (with NULL file) can only be in an anon_vma list.
*/
- struct list_head anon_vma_chain; /* Serialized by mmap_sem &
+ struct list_head anon_vma_chain; /* Serialized by mmap_lock &
* page_table_lock */
struct anon_vma *anon_vma; /* Serialized by page_table_lock */
@@ -440,7 +440,7 @@ struct mm_struct {
spinlock_t page_table_lock; /* Protects page tables and some
* counters
*/
- struct rw_semaphore mmap_sem;
+ struct rw_semaphore mmap_lock;
struct list_head mmlist; /* List of maybe swapped mm's. These
* are globally strung together off
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
new file mode 100644
index 000000000000..0707671851a8
--- /dev/null
+++ b/include/linux/mmap_lock.h
@@ -0,0 +1,90 @@
+#ifndef _LINUX_MMAP_LOCK_H
+#define _LINUX_MMAP_LOCK_H
+
+#include <linux/mmdebug.h>
+
+#define MMAP_LOCK_INITIALIZER(name) \
+ .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
+
+static inline void mmap_init_lock(struct mm_struct *mm)
+{
+ init_rwsem(&mm->mmap_lock);
+}
+
+static inline void mmap_write_lock(struct mm_struct *mm)
+{
+ down_write(&mm->mmap_lock);
+}
+
+static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
+{
+ down_write_nested(&mm->mmap_lock, subclass);
+}
+
+static inline int mmap_write_lock_killable(struct mm_struct *mm)
+{
+ return down_write_killable(&mm->mmap_lock);
+}
+
+static inline bool mmap_write_trylock(struct mm_struct *mm)
+{
+ return down_write_trylock(&mm->mmap_lock) != 0;
+}
+
+static inline void mmap_write_unlock(struct mm_struct *mm)
+{
+ up_write(&mm->mmap_lock);
+}
+
+static inline void mmap_write_downgrade(struct mm_struct *mm)
+{
+ downgrade_write(&mm->mmap_lock);
+}
+
+static inline void mmap_read_lock(struct mm_struct *mm)
+{
+ down_read(&mm->mmap_lock);
+}
+
+static inline int mmap_read_lock_killable(struct mm_struct *mm)
+{
+ return down_read_killable(&mm->mmap_lock);
+}
+
+static inline bool mmap_read_trylock(struct mm_struct *mm)
+{
+ return down_read_trylock(&mm->mmap_lock) != 0;
+}
+
+static inline void mmap_read_unlock(struct mm_struct *mm)
+{
+ up_read(&mm->mmap_lock);
+}
+
+static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
+{
+ if (down_read_trylock(&mm->mmap_lock)) {
+ rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
+ return true;
+ }
+ return false;
+}
+
+static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
+{
+ up_read_non_owner(&mm->mmap_lock);
+}
+
+static inline void mmap_assert_locked(struct mm_struct *mm)
+{
+ lockdep_assert_held(&mm->mmap_lock);
+ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+}
+
+static inline void mmap_assert_write_locked(struct mm_struct *mm)
+{
+ lockdep_assert_held_write(&mm->mmap_lock);
+ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+}
+
+#endif /* _LINUX_MMAP_LOCK_H */
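
The wrappers make the conversion from open-coded rwsem calls mechanical; a sketch of a read-side user after the rename:

static void walk_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem) */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		;			/* inspect vma */
	mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem) */
}
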
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index d9a543a9e1cc..c51a84132d7c 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -4,11 +4,6 @@
#include <asm/mmu_context.h>
-struct mm_struct;
-
-void use_mm(struct mm_struct *mm);
-void unuse_mm(struct mm_struct *mm);
-
/* Architectures that care about IRQ state in switch_mm can override this. */
#ifndef switch_mm_irqs_off
# define switch_mm_irqs_off switch_mm
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 736f6918335e..fc68f3570e19 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -5,6 +5,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>
@@ -121,7 +122,7 @@ struct mmu_notifier_ops {
/*
* invalidate_range_start() and invalidate_range_end() must be
- * paired and are called only when the mmap_sem and/or the
+ * paired and are called only when the mmap_lock and/or the
* locks protecting the reverse maps are held. If the subsystem
* can't guarantee that no additional references are taken to
* the pages in the range, it has to implement the
@@ -212,13 +213,13 @@ struct mmu_notifier_ops {
};
/*
- * The notifier chains are protected by mmap_sem and/or the reverse map
+ * The notifier chains are protected by mmap_lock and/or the reverse map
* semaphores. Notifier chains are only changed when all reverse maps and
- * the mmap_sem locks are taken.
+ * the mmap_lock locks are taken.
*
* Therefore notifier chains can only be traversed when either
*
- * 1. mmap_sem is held.
+ * 1. mmap_lock is held.
* 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
* 3. No other concurrent thread can access the list (release)
*/
@@ -277,9 +278,9 @@ mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
struct mmu_notifier *ret;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
ret = mmu_notifier_get_locked(ops, mm);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
void mmu_notifier_put(struct mmu_notifier *subscription);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f5b6ccf41141..c4c37fd12104 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -242,19 +242,6 @@ static inline bool is_active_lru(enum lru_list lru)
return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
-struct zone_reclaim_stat {
- /*
- * The pageout code in vmscan.c keeps track of how many of the
- * mem/swap backed and file backed pages are referenced.
- * The higher the rotated/scanned ratio, the more valuable
- * that cache is.
- *
- * The anon LRU stats live in [0], file LRU stats in [1]
- */
- unsigned long recent_rotated[2];
- unsigned long recent_scanned[2];
-};
-
enum lruvec_flags {
LRUVEC_CONGESTED, /* lruvec has many dirty pages
* backed by a congested BDI
@@ -263,7 +250,13 @@ enum lruvec_flags {
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
- struct zone_reclaim_stat reclaim_stat;
+ /*
+ * These track the cost of reclaiming one LRU - file or anon -
+ * over the other. As the observed cost of reclaiming one LRU
+ * increases, the reclaim scan balance tips toward the other.
+ */
+ unsigned long anon_cost;
+ unsigned long file_cost;
/* Evictions & activations on the inactive file list */
atomic_long_t inactive_age;
/* Refaults at the time of last reclaim cycle */
@@ -667,9 +660,21 @@ struct deferred_split {
* per-zone basis.
*/
typedef struct pglist_data {
+ /*
+ * node_zones contains just the zones for THIS node. Not all of the
+ * zones may be populated, but it is the full list. It is referenced by
+ * this node's node_zonelists as well as other node's node_zonelists.
+ */
struct zone node_zones[MAX_NR_ZONES];
+
+ /*
+ * node_zonelists contains references to all zones in all nodes.
+ * Generally the first zones will be references to this node's
+ * node_zones.
+ */
struct zonelist node_zonelists[MAX_ZONELISTS];
- int nr_zones;
+
+ int nr_zones; /* number of populated zones in this node */
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
@@ -680,6 +685,8 @@ typedef struct pglist_data {
/*
* Must be held any time you expect node_start_pfn,
* node_present_pages, node_spanned_pages or nr_zones to stay constant.
+ * Also synchronizes pgdat->first_deferred_pfn during deferred page
+ * init.
*
* pgdat_resize_lock() and pgdat_resize_unlock() are provided to
* manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
@@ -699,13 +706,13 @@ typedef struct pglist_data {
struct task_struct *kswapd; /* Protected by
mem_hotplug_begin/end() */
int kswapd_order;
- enum zone_type kswapd_classzone_idx;
+ enum zone_type kswapd_highest_zoneidx;
int kswapd_failures; /* Number of 'reclaimed == 0' runs */
#ifdef CONFIG_COMPACTION
int kcompactd_max_order;
- enum zone_type kcompactd_classzone_idx;
+ enum zone_type kcompactd_highest_zoneidx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
#endif
@@ -783,15 +790,15 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat)
void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
- enum zone_type classzone_idx);
+ enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
- int classzone_idx, unsigned int alloc_flags,
+ int highest_zoneidx, unsigned int alloc_flags,
long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
- unsigned long mark, int classzone_idx,
+ unsigned long mark, int highest_zoneidx,
unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
- unsigned long mark, int classzone_idx);
+ unsigned long mark, int highest_zoneidx);
enum memmap_context {
MEMMAP_EARLY,
MEMMAP_HOTPLUG,
@@ -876,7 +883,7 @@ extern int movable_zone;
#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+#ifdef CONFIG_NEED_MULTIPLE_NODES
return movable_zone == ZONE_HIGHMEM;
#else
return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
@@ -1079,15 +1086,6 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
#include <asm/sparsemem.h>
#endif
-#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
- !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
-static inline unsigned long early_pfn_to_nid(unsigned long pfn)
-{
- BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
- return 0;
-}
-#endif
-
#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn) (0)
#endif
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 580e554fb6dc..8d764aab29de 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -532,6 +532,8 @@ enum dmi_field {
DMI_BIOS_VENDOR,
DMI_BIOS_VERSION,
DMI_BIOS_DATE,
+ DMI_BIOS_RELEASE,
+ DMI_EC_FIRMWARE_RELEASE,
DMI_SYS_VENDOR,
DMI_PRODUCT_NAME,
DMI_PRODUCT_VERSION,
diff --git a/include/linux/module.h b/include/linux/module.h
index d849d06e4d44..2e6670860d27 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -866,14 +866,6 @@ extern int module_sysfs_initialized;
#define __MODULE_STRING(x) __stringify(x)
-#ifdef CONFIG_STRICT_MODULE_RWX
-extern void module_enable_ro(const struct module *mod, bool after_init);
-extern void module_disable_ro(const struct module *mod);
-#else
-static inline void module_enable_ro(const struct module *mod, bool after_init) { }
-static inline void module_disable_ro(const struct module *mod) { }
-#endif
-
#ifdef CONFIG_GENERIC_BUG
void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
struct module *);
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 7edac8c7a9c1..de657bd211fa 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -111,4 +111,6 @@ extern unsigned int sysctl_mount_max;
extern bool path_is_mountpoint(const struct path *path);
+extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num);
+
#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 886e30441c90..d890805f5494 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -98,7 +98,7 @@ struct nand_bbt_descr {
/*
* Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr
- * was allocated dynamicaly and must be freed in nand_release(). Has no meaning
+ * was allocated dynamically and must be freed in nand_cleanup(). Has no meaning
* in nand_chip.bbt_options.
*/
#define NAND_BBT_DYNAMICSTRUCT 0x80000000
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index c98a21108688..fd1ecb821106 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -138,7 +138,7 @@ struct cfi_ident {
uint16_t InterfaceDesc;
uint16_t MaxBufWriteSize;
uint8_t NumEraseRegions;
- uint32_t EraseRegionInfo[0]; /* Not host ordered */
+ uint32_t EraseRegionInfo[]; /* Not host ordered */
} __packed;
/* Extended Query Structure for both PRI and ALT */
@@ -165,7 +165,7 @@ struct cfi_pri_intelext {
uint16_t ProtRegAddr;
uint8_t FactProtRegSize;
uint8_t UserProtRegSize;
- uint8_t extra[0];
+ uint8_t extra[];
} __packed;
struct cfi_intelext_otpinfo {
@@ -286,7 +286,7 @@ struct cfi_private {
map_word sector_erase_cmd;
unsigned long chipshift; /* Because they're of the same type */
const char *im_name; /* inter_module name for cmdset_setup */
- struct flchip chips[0]; /* per-chip data structure for each chip */
+ struct flchip chips[]; /* per-chip data structure for each chip */
};
uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 2d1f4a61f4ac..157357ec1441 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -200,6 +200,8 @@ struct mtd_debug_info {
*
* @node: list node used to add an MTD partition to the parent partition list
* @offset: offset of the partition relatively to the parent offset
+ * @size: partition size. Should be equal to mtd->size unless
+ * MTD_SLC_ON_MLC_EMULATION is set
* @flags: original flags (before the mtdpart logic decided to tweak them based
* on flash constraints, like eraseblock/pagesize alignment)
*
@@ -209,6 +211,7 @@ struct mtd_debug_info {
struct mtd_part {
struct list_head node;
u64 offset;
+ u64 size;
u32 flags;
};
@@ -622,7 +625,9 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
static inline int mtd_wunit_per_eb(struct mtd_info *mtd)
{
- return mtd->erasesize / mtd->writesize;
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ return master->erasesize / mtd->writesize;
}
static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs)
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index e545c050d3e8..b74a539ec581 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -37,6 +37,7 @@
* master MTD flag set for the corresponding MTD partition.
* For example, to force a read-only partition, simply adding
* MTD_WRITEABLE to the mask_flags will do the trick.
+ * add_flags: contains flags to add to the parent flags
*
* Note: writeable partitions require their size and offset be
* erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
@@ -48,6 +49,7 @@ struct mtd_partition {
uint64_t size; /* partition size */
uint64_t offset; /* offset within the master MTD space */
uint32_t mask_flags; /* master MTD flags to mask out for this partition */
+ uint32_t add_flags; /* flags to add to the partition */
struct device_node *of_node;
};
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h
index df5b9fddea16..2e3f43788d48 100644
--- a/include/linux/mtd/qinfo.h
+++ b/include/linux/mtd/qinfo.h
@@ -24,7 +24,7 @@ struct lpddr_private {
struct qinfo_chip *qinfo;
int numchips;
unsigned long chipshift;
- struct flchip chips[0];
+ struct flchip chips[];
};
/* qinfo_query_info structure contains request information for
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 1e76196f9829..65b1c1c18b41 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -83,14 +83,14 @@ struct nand_chip;
/*
* Constants for ECC_MODES
*/
-typedef enum {
+enum nand_ecc_mode {
+ NAND_ECC_INVALID,
NAND_ECC_NONE,
NAND_ECC_SOFT,
NAND_ECC_HW,
NAND_ECC_HW_SYNDROME,
- NAND_ECC_HW_OOB_FIRST,
NAND_ECC_ON_DIE,
-} nand_ecc_modes_t;
+};
enum nand_ecc_algo {
NAND_ECC_UNKNOWN,
@@ -119,85 +119,73 @@ enum nand_ecc_algo {
#define NAND_ECC_MAXIMIZE BIT(1)
/*
+ * Option constants for bizarre dysfunctionality and real
+ * features.
+ */
+
+/* Buswidth is 16 bit */
+#define NAND_BUSWIDTH_16 BIT(1)
+
+/*
* When using the software implementation of Hamming, we can specify which byte
* ordering should be used.
*/
#define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2)
-/*
- * Option constants for bizarre disfunctionality and real
- * features.
- */
-/* Buswidth is 16 bit */
-#define NAND_BUSWIDTH_16 0x00000002
/* Chip has cache program function */
-#define NAND_CACHEPRG 0x00000008
+#define NAND_CACHEPRG BIT(3)
+/* Options valid for Samsung large page devices */
+#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
+
/*
* Chip requires ready check on read (for auto-incremented sequential read).
* True only for small page devices; large page devices do not support
* autoincrement.
*/
-#define NAND_NEED_READRDY 0x00000100
+#define NAND_NEED_READRDY BIT(8)
/* Chip does not allow subpage writes */
-#define NAND_NO_SUBPAGE_WRITE 0x00000200
+#define NAND_NO_SUBPAGE_WRITE BIT(9)
/* Device is one of 'new' xD cards that expose fake nand command set */
-#define NAND_BROKEN_XD 0x00000400
+#define NAND_BROKEN_XD BIT(10)
/* Device behaves just like nand, but is readonly */
-#define NAND_ROM 0x00000800
+#define NAND_ROM BIT(11)
/* Device supports subpage reads */
-#define NAND_SUBPAGE_READ 0x00001000
+#define NAND_SUBPAGE_READ BIT(12)
+/* Macros to identify the above */
+#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
/*
* Some MLC NANDs need data scrambling to limit bitflips caused by repeated
* patterns.
*/
-#define NAND_NEED_SCRAMBLING 0x00002000
+#define NAND_NEED_SCRAMBLING BIT(13)
/* Device needs 3rd row address cycle */
-#define NAND_ROW_ADDR_3 0x00004000
-
-/* Options valid for Samsung large page devices */
-#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
-
-/* Macros to identify the above */
-#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
-
-/*
- * There are different places where the manufacturer stores the factory bad
- * block markers.
- *
- * Position within the block: Each of these pages needs to be checked for a
- * bad block marking pattern.
- */
-#define NAND_BBM_FIRSTPAGE 0x01000000
-#define NAND_BBM_SECONDPAGE 0x02000000
-#define NAND_BBM_LASTPAGE 0x04000000
-
-/* Position within the OOB data of the page */
-#define NAND_BBM_POS_SMALL 5
-#define NAND_BBM_POS_LARGE 0
+#define NAND_ROW_ADDR_3 BIT(14)
/* Non chip related options */
/* This option skips the bbt scan during initialization. */
-#define NAND_SKIP_BBTSCAN 0x00010000
+#define NAND_SKIP_BBTSCAN BIT(16)
/* Chip may not exist, so silence any errors in scan */
-#define NAND_SCAN_SILENT_NODEV 0x00040000
+#define NAND_SCAN_SILENT_NODEV BIT(18)
+
/*
* Autodetect nand buswidth with readid/onfi.
* This assumes the driver will configure the hardware in 8-bit mode
* when calling nand_scan_ident, and update its configuration
* before calling nand_scan_tail.
*/
-#define NAND_BUSWIDTH_AUTO 0x00080000
+#define NAND_BUSWIDTH_AUTO BIT(19)
+
/*
* This option could be defined by controller drivers to protect against
* kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
*/
-#define NAND_USE_BOUNCE_BUFFER 0x00100000
+#define NAND_USES_DMA BIT(20)
/*
* In case your controller is implementing ->legacy.cmd_ctrl() and is relying
@@ -207,26 +195,49 @@ enum nand_ecc_algo {
* If your controller already takes care of this delay, you don't need to set
* this flag.
*/
-#define NAND_WAIT_TCCS 0x00200000
+#define NAND_WAIT_TCCS BIT(21)
/*
* Whether the NAND chip is a boot medium. Drivers might use this information
* to select ECC algorithms supported by the boot ROM or similar restrictions.
*/
-#define NAND_IS_BOOT_MEDIUM 0x00400000
+#define NAND_IS_BOOT_MEDIUM BIT(22)
/*
* Do not try to tweak the timings at runtime. This is needed when the
* controller initializes the timings itself or when it relies on
* configuration done by the bootloader.
*/
-#define NAND_KEEP_TIMINGS 0x00800000
+#define NAND_KEEP_TIMINGS BIT(23)
+
+/*
+ * There are different places where the manufacturer stores the factory bad
+ * block markers.
+ *
+ * Position within the block: Each of these pages needs to be checked for a
+ * bad block marking pattern.
+ */
+#define NAND_BBM_FIRSTPAGE BIT(24)
+#define NAND_BBM_SECONDPAGE BIT(25)
+#define NAND_BBM_LASTPAGE BIT(26)
+
+/*
+ * Some controllers with pipelined ECC engines override the BBM marker with
+ * data or ECC bytes, thus making bad block detection through bad block marker
+ * impossible. Let's flag those chips so the core knows it shouldn't check the
+ * BBM and consider all blocks good.
+ */
+#define NAND_NO_BBM_QUIRK BIT(27)
/* Cell info constants */
#define NAND_CI_CHIPNR_MSK 0x03
#define NAND_CI_CELLTYPE_MSK 0x0C
#define NAND_CI_CELLTYPE_SHIFT 2
+/* Position within the OOB data of the page */
+#define NAND_BBM_POS_SMALL 5
+#define NAND_BBM_POS_LARGE 0
+
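
The hex-to-BIT() conversion is value-preserving; a compile-time spot check one could drop into a test (sketch):

	BUILD_BUG_ON(NAND_BUSWIDTH_16  != 0x00000002);	/* BIT(1)  */
	BUILD_BUG_ON(NAND_CACHEPRG     != 0x00000008);	/* BIT(3)  */
	BUILD_BUG_ON(NAND_SKIP_BBTSCAN != 0x00010000);	/* BIT(16) */
	BUILD_BUG_ON(NAND_BBM_LASTPAGE != 0x04000000);	/* BIT(26) */
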
/**
* struct nand_parameters - NAND generic parameters from the parameter page
* @model: Model name
@@ -351,7 +362,7 @@ static const struct nand_ecc_caps __name = { \
* @write_oob: function to write chip OOB data
*/
struct nand_ecc_ctrl {
- nand_ecc_modes_t mode;
+ enum nand_ecc_mode mode;
enum nand_ecc_algo algo;
int steps;
int size;
@@ -491,13 +502,17 @@ enum nand_data_interface_type {
/**
* struct nand_data_interface - NAND interface timing
* @type: type of the timing
- * @timings: The timing, type according to @type
+ * @timings: The timing information
+ * @timings.mode: Timing mode as defined in the specification
* @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
*/
struct nand_data_interface {
enum nand_data_interface_type type;
- union {
- struct nand_sdr_timings sdr;
+ struct nand_timings {
+ unsigned int mode;
+ union {
+ struct nand_sdr_timings sdr;
+ };
} timings;
};
@@ -694,6 +709,7 @@ struct nand_op_instr {
/**
* struct nand_subop - a sub operation
+ * @cs: the CS line to select for this NAND sub-operation
* @instrs: array of instructions
* @ninstrs: length of the @instrs array
* @first_instr_start_off: offset to start from for the first instruction
@@ -709,6 +725,7 @@ struct nand_op_instr {
* controller driver.
*/
struct nand_subop {
+ unsigned int cs;
const struct nand_op_instr *instrs;
unsigned int ninstrs;
unsigned int first_instr_start_off;
@@ -1321,13 +1338,17 @@ int nand_read_oob_std(struct nand_chip *chip, int page);
int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
u8 *subfeature_param);
-/* Default read_page_raw implementation */
+/* read_page_raw implementations */
int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
int page);
+int nand_monolithic_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
-/* Default write_page_raw implementation */
+/* write_page_raw implementations */
int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page);
+int nand_monolithic_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
@@ -1356,7 +1377,7 @@ int nand_change_write_column_op(struct nand_chip *chip,
unsigned int offset_in_page, const void *buf,
unsigned int len, bool force_8bit);
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
- bool force_8bit);
+ bool force_8bit, bool check_only);
int nand_write_data_op(struct nand_chip *chip, const void *buf,
unsigned int len, bool force_8bit);
@@ -1377,8 +1398,6 @@ void nand_wait_ready(struct nand_chip *chip);
* successful nand_scan().
*/
void nand_cleanup(struct nand_chip *chip);
-/* Unregister the MTD device and calls nand_cleanup() */
-void nand_release(struct nand_chip *chip);
/*
* External helper for controller drivers that have to implement the WAITRDY
@@ -1393,6 +1412,10 @@ int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
void nand_select_target(struct nand_chip *chip, unsigned int cs);
void nand_deselect_target(struct nand_chip *chip);
+/* Bitops */
+void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
+ unsigned int src_off, unsigned int nbits);
+
/**
* nand_get_data_buf() - Get the internal page buffer
* @chip: NAND chip object
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 1e2af0ec1f03..60bac2c0ec45 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -20,6 +20,7 @@
*/
/* Flash opcodes. */
+#define SPINOR_OP_WRDI 0x04 /* Write disable */
#define SPINOR_OP_WREN 0x06 /* Write enable */
#define SPINOR_OP_RDSR 0x05 /* Read status register */
#define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */
@@ -80,7 +81,6 @@
/* Used for SST flashes only. */
#define SPINOR_OP_BP 0x02 /* Byte program */
-#define SPINOR_OP_WRDI 0x04 /* Write disable */
#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */
/* Used for S3AN flashes only */
@@ -302,7 +302,7 @@ struct spi_nor;
* @read: read data from the SPI NOR.
* @write: write data to the SPI NOR.
* @erase: erase a sector of the SPI NOR at the offset @offs; if
- * not provided by the driver, spi-nor will send the erase
+ * not provided by the driver, SPI NOR will send the erase
* opcode via write_reg().
*/
struct spi_nor_controller_ops {
@@ -327,16 +327,16 @@ struct spi_nor_manufacturer;
struct spi_nor_flash_parameter;
/**
- * struct spi_nor - Structure for defining a the SPI NOR layer
- * @mtd: point to a mtd_info structure
+ * struct spi_nor - Structure for defining the SPI NOR layer
+ * @mtd: an mtd_info structure
* @lock: the lock for the read/write/erase/lock/unlock operations
- * @dev: point to a spi device, or a spi nor controller device.
- * @spimem: point to the spi mem device
+ * @dev: pointer to an SPI device or an SPI NOR controller device
+ * @spimem: pointer to the SPI memory device
* @bouncebuf: bounce buffer used when the buffer passed by the MTD
* layer is not DMA-able
* @bouncebuf_size: size of the bounce buffer
- * @info: spi-nor part JDEC MFR id and other info
- * @manufacturer: spi-nor manufacturer
+ * @info: SPI NOR part JEDEC MFR ID and other info
+ * @manufacturer: SPI NOR manufacturer
* @page_size: the page size of the SPI NOR
* @addr_width: number of address bytes
* @erase_opcode: the opcode for erasing a sector
@@ -344,17 +344,17 @@ struct spi_nor_flash_parameter;
* @read_dummy: the dummy needed by the read operation
* @program_opcode: the program opcode
* @sst_write_second: used by the SST write operation
- * @flags: flag options for the current SPI-NOR (SNOR_F_*)
+ * @flags: flag options for the current SPI NOR (SNOR_F_*)
* @read_proto: the SPI protocol for read operations
* @write_proto: the SPI protocol for write operations
- * @reg_proto the SPI protocol for read_reg/write_reg/erase operations
+ * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations
* @controller_ops: SPI NOR controller driver specific operations.
- * @params: [FLASH-SPECIFIC] SPI-NOR flash parameters and settings.
+ * @params: [FLASH-SPECIFIC] SPI NOR flash parameters and settings.
* The structure includes legacy flash parameters and
* settings that can be overwritten by the spi_nor_fixups
* hooks, or dynamically when parsing the SFDP tables.
* @dirmap: pointers to struct spi_mem_dirmap_desc for reads/writes.
- * @priv: the private data
+ * @priv: pointer to the private data
*/
struct spi_nor {
struct mtd_info mtd;
diff --git a/include/linux/net.h b/include/linux/net.h
index e10f378194a5..016a9c5faa34 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -264,7 +264,8 @@ do { \
net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_info_ratelimited(fmt, ...) \
net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define net_dbg_ratelimited(fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1a96e9c4ec36..39e28e11863c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1821,8 +1821,6 @@ enum netdev_priv_flags {
* for hardware timestamping
* @sfp_bus: attached &struct sfp_bus structure.
*
- * @addr_list_lock_key: lockdep class annotating
- * net_device->addr_list_lock spinlock
* @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
* @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
*
@@ -2125,7 +2123,6 @@ struct net_device {
#endif
struct phy_device *phydev;
struct sfp_bus *sfp_bus;
- struct lock_class_key addr_list_lock_key;
struct lock_class_key *qdisc_tx_busylock;
struct lock_class_key *qdisc_running_key;
bool proto_down;
@@ -2217,10 +2214,13 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
static struct lock_class_key qdisc_tx_busylock_key; \
static struct lock_class_key qdisc_running_key; \
static struct lock_class_key qdisc_xmit_lock_key; \
+ static struct lock_class_key dev_addr_list_lock_key; \
unsigned int i; \
\
(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
(dev)->qdisc_running_key = &qdisc_running_key; \
+ lockdep_set_class(&(dev)->addr_list_lock, \
+ &dev_addr_list_lock_key); \
for (i = 0; i < (dev)->num_tx_queues; i++) \
lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
&qdisc_xmit_lock_key); \
@@ -3157,7 +3157,7 @@ static inline int dev_recursion_level(void)
return this_cpu_read(softnet_data.xmit.recursion);
}
-#define XMIT_RECURSION_LIMIT 10
+#define XMIT_RECURSION_LIMIT 8
static inline bool dev_xmit_recursion(void)
{
return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
@@ -3253,7 +3253,6 @@ static inline void netif_stop_queue(struct net_device *dev)
}
void netif_tx_stop_all_queues(struct net_device *dev);
-void netdev_update_lockdep_key(struct net_device *dev);
static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
@@ -4239,6 +4238,11 @@ static inline void netif_addr_lock(struct net_device *dev)
spin_lock(&dev->addr_list_lock);
}
+static inline void netif_addr_lock_nested(struct net_device *dev)
+{
+ spin_lock_nested(&dev->addr_list_lock, dev->lower_level);
+}
+
static inline void netif_addr_lock_bh(struct net_device *dev)
{
spin_lock_bh(&dev->addr_list_lock);
@@ -4942,7 +4946,8 @@ do { \
#define MODULE_ALIAS_NETDEV(device) \
MODULE_ALIAS("netdev-" device)
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define netdev_dbg(__dev, format, args...) \
do { \
dynamic_netdev_dbg(__dev, format, ##args); \
@@ -5012,7 +5017,8 @@ do { \
#define netif_info(priv, type, dev, fmt, args...) \
netif_level(info, priv, type, dev, fmt, ##args)
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define netif_dbg(priv, type, netdev, format, args...) \
do { \
if (netif_msg_##type(priv)) \
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 82d8fb422092..4dba3c948932 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -38,7 +38,7 @@ struct nfs4_ace {
struct nfs4_acl {
uint32_t naces;
- struct nfs4_ace aces[0];
+ struct nfs4_ace aces[];
};
#define NFS4_MAXLABELLEN 2048
@@ -295,7 +295,7 @@ static inline bool seqid_mutating_err(u32 err)
case NFS4ERR_NOFILEHANDLE:
case NFS4ERR_MOVED:
return false;
- };
+ }
return true;
}
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 73eda45f1cfd..6ee9119acc5d 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -230,6 +230,7 @@ struct nfs4_copy_state {
#define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */
#define NFS_INO_DATA_INVAL_DEFER \
BIT(13) /* Deferred cache invalidation */
+#define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */
#define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \
| NFS_INO_INVALID_CTIME \
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index e5f3e7d8d3d5..5fd0a9ef425f 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1227,7 +1227,7 @@ struct nfs4_secinfo4 {
struct nfs4_secinfo_flavors {
unsigned int num_flavors;
- struct nfs4_secinfo4 flavors[0];
+ struct nfs4_secinfo4 flavors[];
};
struct nfs4_secinfo_arg {
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index 8c13538aeffe..191b524e5c0d 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -478,7 +478,7 @@ void ntb_unregister_client(struct ntb_client *client);
int ntb_register_device(struct ntb_dev *ntb);
/**
- * ntb_register_device() - unregister a ntb device
+ * ntb_unregister_device() - unregister an ntb device
* @ntb: NTB device context.
*
* The device will be removed from the list of ntb devices. If the ntb device
@@ -1351,7 +1351,7 @@ static inline int ntb_spad_write(struct ntb_dev *ntb, int sidx, u32 val)
* @sidx: Scratchpad index.
* @spad_addr: OUT - The address of the peer scratchpad register.
*
- * Return the address of the peer doorbell register. This may be used, for
+ * Return the address of the peer scratchpad register. This may be used, for
* example, by drivers that offload memory copy operations to a dma engine.
*
* Return: Zero on success, otherwise an error number.
@@ -1373,7 +1373,7 @@ static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
*
* Read the peer scratchpad register, and return the value.
*
- * Return: The value of the local scratchpad register.
+ * Return: The value of the peer scratchpad register.
*/
static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index 60f541912ccf..8216a4156263 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -3,6 +3,7 @@
#define __OF_RESERVED_MEM_H
#include <linux/device.h>
+#include <linux/of.h>
struct of_phandle_args;
struct reserved_mem_ops;
@@ -33,6 +34,9 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx);
+int of_reserved_mem_device_init_by_name(struct device *dev,
+ struct device_node *np,
+ const char *name);
void of_reserved_mem_device_release(struct device *dev);
void fdt_init_reserved_mem(void);
@@ -45,6 +49,14 @@ static inline int of_reserved_mem_device_init_by_idx(struct device *dev,
{
return -ENOSYS;
}
+
+static inline int of_reserved_mem_device_init_by_name(struct device *dev,
+ struct device_node *np,
+ const char *name)
+{
+ return -ENOSYS;
+}
+
static inline void of_reserved_mem_device_release(struct device *pdev) { }
static inline void fdt_init_reserved_mem(void) { }
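
A hedged probe-time sketch of the new by-name lookup, assuming the device node carries matching memory-region and memory-region-names properties; the driver and region names are hypothetical:

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = of_reserved_mem_device_init_by_name(&pdev->dev,
						  pdev->dev.of_node,
						  "framebuffer");
	if (ret)
		return ret;

	/* DMA allocations for this device now come from the named region */
	return 0;
}
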
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 693cae9bfe66..7302efff5e65 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -4,6 +4,9 @@
*
* Copyright (C) 2008, 2009 secunet Security Networks AG
* Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * Copyright (c) 2020 Oracle and/or its affiliates.
+ * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
*/
#ifndef PADATA_H
@@ -24,7 +27,6 @@
* @list: List entry, to attach to the padata lists.
* @pd: Pointer to the internal control structure.
* @cb_cpu: Callback CPU for serialization.
- * @cpu: Cpu for parallelization.
* @seq_nr: Sequence number of the parallelized data object.
* @info: Used to pass information from the parallel to the serial function.
* @parallel: Parallel execution function.
@@ -34,7 +36,6 @@ struct padata_priv {
struct list_head list;
struct parallel_data *pd;
int cb_cpu;
- int cpu;
unsigned int seq_nr;
int info;
void (*parallel)(struct padata_priv *padata);
@@ -68,15 +69,11 @@ struct padata_serial_queue {
/**
* struct padata_parallel_queue - The percpu padata parallel queue
*
- * @parallel: List to wait for parallelization.
* @reorder: List to wait for reordering after parallel processing.
- * @work: work struct for parallelization.
* @num_obj: Number of objects that are processed by this cpu.
*/
struct padata_parallel_queue {
- struct padata_list parallel;
struct padata_list reorder;
- struct work_struct work;
atomic_t num_obj;
};
@@ -111,7 +108,7 @@ struct parallel_data {
struct padata_parallel_queue __percpu *pqueue;
struct padata_serial_queue __percpu *squeue;
atomic_t refcnt;
- atomic_t seq_nr;
+ unsigned int seq_nr;
unsigned int processed;
int cpu;
struct padata_cpumask cpumask;
@@ -137,6 +134,31 @@ struct padata_shell {
};
/**
+ * struct padata_mt_job - represents one multithreaded job
+ *
+ * @thread_fn: Called for each chunk of work that a padata thread does.
+ * @fn_arg: The thread function argument.
+ * @start: The start of the job (units are job-specific).
+ * @size: size of this node's work (units are job-specific).
+ * @align: Ranges passed to the thread function fall on this boundary, with the
+ * possible exceptions of the beginning and end of the job.
+ * @min_chunk: The minimum chunk size in job-specific units. This allows
+ * the client to communicate the minimum amount of work that's
+ * appropriate for one worker thread to do at once.
+ * @max_threads: Max threads to use for the job, actual number may be less
+ * depending on task size and minimum chunk size.
+ */
+struct padata_mt_job {
+ void (*thread_fn)(unsigned long start, unsigned long end, void *arg);
+ void *fn_arg;
+ unsigned long start;
+ unsigned long size;
+ unsigned long align;
+ unsigned long min_chunk;
+ int max_threads;
+};
+
+/**
* struct padata_instance - The overall control structure.
*
* @cpu_online_node: Linkage for CPU online callback.
@@ -166,6 +188,12 @@ struct padata_instance {
#define PADATA_INVALID 4
};
+#ifdef CONFIG_PADATA
+extern void __init padata_init(void);
+#else
+static inline void __init padata_init(void) {}
+#endif
+
extern struct padata_instance *padata_alloc_possible(const char *name);
extern void padata_free(struct padata_instance *pinst);
extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst);
@@ -173,6 +201,7 @@ extern void padata_free_shell(struct padata_shell *ps);
extern int padata_do_parallel(struct padata_shell *ps,
struct padata_priv *padata, int *cb_cpu);
extern void padata_do_serial(struct padata_priv *padata);
+extern void __init padata_do_multithreaded(struct padata_mt_job *job);
extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
cpumask_var_t cpumask);
extern int padata_start(struct padata_instance *pinst);
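
A boot-time sketch of the new multithreaded interface; the item initializer and chunk sizes are hypothetical:

static void __init foo_init_chunk(unsigned long start, unsigned long end,
				  void *arg)
{
	/* initialize items in [start, end) */
}

static void __init foo_init_all(unsigned long nr_items)
{
	struct padata_mt_job job = {
		.thread_fn   = foo_init_chunk,
		.fn_arg      = NULL,
		.start       = 0,
		.size        = nr_items,
		.align       = 1,
		.min_chunk   = 1024,
		.max_threads = 4,
	};

	padata_do_multithreaded(&job);
}
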
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 222f6f7b2bb3..6be1aa559b1e 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -777,6 +777,16 @@ PAGE_TYPE_OPS(Buddy, buddy)
* not onlined when onlining the section).
* The content of these pages is effectively stale. Such pages should not
* be touched (read/write/dump/save) except by their owner.
+ *
+ * If a driver wants to allow offlining of unmovable PageOffline() pages
+ * without putting them back to the buddy, it can do so via the memory
+ * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
+ * incrementing it again in MEM_CANCEL_OFFLINE. When offlining, the
+ * PageOffline() pages (now with a reference count of zero) are treated
+ * like free pages, allowing the containing memory block to get offlined.
+ * A driver relying on this feature must be aware that re-onlining the
+ * memory block requires re-marking the pages PageOffline() and not
+ * handing them to the buddy via online_page_callback_t.
*/
PAGE_TYPE_OPS(Offline, offline)
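
A hedged sketch of the notifier dance described above, for a hypothetical driver owning unmovable PageOffline() pages; the ref-adjusting helpers are placeholders:

static int foo_memory_notifier(struct notifier_block *nb,
			       unsigned long action, void *arg)
{
	switch (action) {
	case MEM_GOING_OFFLINE:
		foo_drop_page_refs();	/* refcount 1 -> 0 on owned pages */
		break;
	case MEM_CANCEL_OFFLINE:
		foo_take_page_refs();	/* offlining failed: back to 1 */
		break;
	}
	return NOTIFY_OK;
}
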
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 8e085713150c..cf2468da68e9 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -538,7 +538,7 @@ static inline int lock_page_killable(struct page *page)
* lock_page_or_retry - Lock the page, unless this would block and the
* caller indicated that it can handle a retry.
*
- * Return value and mmap_sem implications depend on flags; see
+ * Return value and mmap_lock implications depend on flags; see
* __lock_page_or_retry().
*/
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 13932ce8b37b..1fb508c19e83 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -325,18 +325,10 @@ struct pardev_cb {
unsigned int flags;
};
-/* parport_register_device declares that a device is connected to a
- port, and tells the kernel all it needs to know.
- - pf is the preemption function (may be NULL for no callback)
- - kf is the wake-up function (may be NULL for no callback)
- - irq_func is the interrupt handler (may be NULL for no interrupts)
- - handle is a user pointer that gets handed to callback functions. */
-struct pardevice *parport_register_device(struct parport *port,
- const char *name,
- int (*pf)(void *), void (*kf)(void *),
- void (*irq_func)(void *),
- int flags, void *handle);
-
+/*
+ * parport_register_dev_model declares that a device is connected to a
+ * port, and tells the kernel all it needs to know.
+ */
struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
const struct pardev_cb *par_dev_cb, int cnt);
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 2d155bfb8fbf..5ba475ca9078 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -27,7 +27,7 @@ extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle);
struct pci_ecam_ops;
extern int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres,
- struct pci_ecam_ops **ecam_ops);
+ const struct pci_ecam_ops **ecam_ops);
static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
{
@@ -107,10 +107,12 @@ static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { }
#endif
extern const guid_t pci_acpi_dsm_guid;
-#define IGNORE_PCI_BOOT_CONFIG_DSM 0x05
-#define DEVICE_LABEL_DSM 0x07
-#define RESET_DELAY_DSM 0x08
-#define FUNCTION_DELAY_DSM 0x09
+
+/* _DSM Definitions for PCI */
+#define DSM_PCI_PRESERVE_BOOT_CONFIG 0x05
+#define DSM_PCI_DEVICE_NAME 0x07
+#define DSM_PCI_POWER_ON_RESET_DELAY 0x08
+#define DSM_PCI_DEVICE_READINESS_DURATIONS 0x09
#ifdef CONFIG_PCIE_EDR
void pci_acpi_add_edr_notifier(struct pci_dev *pdev);
@@ -125,10 +127,4 @@ static inline void acpi_pci_add_bus(struct pci_bus *bus) { }
static inline void acpi_pci_remove_bus(struct pci_bus *bus) { }
#endif /* CONFIG_ACPI */
-#ifdef CONFIG_ACPI_APEI
-extern bool aer_acpi_firmware_first(void);
-#else
-static inline bool aer_acpi_firmware_first(void) { return false; }
-#endif
-
#endif /* _PCI_ACPI_H_ */
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index d08f0869f121..f75c307f346d 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -6,11 +6,14 @@
#ifdef CONFIG_PCI_ATS
/* Address Translation Service */
+bool pci_ats_supported(struct pci_dev *dev);
int pci_enable_ats(struct pci_dev *dev, int ps);
void pci_disable_ats(struct pci_dev *dev);
int pci_ats_queue_depth(struct pci_dev *dev);
int pci_ats_page_aligned(struct pci_dev *dev);
#else /* CONFIG_PCI_ATS */
+static inline bool pci_ats_supported(struct pci_dev *d)
+{ return false; }
static inline int pci_enable_ats(struct pci_dev *d, int ps)
{ return -ENODEV; }
static inline void pci_disable_ats(struct pci_dev *d) { }
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index a73164c85e78..1af5cb02ef7f 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -29,7 +29,7 @@ struct pci_config_window {
struct resource res;
struct resource busr;
void *priv;
- struct pci_ecam_ops *ops;
+ const struct pci_ecam_ops *ops;
union {
void __iomem *win; /* 64-bit single mapping */
void __iomem **winp; /* 32-bit per-bus mapping */
@@ -40,29 +40,28 @@ struct pci_config_window {
/* create and free pci_config_window */
struct pci_config_window *pci_ecam_create(struct device *dev,
struct resource *cfgres, struct resource *busr,
- struct pci_ecam_ops *ops);
+ const struct pci_ecam_ops *ops);
void pci_ecam_free(struct pci_config_window *cfg);
/* map_bus when ->sysdata is an instance of pci_config_window */
void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
/* default ECAM ops */
-extern struct pci_ecam_ops pci_generic_ecam_ops;
+extern const struct pci_ecam_ops pci_generic_ecam_ops;
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
-extern struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */
-extern struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */
-extern struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */
-extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
-extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
-extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
-extern struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
+extern const struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */
+extern const struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */
+extern const struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */
+extern const struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
+extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
+extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
+extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
#endif
-#ifdef CONFIG_PCI_HOST_COMMON
+#if IS_ENABLED(CONFIG_PCI_HOST_COMMON)
/* for DT-based PCI controllers that support ECAM */
-int pci_host_common_probe(struct platform_device *pdev,
- struct pci_ecam_ops *ops);
+int pci_host_common_probe(struct platform_device *pdev);
int pci_host_common_remove(struct platform_device *pdev);
#endif
#endif
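/*
 * Editor's sketch (not part of this patch): with pci_host_common_probe()
 * no longer taking an ops argument, a DT-based driver is expected to hand
 * its (now const) pci_ecam_ops over as OF match data instead.  The
 * "vendor,example-ecam" compatible and the ops values are invented.
 */
static const struct pci_ecam_ops example_ecam_ops = {
	.bus_shift	= 20,
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read,
		.write		= pci_generic_config_write,
	}
};

static const struct of_device_id example_ecam_of_match[] = {
	{ .compatible = "vendor,example-ecam", .data = &example_ecam_ops },
	{ },
};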
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index e0ed9d01f6e5..cc66bec8be90 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -66,19 +66,27 @@ struct pci_epc_ops {
};
/**
+ * struct pci_epc_mem_window - address window of the endpoint controller
+ * @phys_base: physical base address of the PCI address window
+ * @size: the size of the PCI address window
+ * @page_size: size of each page
+ */
+struct pci_epc_mem_window {
+ phys_addr_t phys_base;
+ size_t size;
+ size_t page_size;
+};
+
+/**
* struct pci_epc_mem - address space of the endpoint controller
- * @phys_base: physical base address of the PCI address space
- * @size: the size of the PCI address space
+ * @window: address window of the endpoint controller
* @bitmap: bitmap to manage the PCI address space
* @pages: number of bits representing the address region
- * @page_size: size of each page
* @lock: mutex to protect bitmap
*/
struct pci_epc_mem {
- phys_addr_t phys_base;
- size_t size;
+ struct pci_epc_mem_window window;
unsigned long *bitmap;
- size_t page_size;
int pages;
/* mutex to protect against concurrent access for memory allocation*/
struct mutex lock;
@@ -89,7 +97,11 @@ struct pci_epc_mem {
* @dev: PCI EPC device
* @pci_epf: list of endpoint functions present in this EPC device
* @ops: function pointers for performing endpoint operations
- * @mem: address space of the endpoint controller
+ * @windows: array of address windows of the endpoint controller
+ * @mem: first window of the endpoint controller, which corresponds to
+ * the default address space of an endpoint controller that
+ * supports a single window.
+ * @num_windows: number of windows supported by device
* @max_functions: max number of functions that can be configured in this EPC
* @group: configfs group representing the PCI EPC device
* @lock: mutex to protect pci_epc ops
@@ -100,7 +112,9 @@ struct pci_epc {
struct device dev;
struct list_head pci_epf;
const struct pci_epc_ops *ops;
+ struct pci_epc_mem **windows;
struct pci_epc_mem *mem;
+ unsigned int num_windows;
u8 max_functions;
struct config_group *group;
/* mutex to protect against concurrent access of EP controller */
@@ -137,9 +151,6 @@ struct pci_epc_features {
#define devm_pci_epc_create(dev, ops) \
__devm_pci_epc_create((dev), (ops), THIS_MODULE)
-#define pci_epc_mem_init(epc, phys_addr, size) \
- __pci_epc_mem_init((epc), (phys_addr), (size), PAGE_SIZE)
-
static inline void epc_set_drvdata(struct pci_epc *epc, void *data)
{
dev_set_drvdata(&epc->dev, data);
@@ -195,8 +206,11 @@ unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features
struct pci_epc *pci_epc_get(const char *epc_name);
void pci_epc_put(struct pci_epc *epc);
-int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_addr, size_t size,
- size_t page_size);
+int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t base,
+ size_t size, size_t page_size);
+int pci_epc_multi_mem_init(struct pci_epc *epc,
+ struct pci_epc_mem_window *window,
+ unsigned int num_windows);
void pci_epc_mem_exit(struct pci_epc *epc);
void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size);
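/*
 * Editor's sketch (not part of this patch): registering two outbound
 * windows with the new pci_epc_multi_mem_init().  The base addresses,
 * sizes and helper name are invented; the core copies the window values,
 * so a stack array is fine.
 */
static int example_epc_mem_setup(struct pci_epc *epc)
{
	struct pci_epc_mem_window windows[] = {
		{ .phys_base = 0x10000000, .size = SZ_1M,  .page_size = SZ_4K },
		{ .phys_base = 0x20000000, .size = SZ_64K, .page_size = SZ_4K },
	};

	return pci_epc_multi_mem_init(epc, windows, ARRAY_SIZE(windows));
}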
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 83ce1cdf5676..c79d83304e52 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -100,9 +100,21 @@ enum {
PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
#endif
- /* Resources assigned to buses behind the bridge */
+/* PCI-to-PCI (P2P) bridge windows */
+#define PCI_BRIDGE_IO_WINDOW (PCI_BRIDGE_RESOURCES + 0)
+#define PCI_BRIDGE_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 1)
+#define PCI_BRIDGE_PREF_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 2)
+
+/* CardBus bridge windows */
+#define PCI_CB_BRIDGE_IO_0_WINDOW (PCI_BRIDGE_RESOURCES + 0)
+#define PCI_CB_BRIDGE_IO_1_WINDOW (PCI_BRIDGE_RESOURCES + 1)
+#define PCI_CB_BRIDGE_MEM_0_WINDOW (PCI_BRIDGE_RESOURCES + 2)
+#define PCI_CB_BRIDGE_MEM_1_WINDOW (PCI_BRIDGE_RESOURCES + 3)
+
+/* Total number of bridge resources for P2P and CardBus */
#define PCI_BRIDGE_RESOURCE_NUM 4
+ /* Resources assigned to buses behind the bridge */
PCI_BRIDGE_RESOURCES,
PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
PCI_BRIDGE_RESOURCE_NUM - 1,
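/*
 * Editor's sketch (not part of this patch): the named indices replace
 * open-coded PCI_BRIDGE_RESOURCES + n arithmetic; the helper below is
 * invented for illustration.
 */
static bool example_bridge_has_mem_window(struct pci_dev *bridge)
{
	return resource_size(&bridge->resource[PCI_BRIDGE_MEM_WINDOW]) != 0;
}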
@@ -279,7 +291,7 @@ struct pci_cap_saved_data {
u16 cap_nr;
bool cap_extended;
unsigned int size;
- u32 data[0];
+ u32 data[];
};
struct pci_cap_saved_state {
@@ -420,8 +432,6 @@ struct pci_dev {
* mappings to make sure they cannot access arbitrary memory.
*/
unsigned int untrusted:1;
- unsigned int __aer_firmware_first_valid:1;
- unsigned int __aer_firmware_first:1;
unsigned int broken_intx_masking:1; /* INTx masking can't be used */
unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */
unsigned int irq_managed:1;
@@ -532,7 +542,7 @@ struct pci_host_bridge {
resource_size_t start,
resource_size_t size,
resource_size_t align);
- unsigned long private[0] ____cacheline_aligned;
+ unsigned long private[] ____cacheline_aligned;
};
#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
@@ -1025,7 +1035,6 @@ void pci_bus_add_device(struct pci_dev *dev);
void pci_read_bridge_bases(struct pci_bus *child);
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
struct resource *res);
-struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev);
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
@@ -2048,6 +2057,8 @@ int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
void pci_disable_sriov(struct pci_dev *dev);
+
+int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
int pci_iov_add_virtfn(struct pci_dev *dev, int id);
void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
int pci_num_vf(struct pci_dev *dev);
@@ -2073,6 +2084,12 @@ static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
}
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }
+
+static inline int pci_iov_sysfs_link(struct pci_dev *dev,
+ struct pci_dev *virtfn, int id)
+{
+ return -ENODEV;
+}
static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
return -ENOSYS;
@@ -2143,17 +2160,23 @@ static inline int pci_pcie_type(const struct pci_dev *dev)
return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
}
+/**
+ * pcie_find_root_port - Get the PCIe root port device
+ * @dev: PCI device
+ *
+ * Traverse up the parent chain and return the PCIe Root Port PCI Device
+ * for a given PCI/PCIe Device.
+ */
static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
{
- while (1) {
- if (!pci_is_pcie(dev))
- break;
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- return dev;
- if (!dev->bus->self)
- break;
- dev = dev->bus->self;
+ struct pci_dev *bridge = pci_upstream_bridge(dev);
+
+ while (bridge) {
+ if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
+ return bridge;
+ bridge = pci_upstream_bridge(bridge);
}
+
return NULL;
}
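/*
 * Editor's sketch (not part of this patch): typical quirk-style use of
 * the rewritten helper; the function name and vendor check are invented.
 */
static bool example_behind_root_port_of(struct pci_dev *pdev, u16 vendor)
{
	struct pci_dev *rp = pcie_find_root_port(pdev);

	return rp && rp->vendor == vendor;
}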
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 1dfc4e1dcb94..0ad57693f392 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -550,6 +550,7 @@
#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
+#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
@@ -1832,6 +1833,12 @@
#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2
#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018
+#define PCI_VENDOR_ID_PERICOM 0x12D8
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958
+
#define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0
#define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031
#define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
new file mode 100644
index 000000000000..32b6c52d41b9
--- /dev/null
+++ b/include/linux/pgtable.h
@@ -0,0 +1,1438 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PGTABLE_H
+#define _LINUX_PGTABLE_H
+
+#include <linux/pfn.h>
+#include <asm/pgtable.h>
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_MMU
+
+#include <linux/mm_types.h>
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <asm-generic/pgtable_uffd.h>
+
+#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
+ defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
+#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
+#endif
+
+/*
+ * On almost all architectures and configurations, 0 can be used as the
+ * upper ceiling to free_pgtables(): on many architectures it has the same
+ * effect as using TASK_SIZE. However, there is one configuration which
+ * must impose a more careful limit, to avoid freeing kernel pgtables.
+ */
+#ifndef USER_PGTABLES_CEILING
+#define USER_PGTABLES_CEILING 0UL
+#endif
+
+/*
+ * A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD]
+ *
+ * The pXx_index() functions return the index of the entry in the page
+ * table page which would control the given virtual address.
+ *
+ * As these functions may be used by the same code for different levels of
+ * the page table folding, they are always available, regardless of
+ * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
+ * because in such cases PTRS_PER_PxD equals 1.
+ */
+
+static inline unsigned long pte_index(unsigned long address)
+{
+ return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+}
+
+#ifndef pmd_index
+static inline unsigned long pmd_index(unsigned long address)
+{
+ return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+}
+#define pmd_index pmd_index
+#endif
+
+#ifndef pud_index
+static inline unsigned long pud_index(unsigned long address)
+{
+ return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+}
+#define pud_index pud_index
+#endif
+
+#ifndef pgd_index
+/* Must be a compile-time constant, so implement it as a macro */
+#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#endif
+
+#ifndef pte_offset_kernel
+static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
+{
+ return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
+}
+#define pte_offset_kernel pte_offset_kernel
+#endif
+
+#if defined(CONFIG_HIGHPTE)
+#define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
+ pte_index((address)))
+#define pte_unmap(pte) kunmap_atomic((pte))
+#else
+#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
+#define pte_unmap(pte) ((void)(pte)) /* NOP */
+#endif
+
+/* Find an entry in the second-level page table.. */
+#ifndef pmd_offset
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+ return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+}
+#define pmd_offset pmd_offset
+#endif
+
+#ifndef pud_offset
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+ return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
+}
+#define pud_offset pud_offset
+#endif
+
+static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
+{
+ return (pgd + pgd_index(address));
+}
+
+/*
+ * a shortcut to get a pgd_t in a given mm
+ */
+#ifndef pgd_offset
+#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
+#endif
+
+/*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
+
+/*
+ * In many cases it is known that a virtual address is mapped at PMD or PTE
+ * level, so instead of traversing all the page table levels, we can get a
+ * pointer to the PMD entry in a user or kernel page table, or translate a
+ * virtual address to a pointer to its PTE in the kernel page tables, with simple
+ * helpers.
+ */
+static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
+{
+ return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
+}
+
+static inline pmd_t *pmd_off_k(unsigned long va)
+{
+ return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
+}
+
+static inline pte_t *virt_to_kpte(unsigned long vaddr)
+{
+ pmd_t *pmd = pmd_off_k(vaddr);
+
+ return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
+}
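/*
 * Editor's sketch (not part of this patch): the canonical five-level
 * descent these helpers enable.  No locking or huge-page handling is
 * shown; the p?d_none_or_clear_bad() helpers used here are defined
 * further down in this file.
 */
static pte_t *example_walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none_or_clear_bad(pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}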
+
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty);
+extern int pudp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp,
+ pud_t entry, int dirty);
+#else
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty)
+{
+ BUILD_BUG();
+ return 0;
+}
+static inline int pudp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp,
+ pud_t entry, int dirty)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ int r = 1;
+ if (!pte_young(pte))
+ r = 0;
+ else
+ set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
+ return r;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+ int r = 1;
+ if (!pmd_young(pmd))
+ r = 0;
+ else
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
+ return r;
+}
+#else
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#else
+/*
+ * Despite being relevant to THP only, this API is called from generic rmap
+ * code under PageTransHuge(), hence it needs a dummy implementation for !THP.
+ */
+static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ pte_clear(mm, address, ptep);
+ return pte;
+}
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+ pmd_clear(pmdp);
+ return pmd;
+}
+#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
+#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
+static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pud_t *pudp)
+{
+ pud_t pud = *pudp;
+
+ pud_clear(pudp);
+ return pud;
+}
+#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
+static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ int full)
+{
+ return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
+static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp,
+ int full)
+{
+ return pudp_huge_get_and_clear(mm, address, pudp);
+}
+#endif
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep,
+ int full)
+{
+ pte_t pte;
+ pte = ptep_get_and_clear(mm, address, ptep);
+ return pte;
+}
+#endif
+
+
+/*
+ * If two threads concurrently fault at the same page, the thread that
+ * won the race updates the PTE and its local TLB/cache. The other thread
+ * gives up, simply does nothing, and continues; on architectures where
+ * software can update the TLB, the local TLB can be updated here to avoid
+ * the next page fault. This function updates the TLB only; it does nothing
+ * with the cache or anything else, which is what distinguishes it from
+ * update_mmu_cache().
+ */
+#ifndef __HAVE_ARCH_UPDATE_MMU_TLB
+static inline void update_mmu_tlb(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+}
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#endif
+
+/*
+ * Some architectures may be able to avoid expensive synchronization
+ * primitives when modifications are made to PTE's which are already
+ * not present, or in the process of an address space destruction.
+ */
+#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
+static inline void pte_clear_not_present_full(struct mm_struct *mm,
+ unsigned long address,
+ pte_t *ptep,
+ int full)
+{
+ pte_clear(mm, address, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
+extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
+extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp);
+extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pud_t *pudp);
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
+struct mm_struct;
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
+{
+ pte_t old_pte = *ptep;
+ set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
+}
+#endif
+
+/*
+ * On some architectures the hardware does not set the page access bit when
+ * a page is accessed; it is the responsibility of software to set it, and
+ * tracking the bit this way incurs an extra page-fault penalty. As an
+ * optimization, the access bit can be set during every page-fault path on
+ * these arches. To differentiate it from the pte_mkyoung() macro, this
+ * macro is used only on platforms where software maintains the access bit.
+ */
+#ifndef pte_sw_mkyoung
+static inline pte_t pte_sw_mkyoung(pte_t pte)
+{
+ return pte;
+}
+#define pte_sw_mkyoung pte_sw_mkyoung
+#endif
+
+#ifndef pte_savedwrite
+#define pte_savedwrite pte_write
+#endif
+
+#ifndef pte_mk_savedwrite
+#define pte_mk_savedwrite pte_mkwrite
+#endif
+
+#ifndef pte_clear_savedwrite
+#define pte_clear_savedwrite pte_wrprotect
+#endif
+
+#ifndef pmd_savedwrite
+#define pmd_savedwrite pmd_write
+#endif
+
+#ifndef pmd_mk_savedwrite
+#define pmd_mk_savedwrite pmd_mkwrite
+#endif
+
+#ifndef pmd_clear_savedwrite
+#define pmd_clear_savedwrite pmd_wrprotect
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t old_pmd = *pmdp;
+ set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
+}
+#else
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline void pudp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp)
+{
+ pud_t old_pud = *pudp;
+
+ set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
+}
+#else
+static inline void pudp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+#endif
+
+#ifndef pmdp_collapse_flush
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#else
+static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return *pmdp;
+}
+#define pmdp_collapse_flush pmdp_collapse_flush
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable);
+#endif
+
+#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * This is an implementation of pmdp_establish() that is only suitable for an
+ * architecture that doesn't have hardware dirty/accessed bits. In this case we
+ * can't race with the CPU setting these bits, so a non-atomic approach is fine.
+ */
+static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+ pmd_t old_pmd = *pmdp;
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+ return old_pmd;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_INVALIDATE
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp);
+#endif
+
+#ifndef __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+ return pte_val(pte_a) == pte_val(pte_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTE_UNUSED
+/*
+ * Some architectures provide facilities to virtualization guests
+ * so that they can flag allocated pages as unused. This allows the
+ * host to transparently reclaim unused pages. This function returns
+ * whether the pte's page is unused.
+ */
+static inline int pte_unused(pte_t pte)
+{
+ return 0;
+}
+#endif
+
+#ifndef pte_access_permitted
+#define pte_access_permitted(pte, write) \
+ (pte_present(pte) && (!(write) || pte_write(pte)))
+#endif
+
+#ifndef pmd_access_permitted
+#define pmd_access_permitted(pmd, write) \
+ (pmd_present(pmd) && (!(write) || pmd_write(pmd)))
+#endif
+
+#ifndef pud_access_permitted
+#define pud_access_permitted(pud, write) \
+ (pud_present(pud) && (!(write) || pud_write(pud)))
+#endif
+
+#ifndef p4d_access_permitted
+#define p4d_access_permitted(p4d, write) \
+ (p4d_present(p4d) && (!(write) || p4d_write(p4d)))
+#endif
+
+#ifndef pgd_access_permitted
+#define pgd_access_permitted(pgd, write) \
+ (pgd_present(pgd) && (!(write) || pgd_write(pgd)))
+#endif
+
+#ifndef __HAVE_ARCH_PMD_SAME
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+ return pmd_val(pmd_a) == pmd_val(pmd_b);
+}
+
+static inline int pud_same(pud_t pud_a, pud_t pud_b)
+{
+ return pud_val(pud_a) == pud_val(pud_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_P4D_SAME
+static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
+{
+ return p4d_val(p4d_a) == p4d_val(p4d_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PGD_SAME
+static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
+{
+ return pgd_val(pgd_a) == pgd_val(pgd_b);
+}
+#endif
+
+/*
+ * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
+ * TLB flush will be required as a result of the "set". For example, use
+ * in scenarios where it is known ahead of time that the routine is
+ * setting non-present entries, or re-setting an existing entry to the
+ * same value. Otherwise, use the typical "set" helpers and flush the
+ * TLB.
+ */
+#define set_pte_safe(ptep, pte) \
+({ \
+ WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
+ set_pte(ptep, pte); \
+})
+
+#define set_pmd_safe(pmdp, pmd) \
+({ \
+ WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
+ set_pmd(pmdp, pmd); \
+})
+
+#define set_pud_safe(pudp, pud) \
+({ \
+ WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
+ set_pud(pudp, pud); \
+})
+
+#define set_p4d_safe(p4dp, p4d) \
+({ \
+ WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
+ set_p4d(p4dp, p4d); \
+})
+
+#define set_pgd_safe(pgdp, pgd) \
+({ \
+ WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
+ set_pgd(pgdp, pgd); \
+})
+
+#ifndef __HAVE_ARCH_DO_SWAP_PAGE
+/*
+ * Some architectures support metadata associated with a page. When a
+ * page is being swapped out, this metadata must be saved so it can be
+ * restored when the page is swapped back in. SPARC M7 and newer
+ * processors support an ADI (Application Data Integrity) tag for the
+ * page as metadata for the page. arch_do_swap_page() can restore this
+ * metadata when a page is swapped back in.
+ */
+static inline void arch_do_swap_page(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t pte, pte_t oldpte)
+{
+
+}
+#endif
+
+#ifndef __HAVE_ARCH_UNMAP_ONE
+/*
+ * Some architectures support metadata associated with a page. When a
+ * page is being swapped out, this metadata must be saved so it can be
+ * restored when the page is swapped back in. SPARC M7 and newer
+ * processors support an ADI (Application Data Integrity) tag for the
+ * page as metadata for the page. arch_unmap_one() can save this
+ * metadata on a swap-out of a page.
+ */
+static inline int arch_unmap_one(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t orig_pte)
+{
+ return 0;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
+#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
+#endif
+
+#ifndef __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr) (pte)
+#endif
+
+#ifndef pte_accessible
+# define pte_accessible(mm, pte) ((void)(pte), 1)
+#endif
+
+#ifndef flush_tlb_fix_spurious_fault
+#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
+#endif
+
+#ifndef pgprot_nx
+#define pgprot_nx(prot) (prot)
+#endif
+
+#ifndef pgprot_noncached
+#define pgprot_noncached(prot) (prot)
+#endif
+
+#ifndef pgprot_writecombine
+#define pgprot_writecombine pgprot_noncached
+#endif
+
+#ifndef pgprot_writethrough
+#define pgprot_writethrough pgprot_noncached
+#endif
+
+#ifndef pgprot_device
+#define pgprot_device pgprot_noncached
+#endif
+
+#ifndef pgprot_modify
+#define pgprot_modify pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
+ newprot = pgprot_noncached(newprot);
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
+ newprot = pgprot_writecombine(newprot);
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
+ newprot = pgprot_device(newprot);
+ return newprot;
+}
+#endif
+
+/*
+ * When walking page tables, get the address of the next boundary,
+ * or the end address of the range if that comes earlier. Although no
+ * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
+ */
+
+#define pgd_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+
+#ifndef p4d_addr_end
+#define p4d_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+#ifndef pud_addr_end
+#define pud_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+#ifndef pmd_addr_end
+#define pmd_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
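/*
 * Editor's sketch (not part of this patch): the idiomatic range-walk
 * loop the p?d_addr_end() macros exist for (compare mm/pagewalk.c);
 * the loop body is elided and the helper name is invented.
 */
static void example_walk_pgd_range(struct mm_struct *mm,
				   unsigned long addr, unsigned long end)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		/* descend to the p4d level for [addr, next) here */
	} while (pgd++, addr = next, addr != end);
}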
+
+/*
+ * When walking page tables, we usually want to skip any p?d_none entries
+ * and any p?d_bad entries - reporting the error before resetting to none.
+ * Do the tests inline, but report and clear the bad entry in mm/memory.c.
+ */
+void pgd_clear_bad(pgd_t *);
+
+#ifndef __PAGETABLE_P4D_FOLDED
+void p4d_clear_bad(p4d_t *);
+#else
+#define p4d_clear_bad(p4d) do { } while (0)
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+void pud_clear_bad(pud_t *);
+#else
+#define pud_clear_bad(pud) do { } while (0)
+#endif
+
+void pmd_clear_bad(pmd_t *);
+
+static inline int pgd_none_or_clear_bad(pgd_t *pgd)
+{
+ if (pgd_none(*pgd))
+ return 1;
+ if (unlikely(pgd_bad(*pgd))) {
+ pgd_clear_bad(pgd);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int p4d_none_or_clear_bad(p4d_t *p4d)
+{
+ if (p4d_none(*p4d))
+ return 1;
+ if (unlikely(p4d_bad(*p4d))) {
+ p4d_clear_bad(p4d);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int pud_none_or_clear_bad(pud_t *pud)
+{
+ if (pud_none(*pud))
+ return 1;
+ if (unlikely(pud_bad(*pud))) {
+ pud_clear_bad(pud);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int pmd_none_or_clear_bad(pmd_t *pmd)
+{
+ if (pmd_none(*pmd))
+ return 1;
+ if (unlikely(pmd_bad(*pmd))) {
+ pmd_clear_bad(pmd);
+ return 1;
+ }
+ return 0;
+}
+
+static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep)
+{
+ /*
+ * Get the current pte state, but zero it out to make it
+ * non-present, preventing the hardware from asynchronously
+ * updating it.
+ */
+ return ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ /*
+ * The pte is non-present, so there's no hardware state to
+ * preserve.
+ */
+ set_pte_at(vma->vm_mm, addr, ptep, pte);
+}
+
+#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+/*
+ * Start a pte protection read-modify-write transaction, which
+ * protects against asynchronous hardware modifications to the pte.
+ * The intention is not to prevent the hardware from making pte
+ * updates, but to prevent any updates it may make from being lost.
+ *
+ * This does not protect against other software modifications of the
+ * pte; the appropriate pte lock must be held over the transaction.
+ *
+ * Note that this interface is intended to be batchable, meaning that
+ * ptep_modify_prot_commit may not actually update the pte, but merely
+ * queue the update to be done at some later time. The update must be
+ * actually committed before the pte lock is released, however.
+ */
+static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep)
+{
+ return __ptep_modify_prot_start(vma, addr, ptep);
+}
+
+/*
+ * Commit an update to a pte, leaving any hardware-controlled bits in
+ * the PTE unmodified.
+ */
+static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep, pte_t old_pte, pte_t pte)
+{
+ __ptep_modify_prot_commit(vma, addr, ptep, pte);
+}
+#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
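/*
 * Editor's sketch (not part of this patch): the start/commit transaction
 * pattern in use, as in change_pte_range() in mm/mprotect.c.  The helper
 * name is invented and the pte lock is assumed to be held.
 */
static void example_change_prot(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pgprot_t newprot)
{
	pte_t oldpte = ptep_modify_prot_start(vma, addr, ptep);
	pte_t ptent = pte_modify(oldpte, newprot);

	ptep_modify_prot_commit(vma, addr, ptep, oldpte, ptent);
}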
+#endif /* CONFIG_MMU */
+
+/*
+ * No-op macros that just return the current protection value. Defined here
+ * because these macros can be used even if CONFIG_MMU is not defined.
+ */
+#ifndef pgprot_encrypted
+#define pgprot_encrypted(prot) (prot)
+#endif
+
+#ifndef pgprot_decrypted
+#define pgprot_decrypted(prot) (prot)
+#endif
+
+/*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() do {} while (0)
+#define arch_leave_lazy_mmu_mode() do {} while (0)
+#define arch_flush_lazy_mmu_mode() do {} while (0)
+#endif
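/*
 * Editor's sketch (not part of this patch): batching a run of PTE
 * updates under lazy MMU mode as described above.  The helper name is
 * invented and the page-table lock is assumed to be held throughout.
 */
static void example_mkold_range(struct mm_struct *mm, pte_t *ptep,
				unsigned long addr, unsigned long end)
{
	arch_enter_lazy_mmu_mode();
	for (; addr != end; ptep++, addr += PAGE_SIZE)
		set_pte_at(mm, addr, ptep, pte_mkold(*ptep));
	arch_leave_lazy_mmu_mode();
}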
+
+/*
+ * A facility to provide batching of the reload of page tables and
+ * other process state with the actual context switch code for
+ * paravirtualized guests. By convention, only one of the batched
+ * update (lazy) modes (CPU, MMU) should be active at any given time,
+ * entry should never be nested, and entry and exits should always be
+ * paired. This is for sanity of maintaining and reasoning about the
+ * kernel code. In this case, the exit (end of the context switch) is
+ * in architecture-specific code, and so doesn't need a generic
+ * definition.
+ */
+#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
+#define arch_start_context_switch(prev) do {} while (0)
+#endif
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+#endif
+#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
+static inline int pte_soft_dirty(pte_t pte)
+{
+ return 0;
+}
+
+static inline int pmd_soft_dirty(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+ return 0;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+#endif
+
+#ifndef __HAVE_PFNMAP_TRACKING
+/*
+ * Interfaces that can be used by architecture code to keep track of
+ * the memory type of pfn mappings specified by remap_pfn_range() and
+ * vmf_insert_pfn().
+ */
+
+/*
+ * track_pfn_remap is called when a _new_ pfn mapping is being established
+ * by remap_pfn_range() for physical range indicated by pfn and size.
+ */
+static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn, unsigned long addr,
+ unsigned long size)
+{
+ return 0;
+}
+
+/*
+ * track_pfn_insert is called when a _new_ single pfn is established
+ * by vmf_insert_pfn().
+ */
+static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ pfn_t pfn)
+{
+}
+
+/*
+ * track_pfn_copy is called when the vma covering the pfnmap gets
+ * copied through copy_page_range().
+ */
+static inline int track_pfn_copy(struct vm_area_struct *vma)
+{
+ return 0;
+}
+
+/*
+ * untrack_pfn is called while unmapping a pfnmap for a region.
+ * untrack can be called for a specific region indicated by pfn and size or
+ * can be for the entire vma (in which case pfn, size are zero).
+ */
+static inline void untrack_pfn(struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size)
+{
+}
+
+/*
+ * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
+ */
+static inline void untrack_pfn_moved(struct vm_area_struct *vma)
+{
+}
+#else
+extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn, unsigned long addr,
+ unsigned long size);
+extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ pfn_t pfn);
+extern int track_pfn_copy(struct vm_area_struct *vma);
+extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
+ unsigned long size);
+extern void untrack_pfn_moved(struct vm_area_struct *vma);
+#endif
+
+#ifdef __HAVE_COLOR_ZERO_PAGE
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ extern unsigned long zero_pfn;
+ unsigned long offset_from_zero_pfn = pfn - zero_pfn;
+ return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
+}
+
+#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+
+#else
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ extern unsigned long zero_pfn;
+ return pfn == zero_pfn;
+}
+
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+ extern unsigned long zero_pfn;
+ return zero_pfn;
+}
+#endif
+
+#ifdef CONFIG_MMU
+
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return 0;
+}
+#ifndef pmd_write
+static inline int pmd_write(pmd_t pmd)
+{
+ BUG();
+ return 0;
+}
+#endif /* pmd_write */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifndef pud_write
+static inline int pud_write(pud_t pud)
+{
+ BUG();
+ return 0;
+}
+#endif /* pud_write */
+
+#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static inline int pmd_devmap(pmd_t pmd)
+{
+ return 0;
+}
+static inline int pud_devmap(pud_t pud)
+{
+ return 0;
+}
+static inline int pgd_devmap(pgd_t pgd)
+{
+ return 0;
+}
+#endif
+
+#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
+ (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
+static inline int pud_trans_huge(pud_t pud)
+{
+ return 0;
+}
+#endif
+
+/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */
+static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
+{
+ pud_t pudval = READ_ONCE(*pud);
+
+ if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
+ return 1;
+ if (unlikely(pud_bad(pudval))) {
+ pud_clear_bad(pud);
+ return 1;
+ }
+ return 0;
+}
+
+/* See pmd_trans_unstable for discussion. */
+static inline int pud_trans_unstable(pud_t *pud)
+{
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+ return pud_none_or_trans_huge_or_dev_or_clear_bad(pud);
+#else
+ return 0;
+#endif
+}
+
+#ifndef pmd_read_atomic
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+ /*
+ * Depend on the compiler for an atomic pmd read. NOTE: this is
+ * only going to work if the pmdval_t isn't larger than
+ * an unsigned long.
+ */
+ return *pmdp;
+}
+#endif
+
+#ifndef arch_needs_pgtable_deposit
+#define arch_needs_pgtable_deposit() (false)
+#endif
+/*
+ * This function is meant to be used by sites walking pagetables with
+ * the mmap_lock held in read mode to protect against MADV_DONTNEED and
+ * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
+ * into a null pmd and the transhuge page fault can convert a null pmd
+ * into a hugepmd or into a regular pmd (if the hugepage allocation
+ * fails). While holding the mmap_lock in read mode the pmd becomes
+ * stable and stops changing under us only if it's not null and not a
+ * transhuge pmd. When those races occur and this function makes a
+ * difference vs the standard pmd_none_or_clear_bad, the result is
+ * undefined, so behaving as if the pmd was none is safe (because it
+ * can return none anyway). The compiler level barrier() is critically
+ * important to compute the two checks atomically on the same pmdval.
+ *
+ * For 32bit kernels with a 64bit large pmd_t this automatically takes
+ * care of reading the pmd atomically to avoid SMP race conditions
+ * against pmd_populate() when the mmap_lock is held for reading by the
+ * caller (a special atomic read, not done by "gcc" as in the generic
+ * version above, is also needed when THP is disabled because the page
+ * fault can populate the pmd from under us).
+ */
+static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
+{
+ pmd_t pmdval = pmd_read_atomic(pmd);
+ /*
+ * The barrier will stabilize the pmdval in a register or on
+ * the stack so that it will stop changing under the code.
+ *
+ * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
+ * pmd_read_atomic is allowed to return a non-atomic pmdval
+ * (for example pointing to a hugepage that has never been
+ * mapped in the pmd). The below checks will only care about
+ * the low part of the pmd with 32bit PAE x86 anyway, with the
+ * exception of pmd_none(). So the important thing is that if
+ * the low part of the pmd is found null, the high part will
+ * also be null or the pmd_none() check below would be
+ * confused.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ barrier();
+#endif
+ /*
+ * !pmd_present() checks for pmd migration entries
+ *
+ * The complete check uses is_pmd_migration_entry() in linux/swapops.h
+ * But using that requires moving the current function and pmd_trans_unstable()
+ * to linux/swapops.h to resolve the dependency, which is too much code movement.
+ *
+ * !pmd_present() is equivalent to is_pmd_migration_entry() currently,
+ * because !pmd_present() pages can only be under migration not swapped
+ * out.
+ *
+ * pmd_none() is preserved for future condition checks on pmd migration
+ * entries, and to avoid confusion with this function's name, although it
+ * is redundant with !pmd_present().
+ */
+ if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
+ (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
+ return 1;
+ if (unlikely(pmd_bad(pmdval))) {
+ pmd_clear_bad(pmd);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * This is a noop if Transparent Hugepage Support is not built into
+ * the kernel. Otherwise it is equivalent to
+ * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
+ * places that already verified the pmd is not none and they want to
+ * walk ptes while holding the mmap sem in read mode (write mode doesn't
+ * need this). If THP is not enabled, the pmd can't go away under the
+ * code even if MADV_DONTNEED runs, but if THP is enabled we need to
+ * run pmd_trans_unstable before walking the ptes after
+ * split_huge_pmd returns (because it may have run when the pmd became
+ * null, but then a page fault can map in a THP and not a regular page).
+ */
+static inline int pmd_trans_unstable(pmd_t *pmd)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return pmd_none_or_trans_huge_or_clear_bad(pmd);
+#else
+ return 0;
+#endif
+}
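/*
 * Editor's sketch (not part of this patch): the guard pattern the
 * comments above prescribe for walkers holding mmap_lock in read mode
 * (compare madvise_free_pte_range() in mm/madvise.c); names invented.
 */
static int example_pte_range(pmd_t *pmd, unsigned long addr,
			     unsigned long end)
{
	pte_t *pte;

	if (pmd_trans_unstable(pmd))
		return 0;	/* raced with THP: behave as if pmd was none */

	pte = pte_offset_map(pmd, addr);
	/* ... walk ptes for [addr, end) ... */
	pte_unmap(pte);
	return 0;
}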
+
+#ifndef CONFIG_NUMA_BALANCING
+/*
+ * Technically a PTE can be PROTNONE even when not doing NUMA balancing, but
+ * the only case the kernel cares about is NUMA balancing, and the bit is only
+ * ever set when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not
+ * marked _PAGE_PROTNONE, so by default implement the helper as "always no". It
+ * is the responsibility of the caller to distinguish between PROT_NONE
+ * protections and NUMA hinting fault protections.
+ */
+static inline int pte_protnone(pte_t pte)
+{
+ return 0;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+ return 0;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#endif /* CONFIG_MMU */
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#ifndef __PAGETABLE_P4D_FOLDED
+int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
+int p4d_clear_huge(p4d_t *p4d);
+#else
+static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int p4d_clear_huge(p4d_t *p4d)
+{
+ return 0;
+}
+#endif /* !__PAGETABLE_P4D_FOLDED */
+
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+int pud_clear_huge(pud_t *pud);
+int pmd_clear_huge(pmd_t *pmd);
+int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
+int pud_free_pmd_page(pud_t *pud, unsigned long addr);
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
+#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int p4d_clear_huge(p4d_t *p4d)
+{
+ return 0;
+}
+static inline int pud_clear_huge(pud_t *pud)
+{
+ return 0;
+}
+static inline int pmd_clear_huge(pmd_t *pmd)
+{
+ return 0;
+}
+static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
+{
+ return 0;
+}
+static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+ return 0;
+}
+static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+ return 0;
+}
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
+#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * ARCHes with special requirements for evicting THP backing TLB entries can
+ * implement this. Otherwise it can also help optimize a normal TLB flush in
+ * the THP regime: the stock flush_tlb_range() typically has an optimization
+ * to nuke the entire TLB if the flush span is greater than a threshold,
+ * which will likely be true for a single huge page. Thus a single THP flush
+ * will invalidate the entire TLB, which is not desirable.
+ * e.g. see arch/arc: flush_pmd_tlb_range
+ */
+#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#else
+#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG()
+#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG()
+#endif
+#endif
+
+struct file;
+int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t *vma_prot);
+
+#ifndef CONFIG_X86_ESPFIX64
+static inline void init_espfix_bsp(void) { }
+#endif
+
+extern void __init pgtable_cache_init(void);
+
+#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
+static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+ return true;
+}
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+ return false;
+}
+#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */
+
+/*
+ * Architecture PAGE_KERNEL_* fallbacks
+ *
+ * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
+ * because they really don't support them, or the port needs to be updated to
+ * reflect the required functionality. Below is a set of relatively safe,
+ * best-effort fallbacks that we can count on until the architectures
+ * define the flags on their own.
+ */
+
+#ifndef PAGE_KERNEL_RO
+# define PAGE_KERNEL_RO PAGE_KERNEL
+#endif
+
+#ifndef PAGE_KERNEL_EXEC
+# define PAGE_KERNEL_EXEC PAGE_KERNEL
+#endif
+
+/*
+ * Page Table Modification bits for pgtbl_mod_mask.
+ *
+ * These are used by the p?d_alloc_track*() set of functions and in the generic
+ * vmalloc/ioremap code to track at which page-table levels entries have been
+ * modified. Based on that the code can better decide when vmalloc and ioremap
+ * mapping changes need to be synchronized to other page-tables in the system.
+ */
+#define __PGTBL_PGD_MODIFIED 0
+#define __PGTBL_P4D_MODIFIED 1
+#define __PGTBL_PUD_MODIFIED 2
+#define __PGTBL_PMD_MODIFIED 3
+#define __PGTBL_PTE_MODIFIED 4
+
+#define PGTBL_PGD_MODIFIED BIT(__PGTBL_PGD_MODIFIED)
+#define PGTBL_P4D_MODIFIED BIT(__PGTBL_P4D_MODIFIED)
+#define PGTBL_PUD_MODIFIED BIT(__PGTBL_PUD_MODIFIED)
+#define PGTBL_PMD_MODIFIED BIT(__PGTBL_PMD_MODIFIED)
+#define PGTBL_PTE_MODIFIED BIT(__PGTBL_PTE_MODIFIED)
+
+/* Page-Table Modification Mask */
+typedef unsigned int pgtbl_mod_mask;
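/*
 * Editor's sketch (not part of this patch): mapping code ORs the
 * PGTBL_*_MODIFIED bits into a mask as it populates entries, and the
 * caller tests the mask afterwards; names and the comment on syncing
 * are illustrative.
 */
static int example_map(pgtbl_mod_mask *mask)
{
	/* ... install a missing pmd entry ... */
	*mask |= PGTBL_PMD_MODIFIED;

	/* caller: if (*mask & PGTBL_PGD_MODIFIED) sync other page tables */
	return 0;
}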
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef io_remap_pfn_range
+#define io_remap_pfn_range remap_pfn_range
+#endif
+
+#ifndef has_transparent_hugepage
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define has_transparent_hugepage() 1
+#else
+#define has_transparent_hugepage() 0
+#endif
+#endif
+
+/*
+ * On some architectures it depends on the mm if the p4d/pud or pmd
+ * layer of the page table hierarchy is folded or not.
+ */
+#ifndef mm_p4d_folded
+#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
+#endif
+
+#ifndef mm_pud_folded
+#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
+#endif
+
+#ifndef mm_pmd_folded
+#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
+#endif
+
+/*
+ * p?d_leaf() - true if this entry is a final mapping to a physical address.
+ * This differs from p?d_huge() in that these are always available (if
+ * the architecture supports large pages at the appropriate level) even
+ * if CONFIG_HUGETLB_PAGE is not defined.
+ * Only meaningful when called on a valid entry.
+ */
+#ifndef pgd_leaf
+#define pgd_leaf(x) 0
+#endif
+#ifndef p4d_leaf
+#define p4d_leaf(x) 0
+#endif
+#ifndef pud_leaf
+#define pud_leaf(x) 0
+#endif
+#ifndef pmd_leaf
+#define pmd_leaf(x) 0
+#endif
+
+#endif /* _LINUX_PGTABLE_H */
diff --git a/include/linux/phy/omap_usb.h b/include/linux/phy/omap_usb.h
index 5973a6313529..e23b52df93ec 100644
--- a/include/linux/phy/omap_usb.h
+++ b/include/linux/phy/omap_usb.h
@@ -2,68 +2,14 @@
/*
* omap_usb.h -- omap usb2 phy header file
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2012-2020 Texas Instruments Incorporated - http://www.ti.com
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*/
#ifndef __DRIVERS_OMAP_USB2_H
#define __DRIVERS_OMAP_USB2_H
-#include <linux/io.h>
-#include <linux/usb/otg.h>
-
-struct usb_dpll_params {
- u16 m;
- u8 n;
- u8 freq:3;
- u8 sd;
- u32 mf;
-};
-
-enum omap_usb_phy_type {
- TYPE_USB2, /* USB2_PHY, power down in CONTROL_DEV_CONF */
- TYPE_DRA7USB2, /* USB2 PHY, power and power_aux e.g. DRA7 */
- TYPE_AM437USB2, /* USB2 PHY, power e.g. AM437x */
-};
-
-struct omap_usb {
- struct usb_phy phy;
- struct phy_companion *comparator;
- void __iomem *pll_ctrl_base;
- void __iomem *phy_base;
- struct device *dev;
- struct device *control_dev;
- struct clk *wkupclk;
- struct clk *optclk;
- u8 flags;
- enum omap_usb_phy_type type;
- struct regmap *syscon_phy_power; /* ctrl. reg. acces */
- unsigned int power_reg; /* power reg. index within syscon */
- u32 mask;
- u32 power_on;
- u32 power_off;
-};
-
-struct usb_phy_data {
- const char *label;
- u8 flags;
- u32 mask;
- u32 power_on;
- u32 power_off;
-};
-
-/* Driver Flags */
-#define OMAP_USB2_HAS_START_SRP (1 << 0)
-#define OMAP_USB2_HAS_SET_VBUS (1 << 1)
-#define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT (1 << 2)
-
-#define OMAP_DEV_PHY_PD BIT(0)
-#define OMAP_USB2_PHY_PD BIT(28)
-
-#define AM437X_USB2_PHY_PD BIT(0)
-#define AM437X_USB2_OTG_PD BIT(1)
-#define AM437X_USB2_OTGVDET_EN BIT(19)
-#define AM437X_USB2_OTGSESSEND_EN BIT(20)
+#include <linux/usb/phy_companion.h>
#define phy_to_omapusb(x) container_of((x), struct omap_usb, phy)
@@ -76,15 +22,4 @@ static inline int omap_usb2_set_comparator(struct phy_companion *comparator)
}
#endif
-static inline u32 omap_usb_readl(void __iomem *addr, unsigned offset)
-{
- return __raw_readl(addr + offset);
-}
-
-static inline void omap_usb_writel(void __iomem *addr, unsigned offset,
- u32 data)
-{
- __raw_writel(data, addr + offset);
-}
-
#endif /* __DRIVERS_OMAP_USB2_H */
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 93543cbc0e6b..176d6cf80e7c 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -102,6 +102,7 @@ extern void attach_pid(struct task_struct *task, enum pid_type);
extern void detach_pid(struct task_struct *task, enum pid_type);
extern void change_pid(struct task_struct *task, enum pid_type,
struct pid *pid);
+extern void exchange_tids(struct task_struct *task, struct task_struct *old);
extern void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type);
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 4956e362e55e..5a5cb45ac57e 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -17,12 +17,6 @@
struct fs_pin;
-enum { /* definitions for pid_namespace's hide_pid field */
- HIDEPID_OFF = 0,
- HIDEPID_NO_ACCESS = 1,
- HIDEPID_INVISIBLE = 2,
-};
-
struct pid_namespace {
struct kref kref;
struct idr idr;
@@ -32,17 +26,11 @@ struct pid_namespace {
struct kmem_cache *pid_cachep;
unsigned int level;
struct pid_namespace *parent;
-#ifdef CONFIG_PROC_FS
- struct dentry *proc_self;
- struct dentry *proc_thread_self;
-#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
struct fs_pin *bacct;
#endif
struct user_namespace *user_ns;
struct ucounts *ucounts;
- kgid_t pid_gid;
- int hide_pid;
int reboot; /* group exit code if this pidns was rebooted */
struct ns_common ns;
} __randomize_layout;
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 0c31b9461262..50afd0d0084c 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -9,6 +9,10 @@
#define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */
#define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */
#define PIPE_BUF_FLAG_CAN_MERGE 0x10 /* can merge buffers */
+#define PIPE_BUF_FLAG_WHOLE 0x20 /* read() must return entire buffer or error */
+#ifdef CONFIG_WATCH_QUEUE
+#define PIPE_BUF_FLAG_LOSS 0x40 /* Message loss happened after this buffer */
+#endif
/**
* struct pipe_buffer - a linux kernel pipe buffer
@@ -34,8 +38,10 @@ struct pipe_buffer {
* @wr_wait: writer wait point in case of full pipe
* @head: The point of buffer production
* @tail: The point of buffer consumption
+ * @note_loss: The next read() should insert a data-lost message
* @max_usage: The maximum number of slots that may be used in the ring
* @ring_size: total number of buffers (should be a power of 2)
+ * @nr_accounted: The amount this pipe accounts for in user->pipe_bufs
* @tmp_page: cached released page
* @readers: number of current readers of this pipe
* @writers: number of current writers of this pipe
@@ -46,6 +52,7 @@ struct pipe_buffer {
* @fasync_writers: writer side fasync
* @bufs: the circular array of pipe buffers
* @user: the user who created this pipe
+ * @watch_queue: If this pipe is a watch_queue, this is the state for it
**/
struct pipe_inode_info {
struct mutex mutex;
@@ -54,6 +61,10 @@ struct pipe_inode_info {
unsigned int tail;
unsigned int max_usage;
unsigned int ring_size;
+#ifdef CONFIG_WATCH_QUEUE
+ bool note_loss;
+#endif
+ unsigned int nr_accounted;
unsigned int readers;
unsigned int writers;
unsigned int files;
@@ -64,6 +75,9 @@ struct pipe_inode_info {
struct fasync_struct *fasync_writers;
struct pipe_buffer *bufs;
struct user_struct *user;
+#ifdef CONFIG_WATCH_QUEUE
+ struct watch_queue *watch_queue;
+#endif
};
/*
@@ -239,9 +253,20 @@ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
+#ifdef CONFIG_WATCH_QUEUE
+unsigned long account_pipe_buffers(struct user_struct *user,
+ unsigned long old, unsigned long new);
+bool too_many_pipe_buffers_soft(unsigned long user_bufs);
+bool too_many_pipe_buffers_hard(unsigned long user_bufs);
+bool pipe_is_unprivileged_user(void);
+#endif
+
/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
+#ifdef CONFIG_WATCH_QUEUE
+int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots);
+#endif
long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
-struct pipe_inode_info *get_pipe_info(struct file *file);
+struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice);
int create_pipe_files(struct file **, int);
unsigned int round_pipe_size(unsigned long size);
diff --git a/include/linux/platform_data/clk-integrator.h b/include/linux/platform_data/clk-integrator.h
deleted file mode 100644
index addd48cac625..000000000000
--- a/include/linux/platform_data/clk-integrator.h
+++ /dev/null
@@ -1,2 +0,0 @@
-void integrator_impd1_clk_init(void __iomem *base, unsigned int id);
-void integrator_impd1_clk_exit(unsigned int id);
diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h
index 3c606c450d05..ff1be737bad6 100644
--- a/include/linux/platform_data/gpio-dwapb.h
+++ b/include/linux/platform_data/gpio-dwapb.h
@@ -12,7 +12,6 @@ struct dwapb_port_property {
unsigned int ngpio;
unsigned int gpio_base;
int irq[32];
- bool has_irq;
bool irq_shared;
};
diff --git a/include/linux/platform_data/i2c-pxa.h b/include/linux/platform_data/i2c-pxa.h
index 6a9b28399b39..24953981bd9f 100644
--- a/include/linux/platform_data/i2c-pxa.h
+++ b/include/linux/platform_data/i2c-pxa.h
@@ -7,54 +7,6 @@
#ifndef _I2C_PXA_H_
#define _I2C_PXA_H_
-#if 0
-#define DEF_TIMEOUT 3
-#else
-/* need a longer timeout if we're dealing with the fact we may well be
- * looking at a multi-master environment
-*/
-#define DEF_TIMEOUT 32
-#endif
-
-#define BUS_ERROR (-EREMOTEIO)
-#define XFER_NAKED (-ECONNREFUSED)
-#define I2C_RETRY (-2000) /* an error has occurred retry transmit */
-
-/* ICR initialize bit values
-*
-* 15. FM 0 (100 Khz operation)
-* 14. UR 0 (No unit reset)
-* 13. SADIE 0 (Disables the unit from interrupting on slave addresses
-* matching its slave address)
-* 12. ALDIE 0 (Disables the unit from interrupt when it loses arbitration
-* in master mode)
-* 11. SSDIE 0 (Disables interrupts from a slave stop detected, in slave mode)
-* 10. BEIE 1 (Enable interrupts from detected bus errors, no ACK sent)
-* 9. IRFIE 1 (Enable interrupts from full buffer received)
-* 8. ITEIE 1 (Enables the I2C unit to interrupt when transmit buffer empty)
-* 7. GCD 1 (Disables i2c unit response to general call messages as a slave)
-* 6. IUE 0 (Disable unit until we change settings)
-* 5. SCLE 1 (Enables the i2c clock output for master mode (drives SCL)
-* 4. MA 0 (Only send stop with the ICR stop bit)
-* 3. TB 0 (We are not transmitting a byte initially)
-* 2. ACKNAK 0 (Send an ACK after the unit receives a byte)
-* 1. STOP 0 (Do not send a STOP)
-* 0. START 0 (Do not send a START)
-*
-*/
-#define I2C_ICR_INIT (ICR_BEIE | ICR_IRFIE | ICR_ITEIE | ICR_GCD | ICR_SCLE)
-
-/* I2C status register init values
- *
- * 10. BED 1 (Clear bus error detected)
- * 9. SAD 1 (Clear slave address detected)
- * 7. IRF 1 (Clear IDBR Receive Full)
- * 6. ITE 1 (Clear IDBR Transmit Empty)
- * 5. ALD 1 (Clear Arbitration Loss Detected)
- * 4. SSD 1 (Clear Slave Stop Detected)
- */
-#define I2C_ISR_INIT 0x7FF /* status register init */
-
struct i2c_pxa_platform_data {
unsigned int class;
unsigned int use_pio :1;
diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h
index 08e639e047e5..03e92c71b3fa 100644
--- a/include/linux/platform_data/mtd-davinci.h
+++ b/include/linux/platform_data/mtd-davinci.h
@@ -68,7 +68,7 @@ struct davinci_nand_pdata { /* platform_data */
* Newer ones also support 4-bit ECC, but are awkward
* using it with large page chips.
*/
- nand_ecc_modes_t ecc_mode;
+ enum nand_ecc_mode ecc_mode;
u8 ecc_bits;
/* e.g. NAND_BUSWIDTH_16 */
diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h
index deb849bcf0ec..08675b16f9e1 100644
--- a/include/linux/platform_data/mtd-nand-s3c2410.h
+++ b/include/linux/platform_data/mtd-nand-s3c2410.h
@@ -49,7 +49,7 @@ struct s3c2410_platform_nand {
unsigned int ignore_unset_ecc:1;
- nand_ecc_modes_t ecc_mode;
+ enum nand_ecc_mode ecc_mode;
int nr_sets;
struct s3c2410_nand_set *sets;
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 747861816f4f..d5c4a329321d 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -42,6 +42,18 @@ struct dev_pm_opp_supply {
};
/**
+ * struct dev_pm_opp_icc_bw - Interconnect bandwidth values
+ * @avg: Average bandwidth corresponding to this OPP (in icc units)
+ * @peak: Peak bandwidth corresponding to this OPP (in icc units)
+ *
+ * This structure stores the bandwidth values for a single interconnect path.
+ */
+struct dev_pm_opp_icc_bw {
+ u32 avg;
+ u32 peak;
+};
+
+/**
* struct dev_pm_opp_info - OPP freq/voltage/current values
* @rate: Target clk rate in hz
* @supplies: Array of voltage/current values for all power supplies
@@ -360,6 +372,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
int of_get_required_opp_performance_state(struct device_node *np, int index);
+int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table);
void dev_pm_opp_of_register_em(struct cpumask *cpus);
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
@@ -408,6 +421,11 @@ static inline int of_get_required_opp_performance_state(struct device_node *np,
{
return -ENOTSUPP;
}
+
+static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table)
+{
+ return -ENOTSUPP;
+}
#endif
#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index dcd5a71e6c67..ac1345a48ad0 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -61,6 +61,7 @@ enum {
POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE,
POWER_SUPPLY_HEALTH_OVERCURRENT,
+ POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED,
};
enum {
@@ -139,6 +140,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_CAPACITY, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */
+ POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TEMP_MAX,
@@ -158,6 +160,9 @@ enum power_supply_property {
POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
POWER_SUPPLY_PROP_CALIBRATE,
+ POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
+ POWER_SUPPLY_PROP_MANUFACTURE_MONTH,
+ POWER_SUPPLY_PROP_MANUFACTURE_DAY,
/* Properties of type `const char *' */
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
@@ -223,9 +228,9 @@ struct power_supply_config {
struct power_supply_desc {
const char *name;
enum power_supply_type type;
- enum power_supply_usb_type *usb_types;
+ const enum power_supply_usb_type *usb_types;
size_t num_usb_types;
- enum power_supply_property *properties;
+ const enum power_supply_property *properties;
size_t num_properties;
/*
@@ -346,8 +351,12 @@ struct power_supply_battery_info {
int charge_full_design_uah; /* microAmp-hours */
int voltage_min_design_uv; /* microVolts */
int voltage_max_design_uv; /* microVolts */
+ int tricklecharge_current_ua; /* microAmps */
int precharge_current_ua; /* microAmps */
+ int precharge_voltage_max_uv; /* microVolts */
int charge_term_current_ua; /* microAmps */
+ int charge_restart_voltage_uv; /* microVolts */
+ int overvoltage_limit_uv; /* microVolts */
int constant_charge_current_max_ua; /* microAmps */
int constant_charge_voltage_max_uv; /* microVolts */
int factory_internal_resistance_uohm; /* microOhms */
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 3cc2f178bf06..fc8f03c54543 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -399,7 +399,8 @@ extern int kptr_restrict;
/* If you are writing a driver, please use dev_dbg instead */
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#include <linux/dynamic_debug.h>
/**
@@ -535,7 +536,8 @@ extern int kptr_restrict;
#endif
/* If you are writing a driver, please use dev_dbg instead */
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define pr_debug_ratelimited(fmt, ...) \
do { \
@@ -582,7 +584,8 @@ static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
#endif
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index ad4ff711fc02..d1eed1b43651 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -42,6 +42,34 @@ struct proc_ops {
unsigned long (*proc_get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
} __randomize_layout;
+/* definitions for hide_pid field */
+enum proc_hidepid {
+ HIDEPID_OFF = 0,
+ HIDEPID_NO_ACCESS = 1,
+ HIDEPID_INVISIBLE = 2,
+ HIDEPID_NOT_PTRACEABLE = 4, /* Limit pids to only ptraceable pids */
+};
+
+/* definitions for proc mount option pidonly */
+enum proc_pidonly {
+ PROC_PIDONLY_OFF = 0,
+ PROC_PIDONLY_ON = 1,
+};
+
+struct proc_fs_info {
+ struct pid_namespace *pid_ns;
+ struct dentry *proc_self; /* For /proc/self */
+ struct dentry *proc_thread_self; /* For /proc/thread-self */
+ kgid_t pid_gid;
+ enum proc_hidepid hide_pid;
+ enum proc_pidonly pidonly;
+};
+
+static inline struct proc_fs_info *proc_sb_info(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
#ifdef CONFIG_PROC_FS
typedef int (*proc_write_t)(struct file *, char *, size_t);
@@ -177,9 +205,9 @@ int open_related_ns(struct ns_common *ns,
struct ns_common *(*get_ns)(struct ns_common *ns));
/* get the associated pid namespace for a file in procfs */
-static inline struct pid_namespace *proc_pid_ns(const struct inode *inode)
+static inline struct pid_namespace *proc_pid_ns(struct super_block *sb)
{
- return inode->i_sb->s_fs_info;
+ return proc_sb_info(sb)->pid_ns;
}
bool proc_ns_file(const struct file *file);
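Since proc_pid_ns() is now keyed off the superblock, callers holding only an inode or file dereference the superblock themselves, e.g.:

	struct pid_namespace *ns = proc_pid_ns(file_inode(file)->i_sb);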
diff --git a/include/linux/property.h b/include/linux/property.h
index c7b5f3db36aa..10d03572f52e 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -444,6 +444,7 @@ int software_node_register_node_group(const struct software_node **node_group);
void software_node_unregister_node_group(const struct software_node **node_group);
int software_node_register(const struct software_node *node);
+void software_node_unregister(const struct software_node *node);
int software_node_notify(struct device *dev, unsigned long action);
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 48325d7790f8..8cb76405cbce 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -828,6 +828,7 @@ struct qed_common_cb_ops {
void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
void (*get_protocol_tlv_data)(void *dev, void *data);
+ void (*bw_update)(void *dev);
};
struct qed_selftest_ops {
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index 584077565f12..2d3ddd2b85e0 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -98,7 +98,6 @@ struct qed_rdma_device {
u64 max_mr_size;
u32 max_cqe;
u32 max_mw;
- u32 max_fmr;
u32 max_mr_mw_fmr_pbl;
u64 max_mr_mw_fmr_size;
u32 max_pd;
diff --git a/include/linux/ras.h b/include/linux/ras.h
index 7c3debb47c87..1f4048bf2674 100644
--- a/include/linux/ras.h
+++ b/include/linux/ras.h
@@ -17,12 +17,7 @@ static inline int ras_add_daemon_trace(void) { return 0; }
#endif
#ifdef CONFIG_RAS_CEC
-void __init cec_init(void);
int __init parse_cec_param(char *str);
-int cec_add_elem(u64 pfn);
-#else
-static inline void __init cec_init(void) { }
-static inline int cec_add_elem(u64 pfn) { return -ENODEV; }
#endif
#ifdef CONFIG_RAS
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 7375bb3da140..df587d181844 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -506,6 +506,27 @@ static inline void hlist_replace_rcu(struct hlist_node *old,
WRITE_ONCE(old->pprev, LIST_POISON2);
}
+/**
+ * hlists_swap_heads_rcu - swap the lists the hlist heads point to
+ * @left: The hlist head on the left
+ * @right: The hlist head on the right
+ *
+ * The lists start out as [@left  ][node1 ... ] and
+ *                        [@right ][node2 ... ]
+ * The lists end up as    [@left  ][node2 ... ]
+ *                        [@right ][node1 ... ]
+ */
+static inline void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right)
+{
+ struct hlist_node *node1 = left->first;
+ struct hlist_node *node2 = right->first;
+
+ rcu_assign_pointer(left->first, node2);
+ rcu_assign_pointer(right->first, node1);
+ WRITE_ONCE(node2->pprev, &left->first);
+ WRITE_ONCE(node1->pprev, &right->first);
+}
+
/*
* return the first or the next element in an RCU protected hlist
*/
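A usage sketch for hlists_swap_heads_rcu(): the helper dereferences both first nodes unconditionally, so both lists must be non-empty, and the swap must be serialized against other writers (the lock below is illustrative):

	spin_lock(&table_lock);
	hlists_swap_heads_rcu(&head_a, &head_b);
	spin_unlock(&table_lock);
	/* synchronize_rcu() before freeing nodes readers may still traverse */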
diff --git a/include/linux/regset.h b/include/linux/regset.h
index bf0243779738..46d6ae68c455 100644
--- a/include/linux/regset.h
+++ b/include/linux/regset.h
@@ -320,7 +320,7 @@ static inline int user_regset_copyout_zero(unsigned int *pos,
if (*kbuf) {
memset(*kbuf, 0, copy);
*kbuf += copy;
- } else if (__clear_user(*ubuf, copy))
+ } else if (clear_user(*ubuf, copy))
return -EFAULT;
else
*ubuf += copy;
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 9c07d7958c53..e7b7bab8b235 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -73,7 +73,7 @@ struct resource_table {
u32 ver;
u32 num;
u32 reserved[2];
- u32 offset[0];
+ u32 offset[];
} __packed;
/**
@@ -87,7 +87,7 @@ struct resource_table {
*/
struct fw_rsc_hdr {
u32 type;
- u8 data[0];
+ u8 data[];
} __packed;
/**
@@ -306,7 +306,7 @@ struct fw_rsc_vdev {
u8 status;
u8 num_of_vrings;
u8 reserved[2];
- struct fw_rsc_vdev_vring vring[0];
+ struct fw_rsc_vdev_vring vring[];
} __packed;
struct rproc;
@@ -355,6 +355,8 @@ enum rsc_handling_status {
/**
* struct rproc_ops - platform-specific device handlers
+ * @prepare: prepare device for code loading
+ * @unprepare: unprepare device after stop
* @start: power on the device and boot it
* @stop: power off the device
* @kick: kick a virtqueue (virtqueue id given as a parameter)
@@ -373,6 +375,8 @@ enum rsc_handling_status {
* panic at least the returned number of milliseconds
*/
struct rproc_ops {
+ int (*prepare)(struct rproc *rproc);
+ int (*unprepare)(struct rproc *rproc);
int (*start)(struct rproc *rproc);
int (*stop)(struct rproc *rproc);
void (*kick)(struct rproc *rproc, int vqid);
@@ -489,7 +493,7 @@ struct rproc {
struct list_head node;
struct iommu_domain *domain;
const char *name;
- char *firmware;
+ const char *firmware;
void *priv;
struct rproc_ops *ops;
struct device dev;
@@ -518,6 +522,7 @@ struct rproc {
struct list_head dump_segments;
int nb_vdev;
u8 elf_class;
+ u16 elf_machine;
};
/**
@@ -599,6 +604,11 @@ int rproc_add(struct rproc *rproc);
int rproc_del(struct rproc *rproc);
void rproc_free(struct rproc *rproc);
+struct rproc *devm_rproc_alloc(struct device *dev, const char *name,
+ const struct rproc_ops *ops,
+ const char *firmware, int len);
+int devm_rproc_add(struct device *dev, struct rproc *rproc);
+
void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem);
struct rproc_mem_entry *
@@ -622,6 +632,7 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc,
struct rproc_dump_segment *segment,
void *dest),
void *priv);
+int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine);
static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{
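The devm_ variants let a probe routine drop its rproc_free()/rproc_del() unwind paths. A hedged sketch; the ops, firmware name and private size are placeholders:

	static int my_probe(struct platform_device *pdev)
	{
		struct rproc *rproc;

		rproc = devm_rproc_alloc(&pdev->dev, dev_name(&pdev->dev),
					 &my_rproc_ops, "my-fw.elf",
					 sizeof(struct my_rproc_priv));
		if (!rproc)
			return -ENOMEM;

		return devm_rproc_add(&pdev->dev, rproc);
	}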
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 988d176472df..3a6adfa70fb0 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -77,7 +77,7 @@ struct anon_vma {
struct anon_vma_chain {
struct vm_area_struct *vma;
struct anon_vma *anon_vma;
- struct list_head same_vma; /* locked by mmap_sem & page_table_lock */
+ struct list_head same_vma; /* locked by mmap_lock & page_table_lock */
struct rb_node rb; /* locked by anon_vma->rwsem */
unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
diff --git a/include/linux/rpmsg/qcom_glink.h b/include/linux/rpmsg/qcom_glink.h
index 96e26d94719f..daded9fddf36 100644
--- a/include/linux/rpmsg/qcom_glink.h
+++ b/include/linux/rpmsg/qcom_glink.h
@@ -12,6 +12,7 @@ struct qcom_glink;
struct qcom_glink *qcom_glink_smem_register(struct device *parent,
struct device_node *node);
void qcom_glink_smem_unregister(struct qcom_glink *glink);
+void qcom_glink_ssr_notify(const char *ssr_name);
#else
@@ -23,7 +24,7 @@ qcom_glink_smem_register(struct device *parent,
}
static inline void qcom_glink_smem_unregister(struct qcom_glink *glink) {}
-
+static inline void qcom_glink_ssr_notify(const char *ssr_name) {}
#endif
#endif
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index 65b8142a7fed..e8780d4e4636 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -1080,11 +1080,7 @@ struct pcr_ops {
void (*stop_cmd)(struct rtsx_pcr *pcr);
void (*set_aspm)(struct rtsx_pcr *pcr, bool enable);
- int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency);
- int (*set_l1off_sub)(struct rtsx_pcr *pcr, u8 val);
void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active);
- void (*full_on)(struct rtsx_pcr *pcr);
- void (*power_saving)(struct rtsx_pcr *pcr);
void (*enable_ocp)(struct rtsx_pcr *pcr);
void (*disable_ocp)(struct rtsx_pcr *pcr);
void (*init_ocp)(struct rtsx_pcr *pcr);
@@ -1108,13 +1104,6 @@ enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
#define L1_SNOOZE_TEST_EN BIT(5)
#define LTR_L1SS_PWR_GATE_CHECK_CARD_EN BIT(6)
-enum dev_aspm_mode {
- DEV_ASPM_DYNAMIC,
- DEV_ASPM_BACKDOOR,
- DEV_ASPM_STATIC,
- DEV_ASPM_DISABLE,
-};
-
/*
* struct rtsx_cr_option - card reader option
* @dev_flags: device flags
@@ -1125,7 +1114,6 @@ enum dev_aspm_mode {
* @ltr_active_latency: ltr mode active latency
* @ltr_idle_latency: ltr mode idle latency
* @ltr_l1off_latency: ltr mode l1off latency
- * @dev_aspm_mode: device aspm mode
* @l1_snooze_delay: l1 snooze delay
* @ltr_l1off_sspwrgate: ltr l1off sspwrgate
* @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate
@@ -1142,7 +1130,6 @@ struct rtsx_cr_option {
u32 ltr_active_latency;
u32 ltr_idle_latency;
u32 ltr_l1off_latency;
- enum dev_aspm_mode dev_aspm_mode;
u32 l1_snooze_delay;
u8 ltr_l1off_sspwrgate;
u8 ltr_l1off_snooze_sspwrgate;
@@ -1320,18 +1307,6 @@ static inline u8 *rtsx_pci_get_cmd_data(struct rtsx_pcr *pcr)
return (u8 *)(pcr->host_cmds_ptr);
}
-static inline int rtsx_pci_update_cfg_byte(struct rtsx_pcr *pcr, int addr,
- u8 mask, u8 append)
-{
- int err;
- u8 val;
-
- err = pci_read_config_byte(pcr->pci, addr, &val);
- if (err < 0)
- return err;
- return pci_write_config_byte(pcr->pci, addr, (val & mask) | append);
-}
-
static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val)
{
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg, 0xFF, val >> 24);
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 6eec50fb36c8..4f922afb607a 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -151,6 +151,20 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
#define for_each_sg(sglist, sg, nr, __i) \
for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
+/*
+ * Loop over each sg element in the given sg_table object.
+ */
+#define for_each_sgtable_sg(sgt, sg, i) \
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
+
+/*
+ * Loop over each sg element in the given *DMA mapped* sg_table object.
+ * Please use sg_dma_address(sg) and sg_dma_len(sg) to extract DMA addresses
+ * of each element.
+ */
+#define for_each_sgtable_dma_sg(sgt, sg, i) \
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+
/**
* sg_chain - Chain two sglists together
* @prv: First scatterlist
@@ -401,9 +415,10 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
* @sglist: sglist to iterate over
* @piter: page iterator to hold current page, sg, sg_pgoffset
* @nents: maximum number of sg entries to iterate over
- * @pgoffset: starting page offset
+ * @pgoffset: starting page offset (in pages)
*
* Callers may use sg_page_iter_page() to get each page pointer.
+ * Each iteration operates on one PAGE_SIZE unit.
*/
#define for_each_sg_page(sglist, piter, nents, pgoffset) \
for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
@@ -412,18 +427,47 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
/**
* for_each_sg_dma_page - iterate over the pages of the given sg list
* @sglist: sglist to iterate over
- * @dma_iter: page iterator to hold current page
+ * @dma_iter: DMA page iterator to hold current page
* @dma_nents: maximum number of sg entries to iterate over, this is the value
* returned from dma_map_sg
- * @pgoffset: starting page offset
+ * @pgoffset: starting page offset (in pages)
*
* Callers may use sg_page_iter_dma_address() to get each page's DMA address.
+ * Each iteration operates on one PAGE_SIZE unit.
*/
#define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset) \
for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents, \
pgoffset); \
__sg_page_iter_dma_next(dma_iter);)
+/**
+ * for_each_sgtable_page - iterate over all pages in the sg_table object
+ * @sgt: sg_table object to iterate over
+ * @piter: page iterator to hold current page
+ * @pgoffset: starting page offset (in pages)
+ *
+ * Iterates over all the memory pages in the buffer described by
+ * a scatterlist stored in the given sg_table object.
+ * See also for_each_sg_page(). Each iteration operates on one PAGE_SIZE unit.
+ */
+#define for_each_sgtable_page(sgt, piter, pgoffset) \
+ for_each_sg_page(sgt->sgl, piter, sgt->orig_nents, pgoffset)
+
+/**
+ * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object
+ * @sgt: sg_table object to iterate over
+ * @dma_iter: DMA page iterator to hold current page
+ * @pgoffset: starting page offset (in pages)
+ *
+ * Iterates over all the DMA mapped pages in the buffer described by
+ * a scatterlist stored in the given sg_table object.
+ * See also for_each_sg_dma_page(). Each iteration operates on one
+ * PAGE_SIZE unit.
+ */
+#define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset) \
+ for_each_sg_dma_page(sgt->sgl, dma_iter, sgt->nents, pgoffset)
+
+
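An illustrative walk over a DMA-mapped table with the new helpers; do_dma() stands in for whatever consumes each mapped segment:

	struct scatterlist *sg;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i)
		do_dma(sg_dma_address(sg), sg_dma_len(sg));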
/*
* Mapping sg iterator
*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 57a5ce9f33c5..b62e6aaf28f0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -31,6 +31,7 @@
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
+#include <linux/kcsan.h>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
@@ -1197,6 +1198,9 @@ struct task_struct {
#ifdef CONFIG_KASAN
unsigned int kasan_depth;
#endif
+#ifdef CONFIG_KCSAN
+ struct kcsan_ctx kcsan_ctx;
+#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack: */
@@ -1247,6 +1251,9 @@ struct task_struct {
/* KCOV sequence number: */
int kcov_sequence;
+
+ /* Collect coverage from softirq context: */
+ unsigned int kcov_softirq;
#endif
#ifdef CONFIG_MEMCG
@@ -1301,7 +1308,9 @@ struct task_struct {
#ifdef CONFIG_X86_MCE
u64 mce_addr;
- u64 mce_status;
+ __u64 mce_ripv : 1,
+ mce_whole_page : 1,
+ __mce_reserved : 62;
struct callback_head mce_kill_me;
#endif
diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h
index 95fb9e025247..00c45a0e6abe 100644
--- a/include/linux/sched/debug.h
+++ b/include/linux/sched/debug.h
@@ -30,7 +30,8 @@ extern void show_regs(struct pt_regs *);
* task), SP is the stack pointer of the first frame that should be shown in the back
* trace (or NULL if the entire call-chain of the task should be shown).
*/
-extern void show_stack(struct task_struct *task, unsigned long *sp);
+extern void show_stack(struct task_struct *task, unsigned long *sp,
+ const char *loglvl);
extern void sched_show_task(struct task_struct *p);
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index a132d875d351..480a4d1b7dd8 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -53,7 +53,7 @@ void mmdrop(struct mm_struct *mm);
/*
* This has to be called after a get_task_mm()/mmget_not_zero()
- * followed by taking the mmap_sem for writing before modifying the
+ * followed by taking the mmap_lock for writing before modifying the
* vmas or anything the coredump pretends not to change from under it.
*
* It also has to be called when mmgrab() is used in the context of
@@ -61,14 +61,14 @@ void mmdrop(struct mm_struct *mm);
* the context of the process to run down_write() on that pinned mm.
*
* NOTE: find_extend_vma() called from GUP context is the only place
- * that can modify the "mm" (notably the vm_start/end) under mmap_sem
+ * that can modify the "mm" (notably the vm_start/end) under mmap_lock
* for reading and outside the context of the process, so it is also
- * the only case that holds the mmap_sem for reading that must call
- * this function. Generally if the mmap_sem is hold for reading
+ * the only case that holds the mmap_lock for reading that must call
+ * this function. Generally if the mmap_lock is held for reading
* there's no need of this check after get_task_mm()/mmget_not_zero().
*
 * This function can be obsoleted and the check can be removed, once
- * the coredump code will hold the mmap_sem for writing before
+ * the coredump code holds the mmap_lock for writing before
* invoking the ->core_dump methods.
*/
static inline bool mmget_still_valid(struct mm_struct *mm)
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 3e5b090c16d4..0ee5e696c5d8 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -654,17 +654,6 @@ static inline bool thread_group_leader(struct task_struct *p)
return p->exit_signal >= 0;
}
-/* Do to the insanities of de_thread it is possible for a process
- * to have the pid of the thread group leader without actually being
- * the thread group leader. For iteration through the pids in proc
- * all we care about is that we have a task with the appropriate
- * pid, we don't actually care if we have the right task.
- */
-static inline bool has_group_leader_pid(struct task_struct *p)
-{
- return task_pid(p) == task_tgid(p);
-}
-
static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 7b4d3a49b6c5..660ac49f2b53 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -7,6 +7,13 @@
struct ctl_table;
#ifdef CONFIG_DETECT_HUNG_TASK
+
+#ifdef CONFIG_SMP
+extern unsigned int sysctl_hung_task_all_cpu_backtrace;
+#else
+#define sysctl_hung_task_all_cpu_backtrace 0
+#endif /* CONFIG_SMP */
+
extern int sysctl_hung_task_check_count;
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_timeout_secs;
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 5c873a59b387..ce2f5c28b2df 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -4,6 +4,10 @@
*
* Copyright (C) 2018 ARM Ltd.
*/
+
+#ifndef _LINUX_SCMI_PROTOCOL_H
+#define _LINUX_SCMI_PROTOCOL_H
+
#include <linux/device.h>
#include <linux/types.h>
@@ -319,3 +323,5 @@ static inline void scmi_driver_unregister(struct scmi_driver *driver) {}
typedef int (*scmi_prot_init_fn_t)(struct scmi_handle *);
int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn);
void scmi_protocol_unregister(int protocol_id);
+
+#endif /* _LINUX_SCMI_PROTOCOL_H */
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
index ecb004711acf..afbf8037d8db 100644
--- a/include/linux/scpi_protocol.h
+++ b/include/linux/scpi_protocol.h
@@ -4,6 +4,10 @@
*
* Copyright (C) 2014 ARM Ltd.
*/
+
+#ifndef _LINUX_SCPI_PROTOCOL_H
+#define _LINUX_SCPI_PROTOCOL_H
+
#include <linux/types.h>
struct scpi_opp {
@@ -71,3 +75,5 @@ struct scpi_ops *get_scpi_ops(void);
#else
static inline struct scpi_ops *get_scpi_ops(void) { return NULL; }
#endif
+
+#endif /* _LINUX_SCPI_PROTOCOL_H */
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 8ccd82105de8..76731230bbc5 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -221,7 +221,7 @@ struct sctp_datahdr {
__be16 stream;
__be16 ssn;
__u32 ppid;
- __u8 payload[0];
+ __u8 payload[];
};
struct sctp_data_chunk {
@@ -269,7 +269,7 @@ struct sctp_inithdr {
__be16 num_outbound_streams;
__be16 num_inbound_streams;
__be32 initial_tsn;
- __u8 params[0];
+ __u8 params[];
};
struct sctp_init_chunk {
@@ -299,13 +299,13 @@ struct sctp_cookie_preserve_param {
/* Section 3.3.2.1 Host Name Address (11) */
struct sctp_hostname_param {
struct sctp_paramhdr param_hdr;
- uint8_t hostname[0];
+ uint8_t hostname[];
};
/* Section 3.3.2.1 Supported Address Types (12) */
struct sctp_supported_addrs_param {
struct sctp_paramhdr param_hdr;
- __be16 types[0];
+ __be16 types[];
};
/* ADDIP Section 3.2.6 Adaptation Layer Indication */
@@ -317,25 +317,25 @@ struct sctp_adaptation_ind_param {
/* ADDIP Section 4.2.7 Supported Extensions Parameter */
struct sctp_supported_ext_param {
struct sctp_paramhdr param_hdr;
- __u8 chunks[0];
+ __u8 chunks[];
};
/* AUTH Section 3.1 Random */
struct sctp_random_param {
struct sctp_paramhdr param_hdr;
- __u8 random_val[0];
+ __u8 random_val[];
};
/* AUTH Section 3.2 Chunk List */
struct sctp_chunks_param {
struct sctp_paramhdr param_hdr;
- __u8 chunks[0];
+ __u8 chunks[];
};
/* AUTH Section 3.3 HMAC Algorithm */
struct sctp_hmac_algo_param {
struct sctp_paramhdr param_hdr;
- __be16 hmac_ids[0];
+ __be16 hmac_ids[];
};
/* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2):
@@ -350,7 +350,7 @@ struct sctp_initack_chunk {
/* Section 3.3.3.1 State Cookie (7) */
struct sctp_cookie_param {
struct sctp_paramhdr p;
- __u8 body[0];
+ __u8 body[];
};
/* Section 3.3.3.1 Unrecognized Parameters (8) */
@@ -384,7 +384,7 @@ struct sctp_sackhdr {
__be32 a_rwnd;
__be16 num_gap_ack_blocks;
__be16 num_dup_tsns;
- union sctp_sack_variable variable[0];
+ union sctp_sack_variable variable[];
};
struct sctp_sack_chunk {
@@ -436,7 +436,7 @@ struct sctp_shutdown_chunk {
struct sctp_errhdr {
__be16 cause;
__be16 length;
- __u8 variable[0];
+ __u8 variable[];
};
struct sctp_operr_chunk {
@@ -594,7 +594,7 @@ struct sctp_fwdtsn_skip {
struct sctp_fwdtsn_hdr {
__be32 new_cum_tsn;
- struct sctp_fwdtsn_skip skip[0];
+ struct sctp_fwdtsn_skip skip[];
};
struct sctp_fwdtsn_chunk {
@@ -611,7 +611,7 @@ struct sctp_ifwdtsn_skip {
struct sctp_ifwdtsn_hdr {
__be32 new_cum_tsn;
- struct sctp_ifwdtsn_skip skip[0];
+ struct sctp_ifwdtsn_skip skip[];
};
struct sctp_ifwdtsn_chunk {
@@ -658,7 +658,7 @@ struct sctp_addip_param {
struct sctp_addiphdr {
__be32 serial;
- __u8 params[0];
+ __u8 params[];
};
struct sctp_addip_chunk {
@@ -718,7 +718,7 @@ struct sctp_addip_chunk {
struct sctp_authhdr {
__be16 shkey_id;
__be16 hmac_id;
- __u8 hmac[0];
+ __u8 hmac[];
};
struct sctp_auth_chunk {
@@ -733,7 +733,7 @@ struct sctp_infox {
struct sctp_reconf_chunk {
struct sctp_chunkhdr chunk_hdr;
- __u8 params[0];
+ __u8 params[];
};
struct sctp_strreset_outreq {
@@ -741,13 +741,13 @@ struct sctp_strreset_outreq {
__be32 request_seq;
__be32 response_seq;
__be32 send_reset_at_tsn;
- __be16 list_of_streams[0];
+ __be16 list_of_streams[];
};
struct sctp_strreset_inreq {
struct sctp_paramhdr param_hdr;
__be32 request_seq;
- __be16 list_of_streams[0];
+ __be16 list_of_streams[];
};
struct sctp_strreset_tsnreq {
diff --git a/include/linux/security.h b/include/linux/security.h
index 6aa229b252ce..0a0a03b36a3b 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -56,6 +56,8 @@ struct mm_struct;
struct fs_context;
struct fs_parameter;
enum fs_value_type;
+struct watch;
+struct watch_notification;
/* Default (no) options for the capable function */
#define CAP_OPT_NONE 0x0
@@ -140,7 +142,7 @@ extern int cap_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
-extern int cap_bprm_set_creds(struct linux_binprm *bprm);
+extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
@@ -276,7 +278,8 @@ int security_quota_on(struct dentry *dentry);
int security_syslog(int type);
int security_settime64(const struct timespec64 *ts, const struct timezone *tz);
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
-int security_bprm_set_creds(struct linux_binprm *bprm);
+int security_bprm_creds_for_exec(struct linux_binprm *bprm);
+int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
int security_bprm_check(struct linux_binprm *bprm);
void security_bprm_committing_creds(struct linux_binprm *bprm);
void security_bprm_committed_creds(struct linux_binprm *bprm);
@@ -389,6 +392,8 @@ int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
enum kernel_read_file_id id);
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags);
+int security_task_fix_setgid(struct cred *new, const struct cred *old,
+ int flags);
int security_task_setpgid(struct task_struct *p, pid_t pgid);
int security_task_getpgid(struct task_struct *p);
int security_task_getsid(struct task_struct *p);
@@ -569,9 +574,15 @@ static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages));
}
-static inline int security_bprm_set_creds(struct linux_binprm *bprm)
+static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm)
+{
+ return 0;
+}
+
+static inline int security_bprm_creds_from_file(struct linux_binprm *bprm,
+ struct file *file)
{
- return cap_bprm_set_creds(bprm);
+ return cap_bprm_creds_from_file(bprm, file);
}
static inline int security_bprm_check(struct linux_binprm *bprm)
@@ -1027,6 +1038,13 @@ static inline int security_task_fix_setuid(struct cred *new,
return cap_task_fix_setuid(new, old, flags);
}
+static inline int security_task_fix_setgid(struct cred *new,
+ const struct cred *old,
+ int flags)
+{
+ return 0;
+}
+
static inline int security_task_setpgid(struct task_struct *p, pid_t pgid)
{
return 0;
@@ -1275,6 +1293,28 @@ static inline int security_locked_down(enum lockdown_reason what)
}
#endif /* CONFIG_SECURITY */
+#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
+int security_post_notification(const struct cred *w_cred,
+ const struct cred *cred,
+ struct watch_notification *n);
+#else
+static inline int security_post_notification(const struct cred *w_cred,
+ const struct cred *cred,
+ struct watch_notification *n)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS)
+int security_watch_key(struct key *key);
+#else
+static inline int security_watch_key(struct key *key)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_SECURITY_NETWORK
int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk);
@@ -1743,8 +1783,8 @@ static inline int security_path_chroot(const struct path *path)
int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags);
void security_key_free(struct key *key);
-int security_key_permission(key_ref_t key_ref,
- const struct cred *cred, unsigned perm);
+int security_key_permission(key_ref_t key_ref, const struct cred *cred,
+ enum key_need_perm need_perm);
int security_key_getsecurity(struct key *key, char **_buffer);
#else
@@ -1762,7 +1802,7 @@ static inline void security_key_free(struct key *key)
static inline int security_key_permission(key_ref_t key_ref,
const struct cred *cred,
- unsigned perm)
+ enum key_need_perm need_perm)
{
return 0;
}
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 1672cf6f7614..813614d4b71f 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -145,6 +145,25 @@ void *__seq_open_private(struct file *, const struct seq_operations *, int);
int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
+#define DEFINE_SEQ_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ int ret = seq_open(file, &__name ## _sops); \
+ if (!ret && inode->i_private) { \
+ struct seq_file *seq_f = file->private_data; \
+ seq_f->private = inode->i_private; \
+ } \
+ return ret; \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = seq_release, \
+}
+
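DEFINE_SEQ_ATTRIBUTE(name) expects a seq_operations instance named name_sops and emits name_open()/name_fops. A sketch with placeholder foo_* callbacks:

	static const struct seq_operations foo_sops = {
		.start	= foo_start,
		.next	= foo_next,
		.stop	= foo_stop,
		.show	= foo_show,
	};
	DEFINE_SEQ_ATTRIBUTE(foo);

	/* then e.g. debugfs_create_file("foo", 0444, parent, data, &foo_fops); */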
#define DEFINE_SHOW_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 0491d963d47e..8b97204f35a7 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -37,9 +37,25 @@
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
#include <asm/processor.h>
/*
+ * The seqlock interface does not prescribe a precise sequence of read
+ * begin/retry/end. For readers, typically there is a call to
+ * read_seqcount_begin() and read_seqcount_retry(); however, there are more
+ * esoteric cases which do not follow this pattern.
+ *
+ * As a consequence, we take the following best-effort approach for raw usage
+ * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
+ * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
+ * atomics; if there is a matching read_seqcount_retry() call, no following
+ * memory operations are considered atomic. Usage of seqlocks via seqlock_t
+ * interface is not affected.
+ */
+#define KCSAN_SEQLOCK_REGION_MAX 1000
+
+/*
* Version using sequence counter only.
* This can be used when code has its own mutex protecting the
 * updating starting before the write_seqcount_begin() and ending
@@ -115,6 +131,7 @@ repeat:
cpu_relax();
goto repeat;
}
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret;
}
@@ -131,6 +148,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret;
}
@@ -183,6 +201,7 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret & ~1;
}
@@ -202,7 +221,8 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
*/
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
- return unlikely(s->sequence != start);
+ kcsan_atomic_next(0);
+ return unlikely(READ_ONCE(s->sequence) != start);
}
/**
@@ -225,6 +245,7 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
static inline void raw_write_seqcount_begin(seqcount_t *s)
{
+ kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
}
@@ -233,6 +254,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
+ kcsan_nestable_atomic_end();
}
/**
@@ -243,6 +265,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
* usual consistency guarantee. It is one wmb cheaper, because we can
* collapse the two back-to-back wmb()s.
*
+ * Note that writes surrounding the barrier should be declared atomic (e.g.
+ * via WRITE_ONCE): a) to ensure the writes become visible to other threads
+ * atomically, avoiding compiler optimizations; b) to document which writes are
+ * meant to propagate to the reader critical section. This is necessary because
+ * the writes before and after the barrier are not enclosed in a seq-writer
+ * critical section that would ensure readers are aware of ongoing writes.
+ *
* seqcount_t seq;
* bool X = true, Y = false;
*
@@ -262,18 +291,20 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
*
* void write(void)
* {
- * Y = true;
+ * WRITE_ONCE(Y, true);
*
* raw_write_seqcount_barrier(seq);
*
- * X = false;
+ * WRITE_ONCE(X, false);
* }
*/
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
+ kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
s->sequence++;
+ kcsan_nestable_atomic_end();
}
static inline int raw_read_seqcount_latch(seqcount_t *s)
@@ -398,7 +429,9 @@ static inline void write_seqcount_end(seqcount_t *s)
static inline void write_seqcount_invalidate(seqcount_t *s)
{
smp_wmb();
+ kcsan_nestable_atomic_begin();
s->sequence+=2;
+ kcsan_nestable_atomic_end();
}
typedef struct {
@@ -430,11 +463,21 @@ typedef struct {
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
- return read_seqcount_begin(&sl->seqcount);
+ unsigned ret = read_seqcount_begin(&sl->seqcount);
+
+ kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
+ kcsan_flat_atomic_begin();
+ return ret;
}
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
+ /*
+ * Assume not nested: read_seqretry() may be called multiple times when
+ * completing a read critical section.
+ */
+ kcsan_flat_atomic_end();
+
return read_seqcount_retry(&sl->seqcount, start);
}
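For reference, the canonical reader pattern that these annotations bracket:

	unsigned seq;

	do {
		seq = read_seqbegin(&sl);
		/* speculatively read the seqlock-protected data */
	} while (read_seqretry(&sl, seq));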
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 92f5eba86052..9fd550e7946a 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/console.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/circ_buf.h>
#include <linux/spinlock.h>
@@ -251,6 +252,7 @@ struct uart_port {
struct attribute_group *attr_group; /* port specific attributes */
const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
struct serial_rs485 rs485;
+ struct gpio_desc *rs485_term_gpio; /* enable RS485 bus termination */
struct serial_iso7816 iso7816;
void *private_data; /* generic platform data pointer */
};
@@ -472,5 +474,5 @@ extern int uart_handle_break(struct uart_port *port);
(cflag) & CRTSCTS || \
!((cflag) & CLOCAL))
-void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf);
+int uart_get_rs485_mode(struct uart_port *port);
#endif /* LINUX_SERIAL_CORE_H */
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 86281ac7c305..860e0f843c12 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -26,7 +26,7 @@ static inline int set_direct_map_default_noflush(struct page *page)
#endif
#ifndef set_mce_nospec
-static inline int set_mce_nospec(unsigned long pfn)
+static inline int set_mce_nospec(unsigned long pfn, bool unmap)
{
return 0;
}
diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h
new file mode 100644
index 000000000000..7bab5d9a3d31
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk-mmsys.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ */
+
+#ifndef __MTK_MMSYS_H
+#define __MTK_MMSYS_H
+
+enum mtk_ddp_comp_id;
+struct device;
+
+void mtk_mmsys_ddp_connect(struct device *dev,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next);
+
+void mtk_mmsys_ddp_disconnect(struct device *dev,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next);
+
+#endif /* __MTK_MMSYS_H */
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 00f5826092e3..9c27a32df9bb 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -291,8 +291,8 @@ struct sdw_dpn_audio_mode {
* implementation-defined interrupts
* @max_ch: Maximum channels supported
* @min_ch: Minimum channels supported
- * @num_ch: Number of discrete channels supported
- * @ch: Discrete channels supported
+ * @num_channels: Number of discrete channels supported
+ * @channels: Discrete channels supported
* @num_ch_combinations: Number of channel combinations supported
* @ch_combinations: Channel combinations supported
* @modes: SDW mode supported
@@ -316,8 +316,8 @@ struct sdw_dpn_prop {
u32 imp_def_interrupts;
u32 max_ch;
u32 min_ch;
- u32 num_ch;
- u32 *ch;
+ u32 num_channels;
+ u32 *channels;
u32 num_ch_combinations;
u32 *ch_combinations;
u32 modes;
@@ -632,6 +632,19 @@ struct sdw_slave {
#define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev)
+/**
+ * struct sdw_master_device - SoundWire 'Master Device' representation
+ * @dev: Linux device for this Master
+ * @bus: Bus handle shortcut
+ */
+struct sdw_master_device {
+ struct device dev;
+ struct sdw_bus *bus;
+};
+
+#define dev_to_sdw_master_device(d) \
+ container_of(d, struct sdw_master_device, dev)
+
struct sdw_driver {
const char *name;
@@ -787,8 +800,10 @@ struct sdw_master_ops {
/**
* struct sdw_bus - SoundWire bus
- * @dev: Master linux device
+ * @dev: Shortcut for &bus->md->dev, kept so existing users need not be reworked.
+ * @md: Master device
* @link_id: Link id number, can be 0 to N, unique for each Master
+ * @id: bus system-wide unique id
* @slaves: list of Slaves on this bus
* @assigned: Bitmap for Slave device numbers.
* Bit set implies used number, bit clear implies unused number.
@@ -812,7 +827,9 @@ struct sdw_master_ops {
*/
struct sdw_bus {
struct device *dev;
+ struct sdw_master_device *md;
unsigned int link_id;
+ int id;
struct list_head slaves;
DECLARE_BITMAP(assigned, SDW_MAX_DEVICES);
struct mutex bus_lock;
@@ -832,8 +849,9 @@ struct sdw_bus {
bool multi_link;
};
-int sdw_add_bus_master(struct sdw_bus *bus);
-void sdw_delete_bus_master(struct sdw_bus *bus);
+int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
+ struct fwnode_handle *fwnode);
+void sdw_bus_master_delete(struct sdw_bus *bus);
/**
* sdw_port_config: Master or Slave Port configuration
diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h
index aaa7f4267c14..52eb66cd11bc 100644
--- a/include/linux/soundwire/sdw_type.h
+++ b/include/linux/soundwire/sdw_type.h
@@ -5,6 +5,13 @@
#define __SOUNDWIRE_TYPES_H
extern struct bus_type sdw_bus_type;
+extern struct device_type sdw_slave_type;
+extern struct device_type sdw_master_type;
+
+static inline int is_sdw_slave(const struct device *dev)
+{
+ return dev->type == &sdw_slave_type;
+}
#define drv_to_sdw_driver(_drv) container_of(_drv, struct sdw_driver, driver)
@@ -14,7 +21,7 @@ extern struct bus_type sdw_bus_type;
int __sdw_register_driver(struct sdw_driver *drv, struct module *owner);
void sdw_unregister_driver(struct sdw_driver *drv);
-int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size);
+int sdw_slave_uevent(struct device *dev, struct kobj_uevent_env *env);
/**
* module_sdw_driver() - Helper macro for registering a Soundwire driver
diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h
deleted file mode 100644
index 831a5de7a0e2..000000000000
--- a/include/linux/spi/l4f00242t03.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * l4f00242t03.h -- Platform glue for Epson L4F00242T03 LCD
- *
- * Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
- * Based on Marek Vasut work in lms283gf05.h
-*/
-
-#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_
-#define _INCLUDE_LINUX_SPI_L4F00242T03_H_
-
-struct l4f00242t03_pdata {
- unsigned int reset_gpio;
- unsigned int data_enable_gpio;
-};
-
-#endif /* _INCLUDE_LINUX_SPI_L4F00242T03_H_ */
diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h
deleted file mode 100644
index 738a45b435f2..000000000000
--- a/include/linux/spi/mcp23s08.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-struct mcp23s08_platform_data {
- /* For mcp23s08, up to 4 slaves (numbered 0..3) can share one SPI
- * chipselect, each providing 1 gpio_chip instance with 8 gpios.
- * For mpc23s17, up to 8 slaves (numbered 0..7) can share one SPI
- * chipselect, each providing 1 gpio_chip (port A + port B) with
- * 16 gpios.
- */
- u32 spi_present_mask;
-
- /* "base" is the number of the first GPIO or -1 for dynamic
- * assignment. If there are gaps in chip addressing the GPIO
- * numbers are sequential .. so for example if only slaves 0
- * and 3 are present, their GPIOs range from base to base+15
- * (or base+31 for s17 variant).
- */
- unsigned base;
-};
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 83bd8cb475d7..b7af8cc13eda 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -64,7 +64,7 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
struct stack_trace {
unsigned int nr_entries, max_entries;
unsigned long *entries;
- int skip; /* input argument: How many entries to skip */
+ unsigned int skip; /* input argument: How many entries to skip */
};
extern void save_stack_trace(struct stack_trace *trace);
diff --git a/include/linux/string.h b/include/linux/string.h
index 6dfbb2efa815..9b7a0632e87a 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -272,6 +272,31 @@ void __read_overflow3(void) __compiletime_error("detected read beyond size of ob
void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
+
+#ifdef CONFIG_KASAN
+extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
+extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
+extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
+extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
+extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
+extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
+extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
+extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
+extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
+extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
+#else
+#define __underlying_memchr __builtin_memchr
+#define __underlying_memcmp __builtin_memcmp
+#define __underlying_memcpy __builtin_memcpy
+#define __underlying_memmove __builtin_memmove
+#define __underlying_memset __builtin_memset
+#define __underlying_strcat __builtin_strcat
+#define __underlying_strcpy __builtin_strcpy
+#define __underlying_strlen __builtin_strlen
+#define __underlying_strncat __builtin_strncat
+#define __underlying_strncpy __builtin_strncpy
+#endif
+
__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
{
size_t p_size = __builtin_object_size(p, 0);
@@ -279,14 +304,14 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
__write_overflow();
if (p_size < size)
fortify_panic(__func__);
- return __builtin_strncpy(p, q, size);
+ return __underlying_strncpy(p, q, size);
}
__FORTIFY_INLINE char *strcat(char *p, const char *q)
{
size_t p_size = __builtin_object_size(p, 0);
if (p_size == (size_t)-1)
- return __builtin_strcat(p, q);
+ return __underlying_strcat(p, q);
if (strlcat(p, q, p_size) >= p_size)
fortify_panic(__func__);
return p;
@@ -300,7 +325,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
/* Work around gcc excess stack consumption issue */
if (p_size == (size_t)-1 ||
(__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
- return __builtin_strlen(p);
+ return __underlying_strlen(p);
ret = strnlen(p, p_size);
if (p_size <= ret)
fortify_panic(__func__);
@@ -333,7 +358,7 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
__write_overflow();
if (len >= p_size)
fortify_panic(__func__);
- __builtin_memcpy(p, q, len);
+ __underlying_memcpy(p, q, len);
p[len] = '\0';
}
return ret;
@@ -346,12 +371,12 @@ __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
size_t p_size = __builtin_object_size(p, 0);
size_t q_size = __builtin_object_size(q, 0);
if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __builtin_strncat(p, q, count);
+ return __underlying_strncat(p, q, count);
p_len = strlen(p);
copy_len = strnlen(q, count);
if (p_size < p_len + copy_len + 1)
fortify_panic(__func__);
- __builtin_memcpy(p + p_len, q, copy_len);
+ __underlying_memcpy(p + p_len, q, copy_len);
p[p_len + copy_len] = '\0';
return p;
}
@@ -363,7 +388,7 @@ __FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
__write_overflow();
if (p_size < size)
fortify_panic(__func__);
- return __builtin_memset(p, c, size);
+ return __underlying_memset(p, c, size);
}
__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
@@ -378,7 +403,7 @@ __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
}
if (p_size < size || q_size < size)
fortify_panic(__func__);
- return __builtin_memcpy(p, q, size);
+ return __underlying_memcpy(p, q, size);
}
__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
@@ -393,7 +418,7 @@ __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
}
if (p_size < size || q_size < size)
fortify_panic(__func__);
- return __builtin_memmove(p, q, size);
+ return __underlying_memmove(p, q, size);
}
extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
@@ -419,7 +444,7 @@ __FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
}
if (p_size < size || q_size < size)
fortify_panic(__func__);
- return __builtin_memcmp(p, q, size);
+ return __underlying_memcmp(p, q, size);
}
__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
@@ -429,7 +454,7 @@ __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
__read_overflow();
if (p_size < size)
fortify_panic(__func__);
- return __builtin_memchr(p, c, size);
+ return __underlying_memchr(p, c, size);
}
void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
@@ -460,11 +485,22 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q)
size_t p_size = __builtin_object_size(p, 0);
size_t q_size = __builtin_object_size(q, 0);
if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __builtin_strcpy(p, q);
+ return __underlying_strcpy(p, q);
memcpy(p, q, strlen(q) + 1);
return p;
}
+/* Don't use these outside the FORTIFY_SOURCE implementation */
+#undef __underlying_memchr
+#undef __underlying_memcmp
+#undef __underlying_memcpy
+#undef __underlying_memmove
+#undef __underlying_memset
+#undef __underlying_strcat
+#undef __underlying_strcpy
+#undef __underlying_strlen
+#undef __underlying_strncat
+#undef __underlying_strncpy
#endif
/**
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 4f6b28487f28..98da816b5fc2 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -76,7 +76,7 @@ struct rpc_auth {
unsigned int au_verfsize; /* size of reply verifier */
unsigned int au_ralign; /* words before UL header */
- unsigned int au_flags;
+ unsigned long au_flags;
const struct rpc_authops *au_ops;
rpc_authflavor_t au_flavor; /* pseudoflavor (note may
* differ from the flavor in
@@ -89,7 +89,8 @@ struct rpc_auth {
};
/* rpc_auth au_flags */
-#define RPCAUTH_AUTH_DATATOUCH 0x00000002
+#define RPCAUTH_AUTH_DATATOUCH (1)
+#define RPCAUTH_AUTH_UPDATE_SLACK (2)
struct rpc_auth_create_args {
rpc_authflavor_t pseudoflavor;
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index bc07e51f20d1..bf4ac8a0268c 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -84,6 +84,7 @@ struct pf_desc {
u32 service;
char *name;
char *auth_domain_name;
+ struct auth_domain *domain;
bool datatouch;
};
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index fd390894a584..386628b36bc7 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -254,6 +254,7 @@ struct svc_rqst {
struct page * *rq_page_end; /* one past the last page */
struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
+ struct bio_vec rq_bvec[RPCSVC_MAXPAGES];
__be32 rq_xid; /* transmission id */
u32 rq_prog; /* program number */
@@ -299,6 +300,7 @@ struct svc_rqst {
struct net *rq_bc_net; /* pointer to backchannel's
* net namespace
*/
+ void ** rq_lease_breaker; /* The v4 client breaking a lease */
};
#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index cbcfbd0521e3..7ed82625dc0b 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -48,7 +48,6 @@
#include <linux/sunrpc/rpc_rdma.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
-#define SVCRDMA_DEBUG
/* Default and maximum inline threshold sizes */
enum {
@@ -160,9 +159,8 @@ struct svc_rdma_send_ctxt {
};
/* svc_rdma_backchannel.c */
-extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
- __be32 *rdma_resp,
- struct xdr_buf *rcvbuf);
+extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp,
+ struct svc_rdma_recv_ctxt *rctxt);
/* svc_rdma_recvfrom.c */
extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 9e1e046de176..aca35ab5cff2 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -117,6 +117,12 @@ static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u
return 0;
}
+static inline bool svc_xprt_is_dead(const struct svc_xprt *xprt)
+{
+ return (test_bit(XPT_DEAD, &xprt->xpt_flags) != 0) ||
+ (test_bit(XPT_CLOSE, &xprt->xpt_flags) != 0);
+}
+
int svc_reg_xprt_class(struct svc_xprt_class *);
void svc_unreg_xprt_class(struct svc_xprt_class *);
void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *,
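svc_xprt_is_dead() collapses the two flag tests callers used to open-code; a hedged caller sketch (the surrounding function is invented):

	static void example_enqueue(struct svc_xprt *xprt)
	{
		/* XPT_DEAD or XPT_CLOSE set: transport is going away */
		if (svc_xprt_is_dead(xprt))
			return;
		/* ... queue xprt for a server thread ... */
	}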
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h
index ca39a388dc22..f09c82b0a7ae 100644
--- a/include/linux/sunrpc/svcauth_gss.h
+++ b/include/linux/sunrpc/svcauth_gss.h
@@ -20,7 +20,8 @@ int gss_svc_init(void);
void gss_svc_shutdown(void);
int gss_svc_init_net(struct net *net);
void gss_svc_shutdown_net(struct net *net);
-int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
+struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor,
+ char *name);
u32 svcauth_gss_flavor(struct auth_domain *dom);
#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 771baadaee9d..b7ac7fe68306 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -28,7 +28,7 @@ struct svc_sock {
/* private TCP part */
/* On-the-wire fragment header: */
- __be32 sk_reclen;
+ __be32 sk_marker;
/* As we receive a record, this includes the length received so
* far (including the fragment header): */
u32 sk_tcplen;
@@ -41,12 +41,12 @@ struct svc_sock {
static inline u32 svc_sock_reclen(struct svc_sock *svsk)
{
- return ntohl(svsk->sk_reclen) & RPC_FRAGMENT_SIZE_MASK;
+ return be32_to_cpu(svsk->sk_marker) & RPC_FRAGMENT_SIZE_MASK;
}
static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
{
- return ntohl(svsk->sk_reclen) & RPC_LAST_STREAM_FRAGMENT;
+ return be32_to_cpu(svsk->sk_marker) & RPC_LAST_STREAM_FRAGMENT;
}
/*
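The sk_reclen to sk_marker rename reflects what the field actually stores: the raw RPC-over-TCP record marker, whose top bit flags the final fragment and whose low 31 bits give the fragment length. A hedged decode sketch using the same constants as the helpers above:

	static void example_decode_marker(__be32 marker)
	{
		u32 v = be32_to_cpu(marker);
		u32 len = v & RPC_FRAGMENT_SIZE_MASK;		/* low 31 bits */
		bool last = v & RPC_LAST_STREAM_FRAGMENT;	/* top bit */

		pr_debug("fragment: %u bytes%s\n", len, last ? " (last)" : "");
	}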
diff --git a/include/linux/swap.h b/include/linux/swap.h
index e92176fc8824..4c5974bb9ba9 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -334,9 +334,10 @@ extern unsigned long nr_free_pagecache_pages(void);
/* linux/mm/swap.c */
+extern void lru_note_cost(struct lruvec *lruvec, bool file,
+ unsigned int nr_pages);
+extern void lru_note_cost_page(struct page *);
extern void lru_cache_add(struct page *);
-extern void lru_cache_add_anon(struct page *page);
-extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
@@ -651,11 +652,9 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
#endif
#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-extern void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
- gfp_t gfp_mask);
+extern void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
#else
-static inline void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg,
- int node, gfp_t gfp_mask)
+static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
}
#endif
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index f2401e45a3c2..50bb7f383a1b 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -197,6 +197,7 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
void unregister_sysctl_table(struct ctl_table_header * table);
extern int sysctl_init(void);
+void do_sysctl_args(void);
extern int pwrsw_enabled;
extern int unaligned_enabled;
@@ -235,6 +236,9 @@ static inline void setup_sysctl_set(struct ctl_table_set *p,
{
}
+static inline void do_sysctl_args(void)
+{
+}
#endif /* CONFIG_SYSCTL */
int sysctl_max_threads(struct ctl_table *table, int write, void *buffer,
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 8e159e16850f..3a582ec7a2f1 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -30,10 +30,10 @@
#define SYSRQ_ENABLE_RTNICE 0x0100
struct sysrq_key_op {
- void (*handler)(int);
- char *help_msg;
- char *action_msg;
- int enable_mask;
+ void (* const handler)(int);
+ const char * const help_msg;
+ const char * const action_msg;
+ const int enable_mask;
};
#ifdef CONFIG_MAGIC_SYSRQ
@@ -45,9 +45,9 @@ struct sysrq_key_op {
void handle_sysrq(int key);
void __handle_sysrq(int key, bool check_mask);
-int register_sysrq_key(int key, struct sysrq_key_op *op);
-int unregister_sysrq_key(int key, struct sysrq_key_op *op);
-struct sysrq_key_op *__sysrq_get_key_op(int key);
+int register_sysrq_key(int key, const struct sysrq_key_op *op);
+int unregister_sysrq_key(int key, const struct sysrq_key_op *op);
+extern const struct sysrq_key_op *__sysrq_reboot_op;
int sysrq_toggle_support(int enable_mask);
int sysrq_mask(void);
@@ -62,12 +62,12 @@ static inline void __handle_sysrq(int key, bool check_mask)
{
}
-static inline int register_sysrq_key(int key, struct sysrq_key_op *op)
+static inline int register_sysrq_key(int key, const struct sysrq_key_op *op)
{
return -EINVAL;
}
-static inline int unregister_sysrq_key(int key, struct sysrq_key_op *op)
+static inline int unregister_sysrq_key(int key, const struct sysrq_key_op *op)
{
return -EINVAL;
}
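With the API const-qualified end to end, key handlers can live in read-only data; a hedged sketch (the key, messages, and handler body are invented):

	static void example_sysrq_handler(int key)
	{
		pr_info("sysrq: example handler for key %d\n", key);
	}

	static const struct sysrq_key_op example_sysrq_op = {
		.handler	= example_sysrq_handler,
		.help_msg	= "example(x)",
		.action_msg	= "Running example handler",
		.enable_mask	= SYSRQ_ENABLE_RTNICE,
	};

	/* register_sysrq_key('x', &example_sysrq_op); */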
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 1412e9cc79ce..d074302989dd 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -26,6 +26,7 @@
#define TEE_SHM_REGISTER BIT(3) /* Memory registered in secure world */
#define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */
#define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */
+#define TEE_SHM_KERNEL_MAPPED BIT(6) /* Memory mapped in kernel space */
struct device;
struct tee_device;
@@ -166,6 +167,22 @@ int tee_device_register(struct tee_device *teedev);
void tee_device_unregister(struct tee_device *teedev);
/**
+ * tee_session_calc_client_uuid() - Calculates client UUID for session
+ * @uuid: Resulting UUID
+ * @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*)
+ * @connection_data: Connection data for opening session
+ *
+ * Based on the connection method, calculates a UUIDv5-based client UUID.
+ *
+ * For group-based logins, verifies that the calling process has the
+ * specified credentials.
+ *
+ * @return < 0 on failure
+ */
+int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
+ const u8 connection_data[TEE_IOCTL_UUID_LEN]);
+
+/**
* struct tee_shm - shared memory object
* @ctx: context using the object
* @paddr: physical address of the shared memory
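A hedged call sketch for the new helper; the wrapper function is invented, and the TEE_IOCTL_LOGIN_* connection methods come from the uapi header:

	static int example_open_session(u32 method,
					const u8 data[TEE_IOCTL_UUID_LEN])
	{
		uuid_t client_uuid;
		int rc;

		rc = tee_session_calc_client_uuid(&client_uuid, method, data);
		if (rc < 0)
			return rc; /* credential check or derivation failed */
		/* ... hand client_uuid to the secure world ... */
		return 0;
	}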
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index c91b1e344d56..216185bb3014 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -32,20 +32,10 @@
/* use value, which < 0K, to indicate an invalid/uninitialized temperature */
#define THERMAL_TEMP_INVALID -274000
-/* Default Thermal Governor */
-#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
-#define DEFAULT_THERMAL_GOVERNOR "step_wise"
-#elif defined(CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE)
-#define DEFAULT_THERMAL_GOVERNOR "fair_share"
-#elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE)
-#define DEFAULT_THERMAL_GOVERNOR "user_space"
-#elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR)
-#define DEFAULT_THERMAL_GOVERNOR "power_allocator"
-#endif
-
struct thermal_zone_device;
struct thermal_cooling_device;
struct thermal_instance;
+struct thermal_attr;
enum thermal_device_mode {
THERMAL_DEVICE_DISABLED = 0,
@@ -130,11 +120,6 @@ struct thermal_cooling_device {
struct list_head node;
};
-struct thermal_attr {
- struct device_attribute attr;
- char name[THERMAL_NAME_LENGTH];
-};
-
/**
* struct thermal_zone_device - structure for a thermal zone
* @id: unique id number for each thermal zone
@@ -347,21 +332,6 @@ struct thermal_zone_of_device_ops {
int (*set_trip_temp)(void *, int, int);
};
-/**
- * struct thermal_trip - representation of a point in temperature domain
- * @np: pointer to struct device_node that this trip point was created from
- * @temperature: temperature value in miliCelsius
- * @hysteresis: relative hysteresis in miliCelsius
- * @type: trip point type
- */
-
-struct thermal_trip {
- struct device_node *np;
- int temperature;
- int hysteresis;
- enum thermal_trip_type type;
-};
-
/* Function declarations */
#ifdef CONFIG_THERMAL_OF
int thermal_zone_of_get_sensor_id(struct device_node *tz_np,
@@ -413,19 +383,7 @@ void devm_thermal_zone_of_sensor_unregister(struct device *dev,
#endif
-#if IS_ENABLED(CONFIG_THERMAL)
-static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
-{
- return cdev->ops->get_requested_power && cdev->ops->state2power &&
- cdev->ops->power2state;
-}
-
-int power_actor_get_max_power(struct thermal_cooling_device *,
- struct thermal_zone_device *tz, u32 *max_power);
-int power_actor_get_min_power(struct thermal_cooling_device *,
- struct thermal_zone_device *tz, u32 *min_power);
-int power_actor_set_power(struct thermal_cooling_device *,
- struct thermal_instance *, u32);
+#ifdef CONFIG_THERMAL
struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
void *, struct thermal_zone_device_ops *,
struct thermal_zone_params *, int, int);
@@ -439,7 +397,6 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *);
void thermal_zone_device_update(struct thermal_zone_device *,
enum thermal_notify_event);
-void thermal_zone_set_trips(struct thermal_zone_device *);
struct thermal_cooling_device *thermal_cooling_device_register(const char *,
void *, const struct thermal_cooling_device_ops *);
@@ -457,24 +414,9 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
int thermal_zone_get_slope(struct thermal_zone_device *tz);
int thermal_zone_get_offset(struct thermal_zone_device *tz);
-int get_tz_trend(struct thermal_zone_device *, int);
-struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
- struct thermal_cooling_device *, int);
void thermal_cdev_update(struct thermal_cooling_device *);
void thermal_notify_framework(struct thermal_zone_device *, int);
#else
-static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
-{ return false; }
-static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev,
- struct thermal_zone_device *tz, u32 *max_power)
-{ return 0; }
-static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev,
- struct thermal_zone_device *tz,
- u32 *min_power)
-{ return -ENODEV; }
-static inline int power_actor_set_power(struct thermal_cooling_device *cdev,
- struct thermal_instance *tz, u32 power)
-{ return 0; }
static inline struct thermal_zone_device *thermal_zone_device_register(
const char *type, int trips, int mask, void *devdata,
struct thermal_zone_device_ops *ops,
@@ -484,21 +426,6 @@ static inline struct thermal_zone_device *thermal_zone_device_register(
static inline void thermal_zone_device_unregister(
struct thermal_zone_device *tz)
{ }
-static inline int thermal_zone_bind_cooling_device(
- struct thermal_zone_device *tz, int trip,
- struct thermal_cooling_device *cdev,
- unsigned long upper, unsigned long lower,
- unsigned int weight)
-{ return -ENODEV; }
-static inline int thermal_zone_unbind_cooling_device(
- struct thermal_zone_device *tz, int trip,
- struct thermal_cooling_device *cdev)
-{ return -ENODEV; }
-static inline void thermal_zone_device_update(struct thermal_zone_device *tz,
- enum thermal_notify_event event)
-{ }
-static inline void thermal_zone_set_trips(struct thermal_zone_device *tz)
-{ }
static inline struct thermal_cooling_device *
thermal_cooling_device_register(char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
@@ -530,12 +457,7 @@ static inline int thermal_zone_get_slope(
static inline int thermal_zone_get_offset(
struct thermal_zone_device *tz)
{ return -ENODEV; }
-static inline int get_tz_trend(struct thermal_zone_device *tz, int trip)
-{ return -ENODEV; }
-static inline struct thermal_instance *
-get_thermal_instance(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev, int trip)
-{ return ERR_PTR(-ENODEV); }
+
static inline void thermal_cdev_update(struct thermal_cooling_device *cdev)
{ }
static inline void thermal_notify_framework(struct thermal_zone_device *tz,
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index ece782ef5466..ff397c0d5c07 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -80,7 +80,7 @@ struct tb {
int index;
enum tb_security_level security_level;
size_t nboot_acl;
- unsigned long privdata[0];
+ unsigned long privdata[];
};
extern struct bus_type tb_bus_type;
diff --git a/include/linux/tifm.h b/include/linux/tifm.h
index 299cbb8c63bb..44073d06710f 100644
--- a/include/linux/tifm.h
+++ b/include/linux/tifm.h
@@ -124,7 +124,7 @@ struct tifm_adapter {
int (*has_ms_pif)(struct tifm_adapter *fm,
struct tifm_dev *sock);
- struct tifm_dev *sockets[0];
+ struct tifm_dev *sockets[];
};
struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets,
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 9de5c10293f5..c6abb79501b3 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -3,33 +3,36 @@
#define _LINUX_U64_STATS_SYNC_H
/*
- * To properly implement 64bits network statistics on 32bit and 64bit hosts,
- * we provide a synchronization point, that is a noop on 64bit or UP kernels.
+ * Protect against 64-bit values tearing on 32-bit architectures. This is
+ * typically used for statistics read/update in different subsystems.
*
* Key points :
- * 1) Use a seqcount on SMP 32bits, with low overhead.
- * 2) Whole thing is a noop on 64bit arches or UP kernels.
- * 3) Write side must ensure mutual exclusion or one seqcount update could
+ *
+ * - Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP.
+ * - The whole thing is a no-op on 64-bit architectures.
+ *
+ * Usage constraints:
+ *
+ * 1) Write side must ensure mutual exclusion, or one seqcount update could
* be lost, thus blocking readers forever.
- * If this synchronization point is not a mutex, but a spinlock or
- * spinlock_bh() or disable_bh() :
- * 3.1) Write side should not sleep.
- * 3.2) Write side should not allow preemption.
- * 3.3) If applicable, interrupts should be disabled.
*
- * 4) If reader fetches several counters, there is no guarantee the whole values
- * are consistent (remember point 1) : this is a noop on 64bit arches anyway)
+ * 2) Write side must disable preemption, or a seqcount reader can preempt the
+ * writer and also spin forever.
+ *
+ * 3) Write side must use the _irqsave() variant if other writers, or a reader,
+ * can be invoked from an IRQ context.
*
- * 5) readers are allowed to sleep or be preempted/interrupted : They perform
- * pure reads. But if they have to fetch many values, it's better to not allow
- * preemptions/interruptions to avoid many retries.
+ * 4) If reader fetches several counters, there is no guarantee the whole values
+ * are consistent w.r.t. each other (remember point #2: seqcounts are not
+ * used for 64-bit architectures).
*
- * 6) If counter might be written by an interrupt, readers should block interrupts.
- * (On UP, there is no seqcount_t protection, a reader allowing interrupts could
- * read partial values)
+ * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
+ * pure reads.
*
- * 7) For irq and softirq uses, readers can use u64_stats_fetch_begin_irq() and
- * u64_stats_fetch_retry_irq() helpers
+ * 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats
+ * might be updated from a hardirq or softirq context (remember point #1:
+ * seqcounts are not used for UP kernels). 32-bit UP stat readers could read
+ * corrupted 64-bit values otherwise.
*
* Usage :
*
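The usage the comment is about to show follows the familiar seqcount pattern; a hedged sketch with invented struct and field names:

	/* Writer: serialized and non-preemptible, per points 1 and 2. */
	u64_stats_update_begin(&stats->syncp);
	stats->rx_bytes += len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->syncp);

	/* Reader: pure reads inside a retry loop. */
	unsigned int start;
	u64 bytes, packets;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		bytes = stats->rx_bytes;
		packets = stats->rx_packets;
	} while (u64_stats_fetch_retry(&stats->syncp, start));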
diff --git a/include/linux/uacce.h b/include/linux/uacce.h
index 0e215e6d0534..454c2f6672d7 100644
--- a/include/linux/uacce.h
+++ b/include/linux/uacce.h
@@ -68,19 +68,21 @@ enum uacce_q_state {
* @uacce: pointer to uacce
* @priv: private pointer
* @wait: wait queue head
- * @list: index into uacce_mm
- * @uacce_mm: the corresponding mm
+ * @list: index into uacce queues list
* @qfrs: pointer of qfr regions
* @state: queue state machine
+ * @pasid: pasid associated to the mm
+ * @handle: iommu_sva handle returned by iommu_sva_bind_device()
*/
struct uacce_queue {
struct uacce_device *uacce;
void *priv;
wait_queue_head_t wait;
struct list_head list;
- struct uacce_mm *uacce_mm;
struct uacce_qfile_region *qfrs[UACCE_MAX_REGION];
enum uacce_q_state state;
+ int pasid;
+ struct iommu_sva *handle;
};
/**
@@ -96,8 +98,8 @@ struct uacce_queue {
* @cdev: cdev of the uacce
* @dev: dev of the uacce
* @priv: private pointer of the uacce
- * @mm_list: list head of uacce_mm->list
- * @mm_lock: lock for mm_list
+ * @queues: list of queues
+ * @queues_lock: lock for queues list
* @inode: core vfs
*/
struct uacce_device {
@@ -112,27 +114,9 @@ struct uacce_device {
struct cdev *cdev;
struct device dev;
void *priv;
- struct list_head mm_list;
- struct mutex mm_lock;
- struct inode *inode;
-};
-
-/**
- * struct uacce_mm - keep track of queues bound to a process
- * @list: index into uacce_device
- * @queues: list of queues
- * @mm: the mm struct
- * @lock: protects the list of queues
- * @pasid: pasid of the uacce_mm
- * @handle: iommu_sva handle return from iommu_sva_bind_device
- */
-struct uacce_mm {
- struct list_head list;
struct list_head queues;
- struct mm_struct *mm;
- struct mutex lock;
- int pasid;
- struct iommu_sva *handle;
+ struct mutex queues_lock;
+ struct inode *inode;
};
#if IS_ENABLED(CONFIG_UACCE)
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 9861c89f93be..7bcadca22100 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,9 +2,9 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
+#include <linux/instrumented.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
-#include <linux/kasan-checks.h>
#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
@@ -58,7 +58,7 @@
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}
@@ -67,7 +67,7 @@ static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}
@@ -88,7 +88,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
@@ -97,7 +97,7 @@ static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
@@ -109,7 +109,7 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
unsigned long res = n;
might_fault();
if (likely(access_ok(from, n))) {
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
res = raw_copy_from_user(to, from, n);
}
if (unlikely(res))
@@ -127,7 +127,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (access_ok(to, n)) {
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
}
return n;
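The kasan_check_*() to instrument_copy_*() switch routes each copy through <linux/instrumented.h>, which fans out to both KASAN and KCSAN; roughly (a sketch of the hook's shape, not the exact implementation):

	static __always_inline void
	example_instrument_copy_from_user(const void *to,
					  const void __user *from,
					  unsigned long n)
	{
		kasan_check_write(to, n);	/* memory-safety check */
		kcsan_check_write(to, n);	/* data-race check */
	}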
@@ -301,62 +301,20 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
return 0;
}
-/*
- * probe_kernel_read(): safely attempt to read from a location
- * @dst: pointer to the buffer that shall take the data
- * @src: address to read from
- * @size: size of the data chunk
- *
- * Safely read from address @src to the buffer at @dst. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long probe_kernel_read(void *dst, const void *src, size_t size);
-extern long probe_kernel_read_strict(void *dst, const void *src, size_t size);
-extern long __probe_kernel_read(void *dst, const void *src, size_t size);
+bool probe_kernel_read_allowed(const void *unsafe_src, size_t size);
-/*
- * probe_user_read(): safely attempt to read from a location in user space
- * @dst: pointer to the buffer that shall take the data
- * @src: address to read from
- * @size: size of the data chunk
- *
- * Safely read from address @src to the buffer at @dst. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
+extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long probe_user_read(void *dst, const void __user *src, size_t size);
-extern long __probe_user_read(void *dst, const void __user *src, size_t size);
-/*
- * probe_kernel_write(): safely attempt to write to a location
- * @dst: address to write to
- * @src: pointer to the data that shall be written
- * @size: size of the data chunk
- *
- * Safely write to address @dst from the buffer at @src. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
-extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
-
-/*
- * probe_user_write(): safely attempt to write to a location in user space
- * @dst: address to write to
- * @src: pointer to the data that shall be written
- * @size: size of the data chunk
- *
- * Safely write to address @dst from the buffer at @src. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
-extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);
-
-extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
-extern long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr,
- long count);
-extern long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
-extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
- long count);
-extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
+
+long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
+ long count);
+
+long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
+ long count);
+long strnlen_user_nofault(const void __user *unsafe_addr, long count);
/**
* probe_kernel_address(): safely attempt to read from a location
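The *_nofault names make the contract explicit: these helpers never fault, they return an error instead. A hedged usage sketch (the buffer and pointer names are invented):

	char buf[64];
	long len;

	/* e.g. from a tracing context, where faulting is not allowed */
	len = strncpy_from_kernel_nofault(buf, unsafe_ptr, sizeof(buf));
	if (len < 0)
		return len;	/* the kernel address was not readable */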
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index edd89b7c8f18..54167a2d28ea 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -67,6 +67,7 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
#define CI_HDRC_IMX_HSIC_ACTIVE_EVENT 2
#define CI_HDRC_IMX_HSIC_SUSPEND_EVENT 3
+#define CI_HDRC_CONTROLLER_VBUS_EVENT 4
int (*notify_event) (struct ci_hdrc *ci, unsigned event);
struct regulator *reg_vbus;
struct usb_otg_caps ci_otg_caps;
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 8675e145ea8b..2040696d75b6 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -249,6 +249,9 @@ int usb_function_activate(struct usb_function *);
int usb_interface_id(struct usb_configuration *, struct usb_function *);
+int config_ep_by_speed_and_alt(struct usb_gadget *g, struct usb_function *f,
+ struct usb_ep *_ep, u8 alt);
+
int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f,
struct usb_ep *_ep);
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 9411c08a5c7e..6a178177e4c9 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -42,6 +42,8 @@ struct usb_ep;
* @num_mapped_sgs: number of SG entries mapped to DMA (internal)
* @length: Length of that data
* @stream_id: The stream id, when USB3.0 bulk streams are being used
+ * @is_last: Indicates if this is the last request of a stream_id before
+ * switching to a different stream (required for DWC3 controllers).
* @no_interrupt: If true, hints that no completion irq is needed.
* Helpful sometimes with deep request queues that are handled
* directly by DMA controllers.
@@ -104,6 +106,7 @@ struct usb_request {
unsigned num_mapped_sgs;
unsigned stream_id:16;
+ unsigned is_last:1;
unsigned no_interrupt:1;
unsigned zero:1;
unsigned short_not_ok:1;
@@ -373,6 +376,7 @@ struct usb_gadget_ops {
* @connected: True if gadget is connected.
* @lpm_capable: If the gadget max_speed is FULL or HIGH, this flag
* indicates that it supports LPM as per the LPM ECN & errata.
+ * @irq: the interrupt number for the device controller.
*
* Gadgets have a mostly-portable "gadget driver" implementing device
* functions, handling all usb configurations and interfaces. Gadget
@@ -427,6 +431,7 @@ struct usb_gadget {
unsigned deactivated:1;
unsigned connected:1;
unsigned lpm_capable:1;
+ int irq;
};
#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
@@ -773,6 +778,9 @@ struct usb_gadget_string_container {
/* put descriptor for string with that id into buf (buflen >= 256) */
int usb_gadget_get_string(const struct usb_gadget_strings *table, int id, u8 *buf);
+/* check if the given language identifier is valid */
+bool usb_validate_langid(u16 langid);
+
/*-------------------------------------------------------------------------*/
/* utility to simplify managing config descriptors */
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index e12105ed3834..3dbb42c637c1 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -479,7 +479,8 @@ extern void usb_hcd_platform_shutdown(struct platform_device *dev);
struct pci_dev;
struct pci_device_id;
extern int usb_hcd_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id);
+ const struct pci_device_id *id,
+ const struct hc_driver *driver);
extern void usb_hcd_pci_remove(struct pci_dev *dev);
extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index b00a2642a9cd..5daa1c49761c 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -254,6 +254,7 @@ int typec_set_mode(struct typec_port *port, int mode);
void *typec_get_drvdata(struct typec_port *port);
+int typec_find_orientation(const char *name);
int typec_find_port_power_role(const char *name);
int typec_find_power_role(const char *name);
int typec_find_port_data_role(const char *name);
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 5453af87a33e..239db794357c 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -18,6 +18,16 @@ struct vdpa_callback {
};
/**
+ * vDPA notification area
+ * @addr: base address of the notification area
+ * @size: size of the notification area
+ */
+struct vdpa_notification_area {
+ resource_size_t addr;
+ resource_size_t size;
+};
+
+/**
* vDPA device - representation of a vDPA device
* @dev: underlying device
* @dma_dev: the actual device that is performing DMA
@@ -73,6 +83,10 @@ struct vdpa_device {
* @vdev: vdpa device
* @idx: virtqueue index
* Returns virtqueue state (last_avail_idx)
+ * @get_vq_notification: Get the notification area for a virtqueue
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Returns the notification area
* @get_vq_align: Get the virtqueue align requirement
* for the device
* @vdev: vdpa device
@@ -162,6 +176,8 @@ struct vdpa_config_ops {
bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, u64 state);
u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx);
+ struct vdpa_notification_area
+ (*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
/* Device ops */
u32 (*get_vq_align)(struct vdpa_device *vdev);
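A driver implementing the new op just reports where its per-virtqueue doorbell page lives, so userspace can map it and kick the device directly; a hedged sketch, EXAMPLE_DOORBELL_BASE being a hypothetical device constant:

	static struct vdpa_notification_area
	example_get_vq_notification(struct vdpa_device *vdev, u16 idx)
	{
		struct vdpa_notification_area area;

		area.addr = EXAMPLE_DOORBELL_BASE + idx * PAGE_SIZE;
		area.size = PAGE_SIZE;
		return area;
	}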
diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h
index 0e130b5077a5..2f9dd072f11f 100644
--- a/include/linux/vexpress.h
+++ b/include/linux/vexpress.h
@@ -10,38 +10,8 @@
#include <linux/device.h>
#include <linux/regmap.h>
-#define VEXPRESS_SITE_MB 0
-#define VEXPRESS_SITE_DB1 1
-#define VEXPRESS_SITE_DB2 2
-#define VEXPRESS_SITE_MASTER 0xf
-
-/* Config infrastructure */
-
-void vexpress_config_set_master(u32 site);
-u32 vexpress_config_get_master(void);
-
-void vexpress_config_lock(void *arg);
-void vexpress_config_unlock(void *arg);
-
-int vexpress_config_get_topo(struct device_node *node, u32 *site,
- u32 *position, u32 *dcc);
-
-/* Config bridge API */
-
-struct vexpress_config_bridge_ops {
- struct regmap * (*regmap_init)(struct device *dev, void *context);
- void (*regmap_exit)(struct regmap *regmap, void *context);
-};
-
-struct device *vexpress_config_bridge_register(struct device *parent,
- struct vexpress_config_bridge_ops *ops, void *context);
-
/* Config regmap API */
struct regmap *devm_regmap_init_vexpress_config(struct device *dev);
-/* Platform control */
-
-void vexpress_flags_set(u32 data);
-
#endif
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 5d92ee15d098..38d3c6a8dc7e 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -76,7 +76,9 @@ struct vfio_iommu_driver_ops {
struct iommu_group *group);
void (*detach_group)(void *iommu_data,
struct iommu_group *group);
- int (*pin_pages)(void *iommu_data, unsigned long *user_pfn,
+ int (*pin_pages)(void *iommu_data,
+ struct iommu_group *group,
+ unsigned long *user_pfn,
int npage, int prot,
unsigned long *phys_pfn);
int (*unpin_pages)(void *iommu_data,
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index ffef0f279747..24fc7c3ae7d6 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -35,6 +35,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGSCAN_KSWAPD,
PGSCAN_DIRECT,
PGSCAN_DIRECT_THROTTLE,
+ PGSCAN_ANON,
+ PGSCAN_FILE,
+ PGSTEAL_ANON,
+ PGSTEAL_FILE,
#ifdef CONFIG_NUMA
PGSCAN_ZONE_RECLAIM_FAILED,
#endif
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index cb507151710f..aa961088c551 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -26,9 +26,11 @@ struct reclaim_stat {
unsigned nr_congested;
unsigned nr_writeback;
unsigned nr_immediate;
+ unsigned nr_pageout;
unsigned nr_activate[2];
unsigned nr_ref_keep;
unsigned nr_unmap_fail;
+ unsigned nr_lazyfree_fail;
};
enum writeback_stat_item {
diff --git a/include/linux/vringh.h b/include/linux/vringh.h
index 9e2763d7c159..59bd50f99291 100644
--- a/include/linux/vringh.h
+++ b/include/linux/vringh.h
@@ -105,9 +105,9 @@ struct vringh_kiov {
/* Helpers for userspace vrings. */
int vringh_init_user(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
- struct vring_desc __user *desc,
- struct vring_avail __user *avail,
- struct vring_used __user *used);
+ vring_desc_t __user *desc,
+ vring_avail_t __user *avail,
+ vring_used_t __user *used);
static inline void vringh_iov_init(struct vringh_iov *iov,
struct iovec *iovec, unsigned num)
diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h
new file mode 100644
index 000000000000..5e08db2adc31
--- /dev/null
+++ b/include/linux/watch_queue.h
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/* User-mappable watch queue
+ *
+ * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * See Documentation/watch_queue.rst
+ */
+
+#ifndef _LINUX_WATCH_QUEUE_H
+#define _LINUX_WATCH_QUEUE_H
+
+#include <uapi/linux/watch_queue.h>
+#include <linux/kref.h>
+#include <linux/rcupdate.h>
+
+#ifdef CONFIG_WATCH_QUEUE
+
+struct cred;
+
+struct watch_type_filter {
+ enum watch_notification_type type;
+ __u32 subtype_filter[1]; /* Bitmask of subtypes to filter on */
+ __u32 info_filter; /* Filter on watch_notification::info */
+ __u32 info_mask; /* Mask of relevant bits in info_filter */
+};
+
+struct watch_filter {
+ union {
+ struct rcu_head rcu;
+ unsigned long type_filter[2]; /* Bitmask of accepted types */
+ };
+ u32 nr_filters; /* Number of filters */
+ struct watch_type_filter filters[];
+};
+
+struct watch_queue {
+ struct rcu_head rcu;
+ struct watch_filter __rcu *filter;
+ struct pipe_inode_info *pipe; /* The pipe we're using as a buffer */
+ struct hlist_head watches; /* Contributory watches */
+ struct page **notes; /* Preallocated notifications */
+ unsigned long *notes_bitmap; /* Allocation bitmap for notes */
+ struct kref usage; /* Object usage count */
+ spinlock_t lock;
+ unsigned int nr_notes; /* Number of notes */
+ unsigned int nr_pages; /* Number of pages in notes[] */
+ bool defunct; /* T when queues closed */
+};
+
+/*
+ * Representation of a watch on an object.
+ */
+struct watch {
+ union {
+ struct rcu_head rcu;
+ u32 info_id; /* ID to be OR'd in to info field */
+ };
+ struct watch_queue __rcu *queue; /* Queue to post events to */
+ struct hlist_node queue_node; /* Link in queue->watches */
+ struct watch_list __rcu *watch_list;
+ struct hlist_node list_node; /* Link in watch_list->watchers */
+ const struct cred *cred; /* Creds of the owner of the watch */
+ void *private; /* Private data for the watched object */
+ u64 id; /* Internal identifier */
+ struct kref usage; /* Object usage count */
+};
+
+/*
+ * List of watches on an object.
+ */
+struct watch_list {
+ struct rcu_head rcu;
+ struct hlist_head watchers;
+ void (*release_watch)(struct watch *);
+ spinlock_t lock;
+};
+
+extern void __post_watch_notification(struct watch_list *,
+ struct watch_notification *,
+ const struct cred *,
+ u64);
+extern struct watch_queue *get_watch_queue(int);
+extern void put_watch_queue(struct watch_queue *);
+extern void init_watch(struct watch *, struct watch_queue *);
+extern int add_watch_to_object(struct watch *, struct watch_list *);
+extern int remove_watch_from_object(struct watch_list *, struct watch_queue *, u64, bool);
+extern long watch_queue_set_size(struct pipe_inode_info *, unsigned int);
+extern long watch_queue_set_filter(struct pipe_inode_info *,
+ struct watch_notification_filter __user *);
+extern int watch_queue_init(struct pipe_inode_info *);
+extern void watch_queue_clear(struct watch_queue *);
+
+static inline void init_watch_list(struct watch_list *wlist,
+ void (*release_watch)(struct watch *))
+{
+ INIT_HLIST_HEAD(&wlist->watchers);
+ spin_lock_init(&wlist->lock);
+ wlist->release_watch = release_watch;
+}
+
+static inline void post_watch_notification(struct watch_list *wlist,
+ struct watch_notification *n,
+ const struct cred *cred,
+ u64 id)
+{
+ if (unlikely(wlist))
+ __post_watch_notification(wlist, n, cred, id);
+}
+
+static inline void remove_watch_list(struct watch_list *wlist, u64 id)
+{
+ if (wlist) {
+ remove_watch_from_object(wlist, NULL, id, true);
+ kfree_rcu(wlist, rcu);
+ }
+}
+
+/**
+ * watch_sizeof - Calculate the information part of the size of a watch record,
+ * given the structure size.
+ */
+#define watch_sizeof(STRUCT) (sizeof(STRUCT) << WATCH_INFO_LENGTH__SHIFT)
+
+#endif
+
+#endif /* _LINUX_WATCH_QUEUE_H */
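Tying the pieces above together, an object owner embeds a watch_list and posts through it; a hedged sketch (example_object and its helpers are invented):

	struct example_object {
		struct watch_list	watchers;
	};

	static void example_init(struct example_object *obj)
	{
		init_watch_list(&obj->watchers, NULL);	/* no release hook */
	}

	static void example_notify(struct example_object *obj,
				   struct watch_notification *n, u64 id)
	{
		/* cheap when the list has no watchers */
		post_watch_notification(&obj->watchers, n, current_cred(), id);
	}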
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 8b505d22fc0e..26de0cae2a0a 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -62,7 +62,7 @@ enum {
WORK_CPU_UNBOUND = NR_CPUS,
/*
- * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
+ * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
* This makes pwqs aligned to 256 bytes and allows 15 workqueue
* flush colors.
*/
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index f8a7e1a850fb..8e5c5bb16e2d 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -197,6 +197,7 @@ void wakeup_flusher_threads(enum wb_reason reason);
void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
+void inode_io_list_del(struct inode *inode);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 14c893433139..b4d70e7568b2 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -576,7 +576,7 @@ void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
*
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs.
- * Return: The entry which used to be at this index.
+ * Return: The old entry at this index or xa_err() if an error happened.
*/
static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
void *entry, gfp_t gfp)
@@ -602,7 +602,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
*
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts.
- * Return: The entry which used to be at this index.
+ * Return: The old entry at this index or xa_err() if an error happened.
*/
static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
void *entry, gfp_t gfp)
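The clarified return contract matters to callers: on failure these stores hand back an xa_err()-encoded pointer, not the previous entry. A hedged check sketch (xa, index, and new_entry are assumed to exist):

	void *old = xa_store_bh(&xa, index, new_entry, GFP_KERNEL);

	if (xa_is_err(old))
		return xa_err(old);	/* e.g. -ENOMEM */
	/* otherwise 'old' is the entry previously stored at 'index' */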